kaiko-eva 0.0.2__py3-none-any.whl → 0.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of kaiko-eva might be problematic. Click here for more details.

Files changed (159) hide show
  1. eva/core/callbacks/__init__.py +2 -2
  2. eva/core/callbacks/writers/__init__.py +6 -3
  3. eva/core/callbacks/writers/embeddings/__init__.py +6 -0
  4. eva/core/callbacks/writers/embeddings/_manifest.py +71 -0
  5. eva/core/callbacks/writers/embeddings/base.py +192 -0
  6. eva/core/callbacks/writers/embeddings/classification.py +117 -0
  7. eva/core/callbacks/writers/embeddings/segmentation.py +78 -0
  8. eva/core/callbacks/writers/embeddings/typings.py +38 -0
  9. eva/core/data/datasets/__init__.py +2 -2
  10. eva/core/data/datasets/classification/__init__.py +8 -0
  11. eva/core/data/datasets/classification/embeddings.py +34 -0
  12. eva/core/data/datasets/{embeddings/classification → classification}/multi_embeddings.py +13 -9
  13. eva/core/data/datasets/{embeddings/base.py → embeddings.py} +47 -32
  14. eva/core/data/splitting/__init__.py +6 -0
  15. eva/core/data/splitting/random.py +41 -0
  16. eva/core/data/splitting/stratified.py +56 -0
  17. eva/core/loggers/experimental_loggers.py +2 -2
  18. eva/core/loggers/log/__init__.py +3 -2
  19. eva/core/loggers/log/image.py +71 -0
  20. eva/core/loggers/log/parameters.py +10 -0
  21. eva/core/loggers/loggers.py +6 -0
  22. eva/core/metrics/__init__.py +6 -2
  23. eva/core/metrics/defaults/__init__.py +10 -3
  24. eva/core/metrics/defaults/classification/__init__.py +1 -1
  25. eva/core/metrics/defaults/classification/binary.py +0 -9
  26. eva/core/metrics/defaults/classification/multiclass.py +0 -8
  27. eva/core/metrics/defaults/segmentation/__init__.py +5 -0
  28. eva/core/metrics/defaults/segmentation/multiclass.py +43 -0
  29. eva/core/metrics/generalized_dice.py +59 -0
  30. eva/core/metrics/mean_iou.py +120 -0
  31. eva/core/metrics/structs/schemas.py +3 -1
  32. eva/core/models/__init__.py +3 -1
  33. eva/core/models/modules/head.py +10 -4
  34. eva/core/models/modules/typings.py +14 -1
  35. eva/core/models/modules/utils/batch_postprocess.py +37 -5
  36. eva/core/models/networks/__init__.py +1 -2
  37. eva/core/models/networks/mlp.py +2 -2
  38. eva/core/models/transforms/__init__.py +6 -0
  39. eva/core/models/{networks/transforms → transforms}/extract_cls_features.py +10 -2
  40. eva/core/models/transforms/extract_patch_features.py +47 -0
  41. eva/core/models/wrappers/__init__.py +13 -0
  42. eva/core/models/{networks/wrappers → wrappers}/base.py +3 -2
  43. eva/core/models/{networks/wrappers → wrappers}/from_function.py +5 -12
  44. eva/core/models/{networks/wrappers → wrappers}/huggingface.py +15 -11
  45. eva/core/models/{networks/wrappers → wrappers}/onnx.py +6 -3
  46. eva/core/trainers/functional.py +1 -0
  47. eva/core/utils/__init__.py +6 -0
  48. eva/core/utils/clone.py +27 -0
  49. eva/core/utils/memory.py +28 -0
  50. eva/core/utils/operations.py +26 -0
  51. eva/core/utils/parser.py +20 -0
  52. eva/vision/__init__.py +2 -2
  53. eva/vision/callbacks/__init__.py +5 -0
  54. eva/vision/callbacks/loggers/__init__.py +5 -0
  55. eva/vision/callbacks/loggers/batch/__init__.py +5 -0
  56. eva/vision/callbacks/loggers/batch/base.py +130 -0
  57. eva/vision/callbacks/loggers/batch/segmentation.py +188 -0
  58. eva/vision/data/datasets/__init__.py +30 -3
  59. eva/vision/data/datasets/_validators.py +15 -2
  60. eva/vision/data/datasets/classification/__init__.py +12 -1
  61. eva/vision/data/datasets/classification/bach.py +10 -15
  62. eva/vision/data/datasets/classification/base.py +17 -24
  63. eva/vision/data/datasets/classification/camelyon16.py +244 -0
  64. eva/vision/data/datasets/classification/crc.py +10 -15
  65. eva/vision/data/datasets/classification/mhist.py +10 -15
  66. eva/vision/data/datasets/classification/panda.py +184 -0
  67. eva/vision/data/datasets/classification/patch_camelyon.py +13 -16
  68. eva/vision/data/datasets/classification/wsi.py +105 -0
  69. eva/vision/data/datasets/segmentation/__init__.py +15 -2
  70. eva/vision/data/datasets/segmentation/_utils.py +38 -0
  71. eva/vision/data/datasets/segmentation/base.py +16 -17
  72. eva/vision/data/datasets/segmentation/bcss.py +236 -0
  73. eva/vision/data/datasets/segmentation/consep.py +156 -0
  74. eva/vision/data/datasets/segmentation/embeddings.py +34 -0
  75. eva/vision/data/datasets/segmentation/lits.py +178 -0
  76. eva/vision/data/datasets/segmentation/monusac.py +236 -0
  77. eva/vision/data/datasets/segmentation/{total_segmentator.py → total_segmentator_2d.py} +130 -36
  78. eva/vision/data/datasets/wsi.py +187 -0
  79. eva/vision/data/transforms/__init__.py +3 -2
  80. eva/vision/data/transforms/common/__init__.py +2 -1
  81. eva/vision/data/transforms/common/resize_and_clamp.py +51 -0
  82. eva/vision/data/transforms/common/resize_and_crop.py +6 -7
  83. eva/vision/data/transforms/normalization/__init__.py +6 -0
  84. eva/vision/data/transforms/normalization/clamp.py +43 -0
  85. eva/vision/data/transforms/normalization/functional/__init__.py +5 -0
  86. eva/vision/data/transforms/normalization/functional/rescale_intensity.py +28 -0
  87. eva/vision/data/transforms/normalization/rescale_intensity.py +53 -0
  88. eva/vision/data/wsi/__init__.py +16 -0
  89. eva/vision/data/wsi/backends/__init__.py +69 -0
  90. eva/vision/data/wsi/backends/base.py +115 -0
  91. eva/vision/data/wsi/backends/openslide.py +73 -0
  92. eva/vision/data/wsi/backends/pil.py +52 -0
  93. eva/vision/data/wsi/backends/tiffslide.py +42 -0
  94. eva/vision/data/wsi/patching/__init__.py +6 -0
  95. eva/vision/data/wsi/patching/coordinates.py +98 -0
  96. eva/vision/data/wsi/patching/mask.py +123 -0
  97. eva/vision/data/wsi/patching/samplers/__init__.py +14 -0
  98. eva/vision/data/wsi/patching/samplers/_utils.py +50 -0
  99. eva/vision/data/wsi/patching/samplers/base.py +48 -0
  100. eva/vision/data/wsi/patching/samplers/foreground_grid.py +99 -0
  101. eva/vision/data/wsi/patching/samplers/grid.py +47 -0
  102. eva/vision/data/wsi/patching/samplers/random.py +41 -0
  103. eva/vision/losses/__init__.py +5 -0
  104. eva/vision/losses/dice.py +40 -0
  105. eva/vision/models/__init__.py +4 -2
  106. eva/vision/models/modules/__init__.py +5 -0
  107. eva/vision/models/modules/semantic_segmentation.py +161 -0
  108. eva/vision/models/networks/__init__.py +1 -2
  109. eva/vision/models/networks/backbones/__init__.py +6 -0
  110. eva/vision/models/networks/backbones/_utils.py +39 -0
  111. eva/vision/models/networks/backbones/pathology/__init__.py +31 -0
  112. eva/vision/models/networks/backbones/pathology/bioptimus.py +34 -0
  113. eva/vision/models/networks/backbones/pathology/gigapath.py +33 -0
  114. eva/vision/models/networks/backbones/pathology/histai.py +46 -0
  115. eva/vision/models/networks/backbones/pathology/kaiko.py +123 -0
  116. eva/vision/models/networks/backbones/pathology/lunit.py +68 -0
  117. eva/vision/models/networks/backbones/pathology/mahmood.py +62 -0
  118. eva/vision/models/networks/backbones/pathology/owkin.py +22 -0
  119. eva/vision/models/networks/backbones/registry.py +47 -0
  120. eva/vision/models/networks/backbones/timm/__init__.py +5 -0
  121. eva/vision/models/networks/backbones/timm/backbones.py +54 -0
  122. eva/vision/models/networks/backbones/universal/__init__.py +8 -0
  123. eva/vision/models/networks/backbones/universal/vit.py +54 -0
  124. eva/vision/models/networks/decoders/__init__.py +6 -0
  125. eva/vision/models/networks/decoders/decoder.py +7 -0
  126. eva/vision/models/networks/decoders/segmentation/__init__.py +11 -0
  127. eva/vision/models/networks/decoders/segmentation/common.py +74 -0
  128. eva/vision/models/networks/decoders/segmentation/conv2d.py +114 -0
  129. eva/vision/models/networks/decoders/segmentation/linear.py +125 -0
  130. eva/vision/models/wrappers/__init__.py +6 -0
  131. eva/vision/models/wrappers/from_registry.py +48 -0
  132. eva/vision/models/wrappers/from_timm.py +68 -0
  133. eva/vision/utils/colormap.py +77 -0
  134. eva/vision/utils/convert.py +56 -13
  135. eva/vision/utils/io/__init__.py +10 -4
  136. eva/vision/utils/io/image.py +21 -2
  137. eva/vision/utils/io/mat.py +36 -0
  138. eva/vision/utils/io/nifti.py +33 -12
  139. eva/vision/utils/io/text.py +10 -3
  140. kaiko_eva-0.1.0.dist-info/METADATA +553 -0
  141. kaiko_eva-0.1.0.dist-info/RECORD +205 -0
  142. {kaiko_eva-0.0.2.dist-info → kaiko_eva-0.1.0.dist-info}/WHEEL +1 -1
  143. {kaiko_eva-0.0.2.dist-info → kaiko_eva-0.1.0.dist-info}/entry_points.txt +2 -0
  144. eva/.DS_Store +0 -0
  145. eva/core/callbacks/writers/embeddings.py +0 -169
  146. eva/core/callbacks/writers/typings.py +0 -23
  147. eva/core/data/datasets/embeddings/__init__.py +0 -13
  148. eva/core/data/datasets/embeddings/classification/__init__.py +0 -10
  149. eva/core/data/datasets/embeddings/classification/embeddings.py +0 -66
  150. eva/core/models/networks/transforms/__init__.py +0 -5
  151. eva/core/models/networks/wrappers/__init__.py +0 -8
  152. eva/vision/models/.DS_Store +0 -0
  153. eva/vision/models/networks/.DS_Store +0 -0
  154. eva/vision/models/networks/postprocesses/__init__.py +0 -5
  155. eva/vision/models/networks/postprocesses/cls.py +0 -25
  156. kaiko_eva-0.0.2.dist-info/METADATA +0 -431
  157. kaiko_eva-0.0.2.dist-info/RECORD +0 -127
  158. /eva/core/models/{networks → wrappers}/_utils.py +0 -0
  159. {kaiko_eva-0.0.2.dist-info → kaiko_eva-0.1.0.dist-info}/licenses/LICENSE +0 -0
@@ -1,21 +1,22 @@
1
1
  """NIfTI I/O related functions."""
2
2
 
3
- from typing import Any
3
+ from typing import Any, Tuple
4
4
 
5
5
  import nibabel as nib
6
+ import numpy as np
6
7
  import numpy.typing as npt
7
8
 
8
9
  from eva.vision.utils.io import _utils
9
10
 
10
11
 
11
- def read_nifti_slice(
12
- path: str, slice_index: int, *, use_storage_dtype: bool = True
12
+ def read_nifti(
13
+ path: str, slice_index: int | None = None, *, use_storage_dtype: bool = True
13
14
  ) -> npt.NDArray[Any]:
14
- """Reads and loads a NIfTI image from a file path as `uint8`.
15
+ """Reads and loads a NIfTI image from a file path.
15
16
 
16
17
  Args:
17
18
  path: The path to the NIfTI file.
18
- slice_index: The image slice index to return.
19
+ slice_index: The index of the slice to read; if None, the full image is read.
19
20
  use_storage_dtype: Whether to cast the raw image
20
21
  array to the inferred type.
21
22
 
@@ -28,21 +29,42 @@ def read_nifti_slice(
28
29
  """
29
30
  _utils.check_file(path)
30
31
  image_data = nib.load(path) # type: ignore
31
- image_slice = image_data.slicer[:, :, slice_index : slice_index + 1] # type: ignore
32
- image_array = image_slice.get_fdata()
32
+ if slice_index is not None:
33
+ image_data = image_data.slicer[:, :, slice_index : slice_index + 1] # type: ignore
34
+
35
+ image_array = image_data.get_fdata() # type: ignore
33
36
  if use_storage_dtype:
34
37
  image_array = image_array.astype(image_data.get_data_dtype()) # type: ignore
38
+
35
39
  return image_array
36
40
 
37
41
 
38
- def fetch_total_nifti_slices(path: str) -> int:
39
- """Fetches the total slides of a NIfTI image file.
42
+ def save_array_as_nifti(
43
+ array: npt.ArrayLike,
44
+ filename: str,
45
+ *,
46
+ dtype: npt.DTypeLike | None = np.int64,
47
+ ) -> None:
48
+ """Saves a numpy array as a NIfTI image file.
49
+
50
+ Args:
51
+ array: The image array to save.
52
+ filename: The filename to save the image as.
53
+ dtype: The data type to save the image.
54
+ """
55
+ nifti_image = nib.Nifti1Image(array, affine=np.eye(4), dtype=dtype) # type: ignore
56
+ nifti_image.header.get_xyzt_units()
57
+ nifti_image.to_filename(filename)
58
+
59
+
60
+ def fetch_nifti_shape(path: str) -> Tuple[int]:
61
+ """Fetches the NIfTI image shape from a file.
40
62
 
41
63
  Args:
42
64
  path: The path to the NIfTI file.
43
65
 
44
66
  Returns:
45
- The number of the total available slides.
67
+ The image shape.
46
68
 
47
69
  Raises:
48
70
  FileExistsError: If the path does not exist or it is unreachable.
@@ -50,5 +72,4 @@ def fetch_total_nifti_slices(path: str) -> int:
50
72
  """
51
73
  _utils.check_file(path)
52
74
  image = nib.load(path) # type: ignore
53
- image_shape = image.header.get_data_shape() # type: ignore
54
- return image_shape[-1]
75
+ return image.header.get_data_shape() # type: ignore
@@ -4,15 +4,22 @@ import csv
4
4
  from typing import Dict, List
5
5
 
6
6
 
7
- def read_csv(path: str) -> List[Dict[str, str]]:
7
+ def read_csv(
8
+ path: str,
9
+ *,
10
+ delimiter: str = ",",
11
+ encoding: str = "utf-8",
12
+ ) -> List[Dict[str, str]]:
8
13
  """Reads a CSV file and returns its contents as a list of dictionaries.
9
14
 
10
15
  Args:
11
16
  path: The path to the CSV file.
17
+ delimiter: The character that separates fields in the CSV file.
18
+ encoding: The encoding of the CSV file.
12
19
 
13
20
  Returns:
14
21
  A list of dictionaries representing the data in the CSV file.
15
22
  """
16
- with open(path, newline="") as file:
17
- data = csv.DictReader(file, skipinitialspace=True)
23
+ with open(path, newline="", encoding=encoding) as file:
24
+ data = csv.DictReader(file, skipinitialspace=True, delimiter=delimiter)
18
25
  return list(data)
@@ -0,0 +1,553 @@
1
+ Metadata-Version: 2.1
2
+ Name: kaiko-eva
3
+ Version: 0.1.0
4
+ Summary: Evaluation Framework for oncology foundation models.
5
+ Keywords: machine-learning,evaluation-framework,oncology,foundation-models
6
+ Author-Email: Ioannis Gatopoulos <ioannis@kaiko.ai>, =?utf-8?q?Nicolas_K=C3=A4nzig?= <nicolas@kaiko.ai>, Roman Moser <roman@kaiko.ai>
7
+ Maintainer-Email: Ioannis Gatopoulos <ioannis@kaiko.ai>, =?utf-8?q?Nicolas_K=C3=A4nzig?= <nicolas@kaiko.ai>, Roman Moser <roman@kaiko.ai>
8
+ License: Apache License
9
+ Version 2.0, January 2004
10
+ http://www.apache.org/licenses/
11
+
12
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
13
+
14
+ 1. Definitions.
15
+
16
+ "License" shall mean the terms and conditions for use, reproduction,
17
+ and distribution as defined by Sections 1 through 9 of this document.
18
+
19
+ "Licensor" shall mean the copyright owner or entity authorized by
20
+ the copyright owner that is granting the License.
21
+
22
+ "Legal Entity" shall mean the union of the acting entity and all
23
+ other entities that control, are controlled by, or are under common
24
+ control with that entity. For the purposes of this definition,
25
+ "control" means (i) the power, direct or indirect, to cause the
26
+ direction or management of such entity, whether by contract or
27
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
28
+ outstanding shares, or (iii) beneficial ownership of such entity.
29
+
30
+ "You" (or "Your") shall mean an individual or Legal Entity
31
+ exercising permissions granted by this License.
32
+
33
+ "Source" form shall mean the preferred form for making modifications,
34
+ including but not limited to software source code, documentation
35
+ source, and configuration files.
36
+
37
+ "Object" form shall mean any form resulting from mechanical
38
+ transformation or translation of a Source form, including but
39
+ not limited to compiled object code, generated documentation,
40
+ and conversions to other media types.
41
+
42
+ "Work" shall mean the work of authorship, whether in Source or
43
+ Object form, made available under the License, as indicated by a
44
+ copyright notice that is included in or attached to the work
45
+ (an example is provided in the Appendix below).
46
+
47
+ "Derivative Works" shall mean any work, whether in Source or Object
48
+ form, that is based on (or derived from) the Work and for which the
49
+ editorial revisions, annotations, elaborations, or other modifications
50
+ represent, as a whole, an original work of authorship. For the purposes
51
+ of this License, Derivative Works shall not include works that remain
52
+ separable from, or merely link (or bind by name) to the interfaces of,
53
+ the Work and Derivative Works thereof.
54
+
55
+ "Contribution" shall mean any work of authorship, including
56
+ the original version of the Work and any modifications or additions
57
+ to that Work or Derivative Works thereof, that is intentionally
58
+ submitted to Licensor for inclusion in the Work by the copyright owner
59
+ or by an individual or Legal Entity authorized to submit on behalf of
60
+ the copyright owner. For the purposes of this definition, "submitted"
61
+ means any form of electronic, verbal, or written communication sent
62
+ to the Licensor or its representatives, including but not limited to
63
+ communication on electronic mailing lists, source code control systems,
64
+ and issue tracking systems that are managed by, or on behalf of, the
65
+ Licensor for the purpose of discussing and improving the Work, but
66
+ excluding communication that is conspicuously marked or otherwise
67
+ designated in writing by the copyright owner as "Not a Contribution."
68
+
69
+ "Contributor" shall mean Licensor and any individual or Legal Entity
70
+ on behalf of whom a Contribution has been received by Licensor and
71
+ subsequently incorporated within the Work.
72
+
73
+ 2. Grant of Copyright License. Subject to the terms and conditions of
74
+ this License, each Contributor hereby grants to You a perpetual,
75
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76
+ copyright license to reproduce, prepare Derivative Works of,
77
+ publicly display, publicly perform, sublicense, and distribute the
78
+ Work and such Derivative Works in Source or Object form.
79
+
80
+ 3. Grant of Patent License. Subject to the terms and conditions of
81
+ this License, each Contributor hereby grants to You a perpetual,
82
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
83
+ (except as stated in this section) patent license to make, have made,
84
+ use, offer to sell, sell, import, and otherwise transfer the Work,
85
+ where such license applies only to those patent claims licensable
86
+ by such Contributor that are necessarily infringed by their
87
+ Contribution(s) alone or by combination of their Contribution(s)
88
+ with the Work to which such Contribution(s) was submitted. If You
89
+ institute patent litigation against any entity (including a
90
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
91
+ or a Contribution incorporated within the Work constitutes direct
92
+ or contributory patent infringement, then any patent licenses
93
+ granted to You under this License for that Work shall terminate
94
+ as of the date such litigation is filed.
95
+
96
+ 4. Redistribution. You may reproduce and distribute copies of the
97
+ Work or Derivative Works thereof in any medium, with or without
98
+ modifications, and in Source or Object form, provided that You
99
+ meet the following conditions:
100
+
101
+ (a) You must give any other recipients of the Work or
102
+ Derivative Works a copy of this License; and
103
+
104
+ (b) You must cause any modified files to carry prominent notices
105
+ stating that You changed the files; and
106
+
107
+ (c) You must retain, in the Source form of any Derivative Works
108
+ that You distribute, all copyright, patent, trademark, and
109
+ attribution notices from the Source form of the Work,
110
+ excluding those notices that do not pertain to any part of
111
+ the Derivative Works; and
112
+
113
+ (d) If the Work includes a "NOTICE" text file as part of its
114
+ distribution, then any Derivative Works that You distribute must
115
+ include a readable copy of the attribution notices contained
116
+ within such NOTICE file, excluding those notices that do not
117
+ pertain to any part of the Derivative Works, in at least one
118
+ of the following places: within a NOTICE text file distributed
119
+ as part of the Derivative Works; within the Source form or
120
+ documentation, if provided along with the Derivative Works; or,
121
+ within a display generated by the Derivative Works, if and
122
+ wherever such third-party notices normally appear. The contents
123
+ of the NOTICE file are for informational purposes only and
124
+ do not modify the License. You may add Your own attribution
125
+ notices within Derivative Works that You distribute, alongside
126
+ or as an addendum to the NOTICE text from the Work, provided
127
+ that such additional attribution notices cannot be construed
128
+ as modifying the License.
129
+
130
+ You may add Your own copyright statement to Your modifications and
131
+ may provide additional or different license terms and conditions
132
+ for use, reproduction, or distribution of Your modifications, or
133
+ for any such Derivative Works as a whole, provided Your use,
134
+ reproduction, and distribution of the Work otherwise complies with
135
+ the conditions stated in this License.
136
+
137
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
138
+ any Contribution intentionally submitted for inclusion in the Work
139
+ by You to the Licensor shall be under the terms and conditions of
140
+ this License, without any additional terms or conditions.
141
+ Notwithstanding the above, nothing herein shall supersede or modify
142
+ the terms of any separate license agreement you may have executed
143
+ with Licensor regarding such Contributions.
144
+
145
+ 6. Trademarks. This License does not grant permission to use the trade
146
+ names, trademarks, service marks, or product names of the Licensor,
147
+ except as required for reasonable and customary use in describing the
148
+ origin of the Work and reproducing the content of the NOTICE file.
149
+
150
+ 7. Disclaimer of Warranty. Unless required by applicable law or
151
+ agreed to in writing, Licensor provides the Work (and each
152
+ Contributor provides its Contributions) on an "AS IS" BASIS,
153
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
154
+ implied, including, without limitation, any warranties or conditions
155
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
156
+ PARTICULAR PURPOSE. You are solely responsible for determining the
157
+ appropriateness of using or redistributing the Work and assume any
158
+ risks associated with Your exercise of permissions under this License.
159
+
160
+ 8. Limitation of Liability. In no event and under no legal theory,
161
+ whether in tort (including negligence), contract, or otherwise,
162
+ unless required by applicable law (such as deliberate and grossly
163
+ negligent acts) or agreed to in writing, shall any Contributor be
164
+ liable to You for damages, including any direct, indirect, special,
165
+ incidental, or consequential damages of any character arising as a
166
+ result of this License or out of the use or inability to use the
167
+ Work (including but not limited to damages for loss of goodwill,
168
+ work stoppage, computer failure or malfunction, or any and all
169
+ other commercial damages or losses), even if such Contributor
170
+ has been advised of the possibility of such damages.
171
+
172
+ 9. Accepting Warranty or Additional Liability. While redistributing
173
+ the Work or Derivative Works thereof, You may choose to offer,
174
+ and charge a fee for, acceptance of support, warranty, indemnity,
175
+ or other liability obligations and/or rights consistent with this
176
+ License. However, in accepting such obligations, You may act only
177
+ on Your own behalf and on Your sole responsibility, not on behalf
178
+ of any other Contributor, and only if You agree to indemnify,
179
+ defend, and hold each Contributor harmless for any liability
180
+ incurred by, or claims asserted against, such Contributor by reason
181
+ of your accepting any such warranty or additional liability.
182
+
183
+ END OF TERMS AND CONDITIONS
184
+
185
+ APPENDIX: How to apply the Apache License to your work.
186
+
187
+ To apply the Apache License to your work, attach the following
188
+ boilerplate notice, with the fields enclosed by brackets "[]"
189
+ replaced with your own identifying information. (Don't include
190
+ the brackets!) The text should be enclosed in the appropriate
191
+ comment syntax for the file format. We also recommend that a
192
+ file or class name and description of purpose be included on the
193
+ same "printed page" as the copyright notice for easier
194
+ identification within third-party archives.
195
+
196
+ Copyright 2024 kaiko.ai
197
+
198
+ Licensed under the Apache License, Version 2.0 (the "License");
199
+ you may not use this file except in compliance with the License.
200
+ You may obtain a copy of the License at
201
+
202
+ http://www.apache.org/licenses/LICENSE-2.0
203
+
204
+ Unless required by applicable law or agreed to in writing, software
205
+ distributed under the License is distributed on an "AS IS" BASIS,
206
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
207
+ See the License for the specific language governing permissions and
208
+ limitations under the License.
209
+
210
+ Classifier: Topic :: Software Development :: Build Tools
211
+ Classifier: Programming Language :: Python :: 3
212
+ Classifier: Programming Language :: Python :: 3.10
213
+ Classifier: Programming Language :: Python :: 3.11
214
+ Classifier: Programming Language :: Python :: 3.12
215
+ Project-URL: Homepage, https://kaiko-ai.github.io/eva/dev/
216
+ Project-URL: Repository, https://github.com/kaiko-ai/eva
217
+ Project-URL: Documentation, https://kaiko-ai.github.io/eva/dev/
218
+ Requires-Python: >=3.10
219
+ Requires-Dist: torch==2.3.0
220
+ Requires-Dist: lightning>=2.2.2
221
+ Requires-Dist: jsonargparse[omegaconf]==4.31.0
222
+ Requires-Dist: tensorboard>=2.16.2
223
+ Requires-Dist: loguru>=0.7.2
224
+ Requires-Dist: pandas>=2.2.0
225
+ Requires-Dist: transformers>=4.38.2
226
+ Requires-Dist: onnxruntime>=1.17.1
227
+ Requires-Dist: onnx>=1.16.0
228
+ Requires-Dist: toolz>=0.12.1
229
+ Requires-Dist: rich>=13.7.1
230
+ Requires-Dist: torchmetrics>=1.4.2
231
+ Provides-Extra: vision
232
+ Requires-Dist: h5py>=3.10.0; extra == "vision"
233
+ Requires-Dist: nibabel>=5.2.0; extra == "vision"
234
+ Requires-Dist: opencv-python-headless>=4.9.0.80; extra == "vision"
235
+ Requires-Dist: timm>=1.0.9; extra == "vision"
236
+ Requires-Dist: torchvision>=0.17.0; extra == "vision"
237
+ Requires-Dist: openslide-python>=1.3.1; extra == "vision"
238
+ Requires-Dist: gdown>=5.2.0; extra == "vision"
239
+ Requires-Dist: scikit-image>=0.24.0; extra == "vision"
240
+ Requires-Dist: imagesize>=1.4.1; extra == "vision"
241
+ Requires-Dist: scipy>=1.14.0; extra == "vision"
242
+ Requires-Dist: monai>=1.3.2; extra == "vision"
243
+ Provides-Extra: all
244
+ Requires-Dist: h5py>=3.10.0; extra == "all"
245
+ Requires-Dist: nibabel>=5.2.0; extra == "all"
246
+ Requires-Dist: opencv-python-headless>=4.9.0.80; extra == "all"
247
+ Requires-Dist: timm>=1.0.9; extra == "all"
248
+ Requires-Dist: torchvision>=0.17.0; extra == "all"
249
+ Requires-Dist: openslide-python>=1.3.1; extra == "all"
250
+ Requires-Dist: gdown>=5.2.0; extra == "all"
251
+ Requires-Dist: scikit-image>=0.24.0; extra == "all"
252
+ Requires-Dist: imagesize>=1.4.1; extra == "all"
253
+ Requires-Dist: scipy>=1.14.0; extra == "all"
254
+ Requires-Dist: monai>=1.3.2; extra == "all"
255
+ Description-Content-Type: text/markdown
256
+
257
+ <div align="center">
258
+
259
+ <br />
260
+
261
+ <img src="https://github.com/kaiko-ai/eva/blob/main/docs/images/eva-logo.png?raw=true" width="340">
262
+
263
+ <br />
264
+ <br />
265
+
266
+ _Oncology FM Evaluation Framework by kaiko.ai_
267
+
268
+ [![PyPI](https://img.shields.io/pypi/v/kaiko-eva.svg?logo=python)](https://pypi.python.org/pypi/kaiko-eva)
269
+ [![docs](https://img.shields.io/badge/📚_docs-latest-green)](https://kaiko-ai.github.io/eva/latest)
270
+ [![license](https://img.shields.io/badge/⚖️_License-Apache%202.0-blue.svg?labelColor=gray)](https://github.com/kaiko-ai/eva#license)<br>
271
+ [![paper](http://img.shields.io/badge/OpenReview-MIDL_2024-B31B1B.svg)](https://openreview.net/forum?id=FNBQOPj18N&noteId=FNBQOPj18N)
272
+
273
+ <p align="center">
274
+ <a href="https://github.com/kaiko-ai/eva#installation">Installation</a> •
275
+ <a href="https://github.com/kaiko-ai/eva#how-to-use">How To Use</a> •
276
+ <a href="https://github.com/kaiko-ai/eva#quick-start">Quick Start</a> •
277
+ <a href="https://kaiko-ai.github.io/eva/">Documentation</a> •
278
+ <a href="https://kaiko-ai.github.io/eva/dev/datasets/">Datasets</a> •
279
+ <a href="https://github.com/kaiko-ai/eva#benchmarks">Benchmarks</a> <br>
280
+ <a href="https://github.com/kaiko-ai/eva#contributing">Contribute</a> •
281
+ <a href="https://github.com/kaiko-ai/eva#acknowledgements">Acknowledgements</a>
282
+ </p>
283
+
284
+ </div>
285
+
286
+ <br />
287
+
288
+ _`eva`_ is an evaluation framework for oncology foundation models (FMs) by [kaiko.ai](https://kaiko.ai/).
289
+ Check out the [documentation](https://kaiko-ai.github.io/eva/) for more information.
290
+
291
+ ### Highlights:
292
+ - Easy and reliable benchmark of Oncology FMs
293
+ - Supports path-level classification, slide-level classification and semantic segmentation downstream tasks
294
+ - Automatic embedding inference and evaluation of a downstream task
295
+ - Native support of popular medical [datasets](https://kaiko-ai.github.io/eva/dev/datasets/) and models
296
+ - Produce statistics over multiple evaluation fits and multiple metrics
297
+
298
+ ## Installation
299
+
300
+ Simple installation from PyPI:
301
+ ```sh
302
+ # to install the core version only
303
+ pip install kaiko-eva
304
+
305
+ # to install the expanded `vision` version
306
+ pip install 'kaiko-eva[vision]'
307
+
308
+ # to install everything
309
+ pip install 'kaiko-eva[all]'
310
+ ```
311
+
312
+ To install the latest version of the `main` branch:
313
+ ```sh
314
+ pip install "kaiko-eva[all] @ git+https://github.com/kaiko-ai/eva.git"
315
+ ```
316
+
317
+ You can verify that the installation was successful by executing:
318
+ ```sh
319
+ eva --version
320
+ ```
321
+
322
+ ## How To Use
323
+
324
+ _`eva`_ can be used directly from the terminal as a CLI tool as follows:
325
+ ```sh
326
+ eva {fit,predict,predict_fit} --config url/or/path/to/the/config.yaml
327
+ ```
328
+
329
+ _`eva`_ uses [jsonargparse](https://jsonargparse.readthedocs.io/en/v4.31.0/) to
330
+ make it easily configurable by automatically generating command line interfaces (CLIs),
331
+ which allows to call *any* Python object from the command line. Moreover, the configuration structure is always in sync with the code. Thus, _`eva`_ can be used either directly from Python or as a CLI tool (recommended).
332
+
333
+ For more information, please refer to the [documentation](https://kaiko-ai.github.io/eva/dev/user-guide/tutorials/offline_vs_online/).
334
+
335
+ <details>
336
+ <summary>Learn about Configs</summary>
337
+
338
+ The following interfaces are identical:
339
+ <table>
340
+ <tr>
341
+ <th>Python interface</th>
342
+ <th>Configuration file</th>
343
+ </tr>
344
+ <tr>
345
+ <td>
346
+ <sub>
347
+
348
+ ```Python
349
+ # main.py
350
+ # execute with: `python main.py`
351
+
352
+ from torch import nn
353
+
354
+ from eva import core
355
+ from eva.vision import datasets, transforms
356
+
357
+ # initialize trainer
358
+ trainer = core.Trainer(max_steps=100)
359
+
360
+ # initialize model
361
+ model = core.HeadModule(
362
+ backbone=nn.Flatten(),
363
+ head=nn.Linear(150528, 4),
364
+ criterion=nn.CrossEntropyLoss(),
365
+ )
366
+
367
+ # initialize data
368
+ data = core.DataModule(
369
+ datasets=core.DatasetsSchema(
370
+ train=datasets.BACH(
371
+ root="data/bach",
372
+ split="train",
373
+ download=True,
374
+ transforms=transforms.ResizeAndCrop(),
375
+ ),
376
+ ),
377
+ dataloaders=core.DataloadersSchema(
378
+ train=core.DataLoader(batch_size=32),
379
+ ),
380
+ )
381
+
382
+ # perform fit
383
+ pipeline = core.Interface()
384
+ pipeline.fit(trainer, model=model, data=data)
385
+ ```
386
+ </sub>
387
+ <td>
388
+ <sub>
389
+
390
+ ```yaml
391
+ # main.yaml
392
+ # execute with: `eva fit --config main.yaml`
393
+
394
+ ---
395
+ trainer:
396
+ class_path: eva.Trainer
397
+ init_args:
398
+ max_steps: 100
399
+ model:
400
+ class_path: eva.HeadModule
401
+ init_args:
402
+ backbone: torch.nn.Flatten
403
+ head:
404
+ class_path: torch.nn.Linear
405
+ init_args:
406
+ in_features: 150528
407
+ out_features: 4
408
+ criterion: torch.nn.CrossEntropyLoss
409
+ data:
410
+ class_path: eva.DataModule
411
+ init_args:
412
+ datasets:
413
+ train:
414
+ class_path: eva.vision.datasets.BACH
415
+ init_args:
416
+ root: ./data/bach
417
+ split: train
418
+ download: true
419
+ transforms: eva.vision.transforms.ResizeAndCrop
420
+ dataloaders:
421
+ train:
422
+ batch_size: 32
423
+ ```
424
+ </sub>
425
+ </td>
426
+ </tr>
427
+ </table>
428
+
429
+ The `.yaml` file defines the functionality of _`eva`_
430
+ by parsing and translating its content to Python objects directly.
431
+ Native supported configs can be found at the
432
+ [configs](https://github.com/kaiko-ai/eva/tree/main/configs) directory
433
+ of the repo, which can be both locally stored or remote.
434
+
435
+ </details>
436
+
437
+ ## Quick Start
438
+
439
+ We define two types of evaluations: **online** and **offline**.
440
+ While online fit uses the backbone (FM) to perform forward passes
441
+ during the fitting process, offline fit first generates embeddings
442
+ with the backbone and then fits the model using these embeddings as
443
+ input, resulting in a faster evaluation.
444
+
445
+ Here are some examples to get you started:
446
+
447
+ - Perform a downstream offline **classification** evaluation of `DINO ViT-S/16`
448
+ on the `BACH` dataset with linear probing by first inferring the embeddings
449
+ and then performing 5 sequential fits:
450
+ ```sh
451
+ export DOWNLOAD_DATA=true
452
+ eva predict_fit --config https://raw.githubusercontent.com/kaiko-ai/eva/main/configs/vision/dino_vit/offline/bach.yaml
453
+ ```
454
+
455
+ - Perform a downstream online **segmentation** evaluation of `DINO ViT-S/16` on the
456
+ `MoNuSAC` dataset with the `ConvDecoderMS` decoder:
457
+ ```sh
458
+ export DOWNLOAD_DATA=true
459
+ eva fit --config https://raw.githubusercontent.com/kaiko-ai/eva/main/configs/vision/dino_vit/online/monusac.yaml
460
+ ```
461
+
462
+ For more examples, take a look at the [configs](https://github.com/kaiko-ai/eva/tree/main/configs)
463
+ and [tutorials](https://kaiko-ai.github.io/eva/dev/user-guide/advanced/replicate_evaluations/).
464
+
465
+ > [!NOTE]
466
+ > All datasets in the repo that support automatic download have the download option set to false by default.
467
+ > To enable automatic download, you must manually set the environment variable `DOWNLOAD_DATA=true` or set `download=true` in the configuration file.
468
+
469
+ ## Leaderboards
470
+
471
+ In this section you will find model benchmarks which were generated with _`eva`_.
472
+
473
+ ### Table I: WSI and microscopy image tasks
474
+
475
+ <br />
476
+
477
+ <div align="center">
478
+
479
+ | Model | BACH | CRC | MHIST | PCam | Camelyon16 | PANDA | CoNSeP | MoNuSAC |
480
+ |---------|-------|-------|-------|--------|------------|-------|------------|-------|
481
+ | ViT-S/16 _(random)_ <sup>[1]</sup> | 0.411|0.613|0.5|0.752|0.551|0.347|0.489|0.394|
482
+ | ViT-S/16 _(ImageNet)_ <sup>[1]</sup> | 0.675|0.936|0.827|0.861|0.751|0.676|0.54|0.512|
483
+ | DINO<sub>(p=16)</sub> <sup>[2]</sup> | 0.77|0.936|0.751|0.905|0.869|0.737|0.625|0.549|
484
+ | Phikon <sup>[3]</sup> | 0.715|0.942|0.766|0.925|0.879|0.784|0.68|0.554|
485
+ | UNI <sup>[4]</sup> | 0.797|0.95|0.835|0.939|0.933|0.774|0.67|0.575|
486
+ | ViT-S/16 _(kaiko.ai)_ <sup>[5]</sup> | 0.8|0.949|0.831|0.902|0.897|0.77|0.622|0.573|
487
+ | ViT-S/8 _(kaiko.ai)_ <sup>[5]</sup> | 0.825|0.948|0.826|0.887|0.879|0.741|0.677|0.617|
488
+ | ViT-B/16 _(kaiko.ai)_ <sup>[5]</sup> | 0.846|0.959|0.839|0.906|0.891|0.753|0.647|0.572|
489
+ | ViT-B/8 _(kaiko.ai)_ <sup>[5]</sup> | 0.867|0.952|0.814|0.921|0.939|0.761|0.706|0.661|
490
+ | ViT-L/14 _(kaiko.ai)_ <sup>[5]</sup> | 0.862|0.935|0.822|0.907|0.941|0.769|0.686|0.599|
491
+
492
+ _Table I: Linear probing evaluation of FMs on patch-level downstream datasets.<br> We report balanced accuracy
493
+ for classification tasks and generalized Dice score for segmentation tasks, averaged over 5 runs. Results are
494
+ reported on the "test" split if available and otherwise on the "validation" split._
495
+
496
+ </div>
497
+
498
+ <br />
499
+
500
+ _References_:
501
+ 1. _"Emerging properties in self-supervised vision transformers"_, [arXiv](https://arxiv.org/abs/2104.14294)
502
+ 2. _"Benchmarking self-supervised learning on diverse pathology datasets"_, [arXiv](https://arxiv.org/abs/2212.04690)
503
+ 3. _"Scaling self-supervised learning for histopathology with masked image modeling"_, [medRxiv](https://www.medrxiv.org/content/10.1101/2023.07.21.23292757v1)
504
+ 4. _"A General-Purpose Self-Supervised Model for Computational Pathology"_, [arXiv](https://arxiv.org/abs/2308.15474)
505
+ 5. _"Towards Training Large-Scale Pathology Foundation Models: from TCGA to Hospital Scale"_, [arXiv](https://arxiv.org/pdf/2404.15217)
506
+
507
+ ## Contributing
508
+
509
+ _`eva`_ is an open source project and welcomes contributions of all kinds. Please checkout the [developer](./docs/DEVELOPER_GUIDE.md)
510
+ and [contributing guide](./docs/CONTRIBUTING.md) for help on how to do so.
511
+
512
+ All contributors must follow the [code of conduct](./docs/CODE_OF_CONDUCT.md).
513
+
514
+
515
+ ## Acknowledgements
516
+
517
+ Our codebase is built using multiple open-source contributions:
518
+
519
+ <div align="center">
520
+
521
+ [![python](https://img.shields.io/badge/-Python-blue?logo=python&logoColor=white)](https://www.python.org/)
522
+ [![pytorch](https://img.shields.io/badge/PyTorch-ee4c2c?logo=pytorch&logoColor=white)](https://pytorch.org/get-started/locally/)
523
+ [![lightning](https://img.shields.io/badge/-⚡️_Lightning-792ee5?logo=pytorchlightning&logoColor=white)](https://pytorchlightning.ai/)<br>
524
+ [![black](https://img.shields.io/badge/Code%20Style-Black-black.svg?labelColor=gray)](https://black.readthedocs.io/en/stable/)
525
+ [![isort](https://img.shields.io/badge/%20imports-isort-%231674b1?style=flat&labelColor=ef8336)](https://pycqa.github.io/isort/)
526
+ [![Ruff](https://img.shields.io/endpoint?url=https://raw.githubusercontent.com/astral-sh/ruff/main/assets/badge/v2.json)](https://github.com/astral-sh/ruff)
527
+ [![Checked with pyright](https://microsoft.github.io/pyright/img/pyright_badge.svg)](https://microsoft.github.io/pyright/)<br>
528
+ [![pdm-managed](https://img.shields.io/badge/pdm-managed-blueviolet)](https://pdm-project.org)
529
+ [![Nox](https://img.shields.io/badge/%F0%9F%A6%8A-Nox-D85E00.svg)](https://github.com/wntrblm/nox)
530
+ [![Built with Material for MkDocs](https://img.shields.io/badge/Material_for_MkDocs-526CFE?logo=MaterialForMkDocs&logoColor=white)](https://squidfunk.github.io/mkdocs-material/)
531
+
532
+ </div>
533
+
534
+
535
+ ## Citation
536
+
537
+ If you find this repository useful, please consider giving a star ⭐ and adding the following citation:
538
+
539
+ ```bibtex
540
+ @inproceedings{kaiko.ai2024eva,
541
+ title={eva: Evaluation framework for pathology foundation models},
542
+ author={kaiko.ai and Ioannis Gatopoulos and Nicolas K{\"a}nzig and Roman Moser and Sebastian Ot{\'a}lora},
543
+ booktitle={Medical Imaging with Deep Learning},
544
+ year={2024},
545
+ url={https://openreview.net/forum?id=FNBQOPj18N}
546
+ }
547
+ ```
548
+
549
+ <br />
550
+
551
+ <div align="center">
552
+ <img src="https://github.com/kaiko-ai/eva/blob/main/docs/images/kaiko-logo.png?raw=true" width="200">
553
+ </div>