kaiko-eva 0.2.0__py3-none-any.whl → 0.2.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
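The hunks below come from the wheels' dist-info metadata (RECORD and WHEEL) and from the Python modules removed in 0.2.1. As an illustrative aside, a RECORD diff like the one shown here can be reproduced locally with standard tooling; the sketch below is only an example under stated assumptions (helper names and download directories are arbitrary, and it presumes both wheel versions are still available on PyPI):

# Minimal sketch: download both kaiko-eva wheels and diff their RECORD files.
# Not part of the original diff page; helper names and paths are illustrative.
import difflib
import pathlib
import subprocess
import zipfile

def download_wheel(version: str, dest: str) -> pathlib.Path:
    # Fetch the published wheel for `version` from PyPI, without dependencies.
    subprocess.run(
        ["pip", "download", f"kaiko-eva=={version}", "--no-deps",
         "--only-binary", ":all:", "-d", dest],
        check=True,
    )
    return next(pathlib.Path(dest).glob("kaiko_eva-*.whl"))

def read_record(wheel_path: pathlib.Path, version: str) -> list[str]:
    # RECORD lists every file in the wheel with its sha256 hash and size.
    with zipfile.ZipFile(wheel_path) as wheel:
        return wheel.read(f"kaiko_eva-{version}.dist-info/RECORD").decode().splitlines()

old = read_record(download_wheel("0.2.0", "wheels/0.2.0"), "0.2.0")
new = read_record(download_wheel("0.2.1", "wheels/0.2.1"), "0.2.1")
print("\n".join(difflib.unified_diff(old, new, "0.2.0/RECORD", "0.2.1/RECORD", lineterm="")))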
Files changed (84)
  1. eva/core/data/datasets/base.py +7 -2
  2. eva/core/models/modules/head.py +4 -2
  3. eva/core/models/modules/typings.py +2 -2
  4. eva/core/models/transforms/__init__.py +2 -1
  5. eva/core/models/transforms/as_discrete.py +57 -0
  6. eva/core/models/wrappers/_utils.py +121 -1
  7. eva/core/utils/suppress_logs.py +28 -0
  8. eva/vision/data/__init__.py +2 -2
  9. eva/vision/data/dataloaders/__init__.py +5 -0
  10. eva/vision/data/dataloaders/collate_fn/__init__.py +5 -0
  11. eva/vision/data/dataloaders/collate_fn/collection.py +22 -0
  12. eva/vision/data/datasets/__init__.py +2 -2
  13. eva/vision/data/datasets/classification/bach.py +3 -4
  14. eva/vision/data/datasets/classification/bracs.py +3 -4
  15. eva/vision/data/datasets/classification/breakhis.py +3 -4
  16. eva/vision/data/datasets/classification/camelyon16.py +4 -5
  17. eva/vision/data/datasets/classification/crc.py +3 -4
  18. eva/vision/data/datasets/classification/gleason_arvaniti.py +3 -4
  19. eva/vision/data/datasets/classification/mhist.py +3 -4
  20. eva/vision/data/datasets/classification/panda.py +4 -5
  21. eva/vision/data/datasets/classification/patch_camelyon.py +3 -4
  22. eva/vision/data/datasets/classification/unitopatho.py +3 -4
  23. eva/vision/data/datasets/classification/wsi.py +6 -5
  24. eva/vision/data/datasets/segmentation/__init__.py +2 -2
  25. eva/vision/data/datasets/segmentation/_utils.py +47 -0
  26. eva/vision/data/datasets/segmentation/bcss.py +7 -8
  27. eva/vision/data/datasets/segmentation/btcv.py +236 -0
  28. eva/vision/data/datasets/segmentation/consep.py +6 -7
  29. eva/vision/data/datasets/segmentation/lits.py +9 -8
  30. eva/vision/data/datasets/segmentation/lits_balanced.py +2 -1
  31. eva/vision/data/datasets/segmentation/monusac.py +4 -5
  32. eva/vision/data/datasets/segmentation/total_segmentator_2d.py +12 -10
  33. eva/vision/data/datasets/vision.py +95 -4
  34. eva/vision/data/datasets/wsi.py +5 -5
  35. eva/vision/data/transforms/__init__.py +22 -3
  36. eva/vision/data/transforms/common/__init__.py +1 -2
  37. eva/vision/data/transforms/croppad/__init__.py +11 -0
  38. eva/vision/data/transforms/croppad/crop_foreground.py +110 -0
  39. eva/vision/data/transforms/croppad/rand_crop_by_pos_neg_label.py +109 -0
  40. eva/vision/data/transforms/croppad/spatial_pad.py +67 -0
  41. eva/vision/data/transforms/intensity/__init__.py +11 -0
  42. eva/vision/data/transforms/intensity/rand_scale_intensity.py +59 -0
  43. eva/vision/data/transforms/intensity/rand_shift_intensity.py +55 -0
  44. eva/vision/data/transforms/intensity/scale_intensity_ranged.py +56 -0
  45. eva/vision/data/transforms/spatial/__init__.py +7 -0
  46. eva/vision/data/transforms/spatial/flip.py +72 -0
  47. eva/vision/data/transforms/spatial/rotate.py +53 -0
  48. eva/vision/data/transforms/spatial/spacing.py +69 -0
  49. eva/vision/data/transforms/utility/__init__.py +5 -0
  50. eva/vision/data/transforms/utility/ensure_channel_first.py +51 -0
  51. eva/vision/data/tv_tensors/__init__.py +5 -0
  52. eva/vision/data/tv_tensors/volume.py +61 -0
  53. eva/vision/metrics/segmentation/monai_dice.py +9 -2
  54. eva/vision/models/modules/semantic_segmentation.py +28 -20
  55. eva/vision/models/networks/backbones/__init__.py +9 -2
  56. eva/vision/models/networks/backbones/pathology/__init__.py +11 -2
  57. eva/vision/models/networks/backbones/pathology/bioptimus.py +47 -1
  58. eva/vision/models/networks/backbones/pathology/hkust.py +69 -0
  59. eva/vision/models/networks/backbones/pathology/kaiko.py +18 -0
  60. eva/vision/models/networks/backbones/radiology/__init__.py +11 -0
  61. eva/vision/models/networks/backbones/radiology/swin_unetr.py +231 -0
  62. eva/vision/models/networks/backbones/radiology/voco.py +75 -0
  63. eva/vision/models/networks/decoders/segmentation/__init__.py +6 -2
  64. eva/vision/models/networks/decoders/segmentation/linear.py +5 -10
  65. eva/vision/models/networks/decoders/segmentation/semantic/__init__.py +8 -1
  66. eva/vision/models/networks/decoders/segmentation/semantic/swin_unetr.py +104 -0
  67. eva/vision/utils/io/__init__.py +2 -0
  68. eva/vision/utils/io/nifti.py +91 -11
  69. {kaiko_eva-0.2.0.dist-info → kaiko_eva-0.2.1.dist-info}/METADATA +3 -1
  70. {kaiko_eva-0.2.0.dist-info → kaiko_eva-0.2.1.dist-info}/RECORD +73 -57
  71. {kaiko_eva-0.2.0.dist-info → kaiko_eva-0.2.1.dist-info}/WHEEL +1 -1
  72. eva/vision/data/datasets/classification/base.py +0 -96
  73. eva/vision/data/datasets/segmentation/base.py +0 -96
  74. eva/vision/data/transforms/common/resize_and_clamp.py +0 -51
  75. eva/vision/data/transforms/normalization/__init__.py +0 -6
  76. eva/vision/data/transforms/normalization/clamp.py +0 -43
  77. eva/vision/data/transforms/normalization/functional/__init__.py +0 -5
  78. eva/vision/data/transforms/normalization/functional/rescale_intensity.py +0 -28
  79. eva/vision/data/transforms/normalization/rescale_intensity.py +0 -53
  80. eva/vision/metrics/segmentation/BUILD +0 -1
  81. eva/vision/models/networks/backbones/torchhub/__init__.py +0 -5
  82. eva/vision/models/networks/backbones/torchhub/backbones.py +0 -61
  83. {kaiko_eva-0.2.0.dist-info → kaiko_eva-0.2.1.dist-info}/entry_points.txt +0 -0
  84. {kaiko_eva-0.2.0.dist-info → kaiko_eva-0.2.1.dist-info}/licenses/LICENSE +0 -0
@@ -23,7 +23,7 @@ eva/core/data/datamodules/call.py,sha256=jjj9w3UXYuQB-qyCcw1EZpRJW10OC1I3dvgvsuQ
  eva/core/data/datamodules/datamodule.py,sha256=_pK59oXDe53oDkmv6eoJUvfl44WlFkrbC8KXSRMs_20,5533
  eva/core/data/datamodules/schemas.py,sha256=rzcf3uow6T6slVSwxEGDVmpi3QUvkiDoT_gCF3aMAEE,2262
  eva/core/data/datasets/__init__.py,sha256=jWPxT3gjQjwS6HqVZAb7KhMgzgklPgHeH51iPxDh_Tg,493
- eva/core/data/datasets/base.py,sha256=BLzlRFuByhrGmI7NFwn7-Tw0vpSYSRhl2Y65iX4KaMw,2526
+ eva/core/data/datasets/base.py,sha256=w8c9Jh3DiXVfBEdLfWpdE190lPAeOFqCQlXl7RqlDOU,2671
  eva/core/data/datasets/classification/__init__.py,sha256=wJ2jD9YODftt-dMcMf0TbCjJt47qXYBKkD4-XXajvRQ,340
  eva/core/data/datasets/classification/embeddings.py,sha256=hBO6dIRHAhoCaYb3ANc9fgvdBjyQNKPTrIhjc9y8-Ys,1108
  eva/core/data/datasets/classification/multi_embeddings.py,sha256=4hQy4741NDKqWCpm3kGq7aC28DF5gcwUuIpYhnbTyeM,4601
@@ -73,21 +73,22 @@ eva/core/metrics/structs/schemas.py,sha256=ZaSrx0j_NfIwT7joMUD1LyrKdAXTLaeSzWYTH
  eva/core/metrics/structs/typings.py,sha256=qJd-FiD2IhJgBeo8FyP0vpVUIH4RKb1k6zYvHtjUA04,388
  eva/core/models/__init__.py,sha256=T6Fo886LxMj-Y58_ylzkPkFSnFR2aISiMIbuO_weC4s,430
  eva/core/models/modules/__init__.py,sha256=QJWJ42BceXZBzDGgk5FHBcCaRrB9egTFKVF6gDsBYfM,255
- eva/core/models/modules/head.py,sha256=2rPlo2Osuq77gjrJmvQKCvNTaawvQRirK2CM2o24_xs,5184
+ eva/core/models/modules/head.py,sha256=bZ45RBPi3N8sjvKyt2_TeKWI1eB6GyBeGzV6J11ERO8,5225
  eva/core/models/modules/inference.py,sha256=ih-0Rr2oNf2N6maiXPOW7XH5KVwUT1_MOxnJKOhJ1uQ,978
  eva/core/models/modules/module.py,sha256=LtjYxTZb7UY0owonmt_yQ5EySw3sX-xD9HLN2io8EK4,6697
- eva/core/models/modules/typings.py,sha256=yFMJCE4Nrfd8VEXU1zk8p6Sz5M7UslwitYPVC2OPLSY,776
+ eva/core/models/modules/typings.py,sha256=LPR8JdIid2gJZpjMG1FcH5OZ60JlFOj_LupIh__2k_8,803
  eva/core/models/modules/utils/__init__.py,sha256=ScLCHwQfzlg_UsHVi5sf_SavUkh9secwtRn_umC_qA8,325
  eva/core/models/modules/utils/batch_postprocess.py,sha256=RwnDcjJy3uvVirpgx_80Q2CUYKfJKipVwjyX7AF2CKw,3088
  eva/core/models/modules/utils/checkpoint.py,sha256=Zp42rtmjgUC4VUMwFyG5aar-E0Hc5i7qUsxkV7AVKkE,700
  eva/core/models/modules/utils/grad.py,sha256=bl8qb8g4Nhg1KAGfbEV_9HTKkoT0azRwfs9KGX9swGs,706
  eva/core/models/networks/__init__.py,sha256=yqx6UmG1Eg3vb1O_tnK_axnJWabEl9ULkDWiPN440Xc,85
  eva/core/models/networks/mlp.py,sha256=thk-x4pviE3fCaMW9k3I2Oe5_DxfC-CqUrtolvVdXug,2418
- eva/core/models/transforms/__init__.py,sha256=oYL3gNUUKZFViTu6GT1jVE2Kv1xFYPuyiYp-sErtVVg,257
+ eva/core/models/transforms/__init__.py,sha256=AOy_2VY3ITLRk2PMqe6xfErvV7V2_XsnPQwEMhovxOU,333
+ eva/core/models/transforms/as_discrete.py,sha256=1w2NmcLzEuyPhaoVXl6jZTdblk7DPf6W6gQ_qi96hQM,1901
  eva/core/models/transforms/extract_cls_features.py,sha256=tFRd4H-eGFIGCfZt6wuZGibDmAoNXKSsn15bBw0IDdc,1482
  eva/core/models/transforms/extract_patch_features.py,sha256=k50jTLPWxbfvciH9QZSzTAGqWwDSVpXAteme_Qg2d6E,2202
  eva/core/models/wrappers/__init__.py,sha256=jaiANQdbO-IPgH8U-Y0ftFsuuCAM5i5KuYRHauKw5k8,450
- eva/core/models/wrappers/_utils.py,sha256=HXUyGcILaa8GK31ViIHCKRU4f9kbjAPYQmhvN2N7jSc,957
+ eva/core/models/wrappers/_utils.py,sha256=ZWe9Ih_0kH5Wg_AQAtAn77LZ_CODAve5u3G12ifLNsc,4902
  eva/core/models/wrappers/base.py,sha256=xKMUSXk93wI67p_wmh7jujK-bxvIefO1noYaAJN_5Ak,1359
  eva/core/models/wrappers/from_function.py,sha256=_vKBwtfilCNCnOaJTofE6l5bM2K3qJ8GyBT-0CM5FXY,1831
  eva/core/models/wrappers/from_torchhub.py,sha256=OAImGKRG4pfDXHsoriykC_iiO8QvK3nAWnQCE0mIGuk,3285
@@ -109,6 +110,7 @@ eva/core/utils/multiprocessing.py,sha256=BWX8AW_KPLgIIlbsPG1kYdtbHPx6Dklw13bu4u8
  eva/core/utils/operations.py,sha256=eoC_ScuHUMDCuk08j1bosiQZdPrgiIODqqheR9MtJHQ,641
  eva/core/utils/parser.py,sha256=2czmwEGJJ6PtmaD86s9I14P-_sek4DmDCkEatRGT5sI,725
  eva/core/utils/progress_bar.py,sha256=KvvsM_v3_Fhb4JvbEEPHb4PJMokg6mNLj-o6dkfzcMc,499
+ eva/core/utils/suppress_logs.py,sha256=pOk1076J0mKWn2lgDqEVC1g65FXhA_2IkC4LBEWhnwQ,902
  eva/core/utils/workers.py,sha256=hfx63M82qNg0Dwhre2tl53MnhtRsV7APaDONM9nhVB8,634
  eva/vision/__init__.py,sha256=oUZXFYjwtkWzi8An0uS5Xc84pLKintlXe2iti8zW6BQ,480
  eva/vision/callbacks/__init__.py,sha256=su1V73L0dDVYWSyvV_lnWbszDi2KikRraF7OsgeaKl4,139
@@ -116,46 +118,58 @@ eva/vision/callbacks/loggers/__init__.py,sha256=td1JRJbE08nsGIZdO64_yLC3FUuMDp0k
  eva/vision/callbacks/loggers/batch/__init__.py,sha256=DVYP7Aonbi4wg_ERHRj_8kb87Ee_75wRZzdduJ_icQk,173
  eva/vision/callbacks/loggers/batch/base.py,sha256=hcAd5iiHvjZ0DIf4Qt4ENT54D6ky_1OO4rKQZqeo-1k,3628
  eva/vision/callbacks/loggers/batch/segmentation.py,sha256=GYh2kfexW5pUZ0BdApYJI3e8xsuNkjIzkj5jnuKtHR4,6886
- eva/vision/data/__init__.py,sha256=aoKPmX8P2Q2k2W3nlq8vFU41FV6Sze-0SDuWtU-ETh4,111
- eva/vision/data/datasets/__init__.py,sha256=wvbkhBv_yS7hHMdMR-QpNHMkGoGzSL0L33XaXUwXTpM,1040
+ eva/vision/data/__init__.py,sha256=zuLOC8ExyeQGlwib1LB70RedrTC9w0siOhFTQIRb0V8,137
+ eva/vision/data/dataloaders/__init__.py,sha256=7AOD_UF3hMokrGxJ2tbawH44ujQaesDsaW-3HWorYv8,128
+ eva/vision/data/dataloaders/collate_fn/__init__.py,sha256=GCvJaeILmAc_-lhGw8yzj2cC2KG4i1PvSWAyVzPKvVo,146
+ eva/vision/data/dataloaders/collate_fn/collection.py,sha256=45s9fKjVBnqfnuGWmJZMtt_DDGnfuf7qkWe0QmxXMKo,611
+ eva/vision/data/datasets/__init__.py,sha256=NW034jrOnpDwYBQfsTjWG6jDJY_dPWKV-cq37HkBm10,1014
  eva/vision/data/datasets/_utils.py,sha256=epPcaYE4w2_LtUKLLQJh6qQxUNVBe22JA06k4WUerYQ,1430
  eva/vision/data/datasets/_validators.py,sha256=77WZj8ewsuxUjW5WegJ-7zDuR6WdF5JbaOYdywhKIK4,2594
  eva/vision/data/datasets/classification/__init__.py,sha256=5fOGZxKGPeMCf3Jd9qAOYADPrkZnYg97_QE4DC79AMI,1074
- eva/vision/data/datasets/classification/bach.py,sha256=kZba1dQlJWZAmA03akJ4fVUU-y9W8ezOwlgs2zL-QrE,5432
- eva/vision/data/datasets/classification/base.py,sha256=Ci0HoOhOuHwICTi1TUGA1PwZe642RywolTVfMhKrFHk,2772
- eva/vision/data/datasets/classification/bracs.py,sha256=e9SqnQ_HVm9ypQiwsFi5tbngqs0yEZsfVBk3pt91W80,3347
- eva/vision/data/datasets/classification/breakhis.py,sha256=_rzGx5IgJSW73es7Gusr_oOzI1jHCPhRH8yRvqcmuqw,6905
- eva/vision/data/datasets/classification/camelyon16.py,sha256=sChvRo0jbOVUMJvfpsFxgFOsYgci3v9wjeMBEjUysJU,8287
- eva/vision/data/datasets/classification/crc.py,sha256=8qjz9OklLg1gAr46RKZdlClmlO9awwfp0dkTs8v5jTE,5670
- eva/vision/data/datasets/classification/gleason_arvaniti.py,sha256=CCXeBA3dlic7ZRiarf4_f76qkct8PMNM_tCfz3IRUPA,5893
- eva/vision/data/datasets/classification/mhist.py,sha256=xzShPncSfAV6Q5ojfimeq748MfA0n77fGWa9EpdRzYU,3055
- eva/vision/data/datasets/classification/panda.py,sha256=BU_gDoX3ZSDUugwaO2n0XSZhzseK1rkPoHMRoJLGL84,7303
- eva/vision/data/datasets/classification/patch_camelyon.py,sha256=fElKteZKx4M6AjylnhhgNH1jewHegWc1K8h4FFKp0gE,7171
- eva/vision/data/datasets/classification/unitopatho.py,sha256=vC-dFbhETfDD9paTeQ73Dg1vLPWsK12AfpiBFznESaM,5151
- eva/vision/data/datasets/classification/wsi.py,sha256=x3mQ8iwyiSdfQOjJuV7_cd8-LRjjhY9tjtzuD8O87Lg,4099
- eva/vision/data/datasets/segmentation/__init__.py,sha256=hGNr7BM_StxvmlOKWWfHp615qgsrB6BB3qMOiYhE0Og,791
+ eva/vision/data/datasets/classification/bach.py,sha256=Qzkb0aUNR8yMXwjDx8RmIfvPYVmlUW3dippMKrIVqzU,5410
+ eva/vision/data/datasets/classification/bracs.py,sha256=FYe33SmdlFOAl1Ef10uXN7Y8kvlbKuWBqIycFPHtMMU,3325
+ eva/vision/data/datasets/classification/breakhis.py,sha256=e01gguDCHvp6U18exCm5svWr8EpM_HLbLAq9KJifkpo,6883
+ eva/vision/data/datasets/classification/camelyon16.py,sha256=7E0ju4cctUyprBK063TVXLBN4Fp6cKMICoquv3e5JhQ,8261
+ eva/vision/data/datasets/classification/crc.py,sha256=sv18Lw4iUqGkYXEQN-kyZV_Foc_X15praVR4nN_klkg,5648
+ eva/vision/data/datasets/classification/gleason_arvaniti.py,sha256=z1OQlxZYx-n3S2wcfu-GuchySRw0E70PURJDsvejFjE,5871
+ eva/vision/data/datasets/classification/mhist.py,sha256=I-guWIucQZBHdSx-TWP24NXHf9IA9lU4hyfPZbJop0g,3033
+ eva/vision/data/datasets/classification/panda.py,sha256=HVfCvByyajdo5o_waqTpzZWCbQXQqPjvvyS5I0NAvns,7277
+ eva/vision/data/datasets/classification/patch_camelyon.py,sha256=1yXkfP680qxkQUFAPKRFbZv0cHAFx23s2vvT9th2nKM,7149
+ eva/vision/data/datasets/classification/unitopatho.py,sha256=IO3msEsuOnmdcYZxF-eBpo0K97y54rWFmCb_KxuF4bk,5129
+ eva/vision/data/datasets/classification/wsi.py,sha256=YMGxU8ECjudizt_uXUevuPS8k66HxtEQ7M2IZJmL6kE,4079
+ eva/vision/data/datasets/segmentation/__init__.py,sha256=YA7qx4B-pfsILfONa2AfIQHKzDnv8l0sHwjsSXa5-vQ,765
  eva/vision/data/datasets/segmentation/_total_segmentator.py,sha256=DTaQaAisY7j1h0-zYk1_81Sr4b3D9PTMieYX0PMPtIc,3127
- eva/vision/data/datasets/segmentation/_utils.py,sha256=ps1qpuEkPgvwUw6H-KKaLaYqDBGmN7dNGk3bnS1l6sI,1261
- eva/vision/data/datasets/segmentation/base.py,sha256=11IMODMB7KJ8Bs5p7MyOsBXCyPFJXfYcDLAIMitUwEk,3023
- eva/vision/data/datasets/segmentation/bcss.py,sha256=NHjHd1tgIfIw6TxsZTGb63iMEwXFbWX_JAwRT5WVsj4,8274
- eva/vision/data/datasets/segmentation/consep.py,sha256=Pw3LvVIK2scj_ys7rVNRb9B8snP8HlDIAbaI3v6ObQk,6056
+ eva/vision/data/datasets/segmentation/_utils.py,sha256=aXUHrnbefP6-OgSvDQHqssFKhUwETul_8aosqYiOfm8,3065
+ eva/vision/data/datasets/segmentation/bcss.py,sha256=rqk6VqK0QCHLFnMnDuHd1JPJVK5_C6WnsmnNSKBw6Uo,8230
+ eva/vision/data/datasets/segmentation/btcv.py,sha256=GNgr8pLx7uvZ2pxnYZ8N9SfB9luduMTM9IQ1OHPgBxI,8257
+ eva/vision/data/datasets/segmentation/consep.py,sha256=SBH1vD3RjFNRMVeo07d2PqSAInZsWHR2d3xCpCoDVpM,6014
  eva/vision/data/datasets/segmentation/embeddings.py,sha256=RsTuAwGEJPnWPY7q3pwcjmqtEj0wtRBNRBD4a0RcGtA,1218
- eva/vision/data/datasets/segmentation/lits.py,sha256=cBRU5lkiTMAi_ZwyDQUN3ODyXUlLtuMWFLPDajcZnOo,7194
- eva/vision/data/datasets/segmentation/lits_balanced.py,sha256=s5kPfqB41Vkcm5Jh34mLAO0NweMSIlV2fMXJsRjJsF8,3384
- eva/vision/data/datasets/segmentation/monusac.py,sha256=OTWHAD1b48WeT6phVf466w_nJUOGdBCGKWiWw68PAdw,8423
- eva/vision/data/datasets/segmentation/total_segmentator_2d.py,sha256=A6A_lXmGDfV_9Mcp9KSgN6K8Q0T8XXjv6lT4I7iLUcw,16833
+ eva/vision/data/datasets/segmentation/lits.py,sha256=AsKsTQZBNXlYU_UllBrdr04rS8K4TDkG_vqR-aVr5ik,7267
+ eva/vision/data/datasets/segmentation/lits_balanced.py,sha256=OQ2AK6-wLE0uMvgQJtfBJTUJqS_WBfmsJXgBfe4gU8A,3451
+ eva/vision/data/datasets/segmentation/monusac.py,sha256=iv9-MFaTsGfGV1u6_lQNcSEeSpmVBDQC1Oa123iEtu0,8410
+ eva/vision/data/datasets/segmentation/total_segmentator_2d.py,sha256=3cWpJkZmJ7IUJhigw69YLFOg2_-yzXSLGXqWVPUsn8Y,16978
  eva/vision/data/datasets/structs.py,sha256=RaTDW-B36PumcR5gymhCiX-r8GiKqIFcjqoEEjjFyUE,389
- eva/vision/data/datasets/vision.py,sha256=RHcBBNTd5u1OB6l2iA5V8pv8kjZsTehi9At7J-FVqr4,657
- eva/vision/data/datasets/wsi.py,sha256=-rypkcd6CPBM_oPuLszUx9q4zSPzeO1H6JKqvOtLlHw,8282
- eva/vision/data/transforms/__init__.py,sha256=WeFii6JwB0CiOOGLR3tkgAoKgRdmOf2lm0Dadixn8OI,260
- eva/vision/data/transforms/common/__init__.py,sha256=6tvxUgb8wfhgvqejMVulwqssHTJLF7f4_vpf44kxgxY,234
- eva/vision/data/transforms/common/resize_and_clamp.py,sha256=f9-YIX0S9GMAXHP7TWlyRlGfZIVvHgoBHqQ8PzaKbKs,1736
+ eva/vision/data/datasets/vision.py,sha256=-_WRiyICMgqABR6Ay_RKBMfsPGwgx9MQfCA7WChHo24,3219
+ eva/vision/data/datasets/wsi.py,sha256=dEAT_Si_Qb3qdSovUPeoiWeoPb7m-NGYqq44e3UXHk8,8384
+ eva/vision/data/transforms/__init__.py,sha256=Bv1aPvjahteAZzVGSuxzHz2LRwa63NV7IcoPzKUt_fY,720
+ eva/vision/data/transforms/common/__init__.py,sha256=ZHzpdr-THc9CgFFbAVMWUiZrUNUiHnCDM8GYhM7tMfU,138
  eva/vision/data/transforms/common/resize_and_crop.py,sha256=GI1HTkbJ9qg4p8c6vk_XkXO0Qi6mBeUeiZIA0jVtmAw,1360
- eva/vision/data/transforms/normalization/__init__.py,sha256=0MZ1KphOr6LxBCOBn7LZ8H8M6-0CuFqvynTON5pedxg,240
- eva/vision/data/transforms/normalization/clamp.py,sha256=B-QyMCFEJPiJagpPr7JhrzOJMVuUB-D_qrmjvthJTyE,1412
- eva/vision/data/transforms/normalization/functional/__init__.py,sha256=ICg611_heHCiNxTNoteFX2MTav59fv7vLkTM8c4eS3w,194
- eva/vision/data/transforms/normalization/functional/rescale_intensity.py,sha256=ihJdDRogrJbvFpb8LcPdRzCFWdlMcBTpWD5RY2MOPbE,844
- eva/vision/data/transforms/normalization/rescale_intensity.py,sha256=BNzDeyzT0GG_FBtlZauCL-K4E_KVWH9SzTSN1SsFNJw,1756
+ eva/vision/data/transforms/croppad/__init__.py,sha256=d36WGe9x39p-d7VymRM29qdquv8YEa0RfsTfwt7Cou4,375
+ eva/vision/data/transforms/croppad/crop_foreground.py,sha256=3o27nOgxfRo8ap45lpmnaiAIZ08kdyp14vYpr4BC8zc,4865
+ eva/vision/data/transforms/croppad/rand_crop_by_pos_neg_label.py,sha256=8CwMYAOQgOIb1Uw1jc219aqY3s2tCWd6r-2nU7kqOLc,5538
+ eva/vision/data/transforms/croppad/spatial_pad.py,sha256=j5V2vvgGcf75GzGyAT7mGgpvlEOS2BnAcThRdt7Und4,2857
+ eva/vision/data/transforms/intensity/__init__.py,sha256=mNp6pi0pnHcA24kQuiGHzMb4XLRaR0Lgi-Vb7Nl-Aoo,408
+ eva/vision/data/transforms/intensity/rand_scale_intensity.py,sha256=DDcFWTmq5UbwISO9qGIPOQJ72rx7JQWtVi2OxggLzyE,2041
+ eva/vision/data/transforms/intensity/rand_shift_intensity.py,sha256=9YNREhRoCzLOt2C21daum62cbB53ZRcYOSuSW_Jz7eQ,1974
+ eva/vision/data/transforms/intensity/scale_intensity_ranged.py,sha256=VLvYZYG6jQCuR5poJsAlhIFjw6VjPEpcDPKBlJTjYBM,1873
+ eva/vision/data/transforms/spatial/__init__.py,sha256=k7C_p4fMZd7A00ikldAMsprYDedKrlMjKQB6BLA5InA,284
+ eva/vision/data/transforms/spatial/flip.py,sha256=jfRc-wPBvG58OtCNU3GrOkb57kcRddRqpwcAdCB0_No,2553
+ eva/vision/data/transforms/spatial/rotate.py,sha256=FpMTAPWtgrG10yQ3R1_Ii6obPcn3boNWOuLhsblxUbQ,1793
+ eva/vision/data/transforms/spatial/spacing.py,sha256=T1UhqK-OhhbLQxzejMyI8BQzYRF44PNc02Qap4nk1hY,2695
+ eva/vision/data/transforms/utility/__init__.py,sha256=TjncS2aOgRJwjjRuIvmr4eRz2nKVg6b76tThp4UlzII,163
+ eva/vision/data/transforms/utility/ensure_channel_first.py,sha256=jpnV7oWel1ZSL2VUf3wUdbB8xM2OFD8R6xpHcPCJVgw,1945
+ eva/vision/data/tv_tensors/__init__.py,sha256=qla_QYWN52vP0IlTmHlTZF4kLh9xj-Zy-WxQgXakYyk,125
+ eva/vision/data/tv_tensors/volume.py,sha256=VlWTIbswNv-aUqEWd1EJgoqEH60d-gNALPG815TD_W8,2381
  eva/vision/data/wsi/__init__.py,sha256=vfSfyogsj4OS1sGKfsYWyj2O5ZMT9iqkc1lvcuZJVGI,422
  eva/vision/data/wsi/backends/__init__.py,sha256=wX7cjeT7ktX8sH6lRDEEU5cgRKLH6RhPyey16aJthJ4,2251
  eva/vision/data/wsi/backends/base.py,sha256=0oFzMc3zklLyqyD_kzDKekydeFyDReqjBBj1qJLdM9Y,4094
@@ -177,45 +191,47 @@ eva/vision/metrics/__init__.py,sha256=zXOc1Idgfk86CGE5yBHn3B22iD5tRyfl4H-kTSB2dC
  eva/vision/metrics/defaults/__init__.py,sha256=ncQ9uH5q5SpfalyPX6dINPRLk34HLw6z9u8ny_HHbFQ,174
  eva/vision/metrics/defaults/segmentation/__init__.py,sha256=ve6dwyfhJGYBYKS6l6OySCBs32JnEBFnvhAyNvj-Uqo,191
  eva/vision/metrics/defaults/segmentation/multiclass.py,sha256=MUBp-PIyiJB2VVV_NintRrP7Ha2lJ75_3xvqSdeDYwE,2855
- eva/vision/metrics/segmentation/BUILD,sha256=Nf7BYWWe1USoFEIsIiEVZ8sa05J5FPkMJ-UIMDLrU8o,17
  eva/vision/metrics/segmentation/__init__.py,sha256=7iz3fFNd-iBuNyxdeSfsgp6D7oZtmPsbyA0ZKRzzRCw,402
  eva/vision/metrics/segmentation/_utils.py,sha256=_ubv2sP1-f_dLKy8Y4wLkj5ed56fAFLURfv1shQWVcs,2402
  eva/vision/metrics/segmentation/dice.py,sha256=H_U6XSZcieX0xb6aptxxW1s-Jshs8Lp4P1SAwjdwntM,2905
  eva/vision/metrics/segmentation/generalized_dice.py,sha256=T57An-lBVefnlv6dIWVRNghFxy0e0K470xwla0TbCSk,2436
  eva/vision/metrics/segmentation/mean_iou.py,sha256=2PjqTa_VAtnW4nxHzT93uBKgnml7INU-wt_jR68RM54,2104
- eva/vision/metrics/segmentation/monai_dice.py,sha256=febnvA2gtTyydLZMwjQBS1zq2NjZcsXf0EcV0eRn8Aw,2117
+ eva/vision/metrics/segmentation/monai_dice.py,sha256=I_DX6r4y5d9QzxI3WyMV14uwt1uqrKlRqbNHqGMtmy0,2421
  eva/vision/metrics/wrappers/__init__.py,sha256=V4z3hradMa6CQgTkk1bc2cbZzCgcoIYw7-hufMK3D_4,128
  eva/vision/metrics/wrappers/monai.py,sha256=FNa1yHN2U3vO6BGqS0BFm8uJAL6DCzSE4XOFCV4aBjg,885
  eva/vision/models/__init__.py,sha256=a-P6JL73A3miHQnqgqUz07XtVmQB_o4DqPImk5rEATo,275
  eva/vision/models/modules/__init__.py,sha256=vaM_V6OF2s0lYjralP8dzv8mAtv_xIMZItfXgz0NZg8,156
- eva/vision/models/modules/semantic_segmentation.py,sha256=PSeqm5h6YgbzQ0jA9lUexGYUE3ehfWx-LH1NgZ7cGhw,7300
+ eva/vision/models/modules/semantic_segmentation.py,sha256=eXRx7wXKDLqMYHGj9IH_6WxlQNYaYEU6J70soVFedp0,7629
  eva/vision/models/networks/__init__.py,sha256=j43IurizNlAyKPH2jwDHaeq49L2QvwbHWqUaptA1mG4,100
  eva/vision/models/networks/abmil.py,sha256=N1eH4fn1nXmgXurSQyQIxxonv7nsqeeuPWaQSHeltfs,6796
- eva/vision/models/networks/backbones/__init__.py,sha256=CvK0sHKufUq4chwX-p2cFVBZFReMuwmeHFTG5LUA6CM,318
+ eva/vision/models/networks/backbones/__init__.py,sha256=mvYVtmJOvYLCXDX52hP6dzQxj9cQikwSeBZvEDNyNmU,347
  eva/vision/models/networks/backbones/_utils.py,sha256=V7xeod4mElEuuO1TRW0xJE051cUyS1Saraw3-KcK1Mw,1667
- eva/vision/models/networks/backbones/pathology/__init__.py,sha256=goR59h8bfzd-Wa3rxPPdaSlAOH_df8SHBkTSKi08TS8,1147
- eva/vision/models/networks/backbones/pathology/bioptimus.py,sha256=wUSKjYgxcRV3FRHGaPwF1uRAQcGO0rHNHGmK1QDJXk4,991
+ eva/vision/models/networks/backbones/pathology/__init__.py,sha256=JZ1mhKm4w89JTrXDfTM02OyFWtDuxRhhvpytDk_t500,1386
+ eva/vision/models/networks/backbones/pathology/bioptimus.py,sha256=NrS0WJqiJKjDYT3odQGLPgnzMuCbJfWoW1Dal-L9F50,2626
  eva/vision/models/networks/backbones/pathology/gigapath.py,sha256=mfGXtKhY7XLpKQQAFNVZYsM-aeHCEbOVUrxpAEOr-l8,955
  eva/vision/models/networks/backbones/pathology/histai.py,sha256=X_we3U7GK91RrXyOX2PJB-YFDF2ozdL2fzZhNxm9SVU,1914
- eva/vision/models/networks/backbones/pathology/kaiko.py,sha256=GSdBG4WXrs1PWB2hr-sy_dFe2riwpPKwHx71esDoVfE,3952
+ eva/vision/models/networks/backbones/pathology/hkust.py,sha256=bZpzx7EvK4CVefNnJmyz-2Ta-WdYDwEDzf-zWoZkoCQ,2308
+ eva/vision/models/networks/backbones/pathology/kaiko.py,sha256=lVzgWhgFn1iOlfSSxsX2cH16rrFQFjzdaF6_HS1y-6c,4517
  eva/vision/models/networks/backbones/pathology/lunit.py,sha256=ku4lr9pWeeHatHN4x4OVgwlve9sVqiRqIbgI0PXLiqg,2160
  eva/vision/models/networks/backbones/pathology/mahmood.py,sha256=VYoVWrMNkoaEqa0och-GbwGd0VISQmbtzk1dSBZ1M0I,2464
  eva/vision/models/networks/backbones/pathology/owkin.py,sha256=uWJV5fgY7UZX6ilgGzkPY9fnlOiF03W7E8rc9TmlHGg,1231
  eva/vision/models/networks/backbones/pathology/paige.py,sha256=MjOLgdEKk8tdAIpCiHelasGwPE7xgzaooW6EE7IsuEE,1642
+ eva/vision/models/networks/backbones/radiology/__init__.py,sha256=pD8ijQZRaX_Lu3tPBV73qUVaAURDrB_2pEyyBdRZmis,294
+ eva/vision/models/networks/backbones/radiology/swin_unetr.py,sha256=n5lJkoKjxKogs5Q_XuKh7Q5J96Bgln5W4ShL-VwSZXs,7976
+ eva/vision/models/networks/backbones/radiology/voco.py,sha256=sICZnsxQYnqYEmauhB6CBmaqpzBoAB6CpXJjNm5FesI,2464
  eva/vision/models/networks/backbones/registry.py,sha256=anjILtEHHB6Ltwiw22h1bsgWtIjh_l5_fkPh87K7-d0,1631
  eva/vision/models/networks/backbones/timm/__init__.py,sha256=cZH3av9gIZcvEVD0rwKsI-MEq7zPqaW4dQ0E05CksvQ,128
  eva/vision/models/networks/backbones/timm/backbones.py,sha256=fCTiwqU6NhQ-ccAMzmpPDddXkFzRAB3mw4lcQ9um_PU,1646
- eva/vision/models/networks/backbones/torchhub/__init__.py,sha256=zBLJBvkwKJ1jD7M3Wt5BE6Cx-R8G2YRoyPG7p2V-3nQ,147
- eva/vision/models/networks/backbones/torchhub/backbones.py,sha256=hgCCoP8AdRSsli0w9a_PRNB-UR36-SLLhBIW0BFrkdE,1911
  eva/vision/models/networks/backbones/universal/__init__.py,sha256=MAlkALSJ2_w6spSbB7NmKlL0Jsk1YKEycatdI0xO0_I,252
  eva/vision/models/networks/backbones/universal/vit.py,sha256=kpUCoXpefR34hRNlQDFK9lGr4oqS8Mn5vTLKWZ-gaOs,1820
  eva/vision/models/networks/decoders/__init__.py,sha256=RXFWmoYw2i6E9VOUCJmU8c72icHannVuo-cUKy6fnLM,200
- eva/vision/models/networks/decoders/segmentation/__init__.py,sha256=N6jrhXHj0P7i7RptZbZ-JFehT2BM7meFyNIK0owAkaE,517
+ eva/vision/models/networks/decoders/segmentation/__init__.py,sha256=SqmxtzxwBRF8g2hsiqe0o3Nr0HFK97azTnWLyqsYigY,652
  eva/vision/models/networks/decoders/segmentation/base.py,sha256=b2TIJKiJR9vejVRpNyedMJLPTrpHhAEXvco8atb9TPU,411
  eva/vision/models/networks/decoders/segmentation/decoder2d.py,sha256=A7vz0LJ_YweftpKeEBJm0Y3N7hbVLDSIkAajaQv1UgE,4456
- eva/vision/models/networks/decoders/segmentation/linear.py,sha256=-i9RVaKM1UsB3AXDDKdMmHiD7y2sr5HfF-WvkB47Fhw,4743
- eva/vision/models/networks/decoders/segmentation/semantic/__init__.py,sha256=Ubs8GXyQpEHs26JUeUuiVP3jfn47eiBZM_UVbu749XU,398
+ eva/vision/models/networks/decoders/segmentation/linear.py,sha256=PZeEIH0ybgxgIKtmcflh8jsARo5NQqkgoGbpAZd7yj4,4650
+ eva/vision/models/networks/decoders/segmentation/semantic/__init__.py,sha256=2yol7W1ARXL-Ge7gYxjUzaGTjH6nfMBlNqQJHprEWGg,539
  eva/vision/models/networks/decoders/segmentation/semantic/common.py,sha256=fPTb0T-2FiOU-jT81ynASKaW7fJiRk6vQjuPkzHOluc,2530
+ eva/vision/models/networks/decoders/segmentation/semantic/swin_unetr.py,sha256=ODUpnJrpDQl0m8CC2SPnE_lpFflzS0GSiCZOmrjL6uQ,3373
  eva/vision/models/networks/decoders/segmentation/semantic/with_image.py,sha256=I5PyGKKo8DcXYcw4xlCFzuavRJNRrzGT-szpDidMPXI,3516
  eva/vision/models/networks/decoders/segmentation/typings.py,sha256=8zAqIJLlQdCjsx-Dl4lnF4BB1VxTg_AyIquBVwpZlHg,537
  eva/vision/models/wrappers/__init__.py,sha256=ogmr-eeVuGaOCcsuxSp6PGyauP2QqWTb8dGTtbC7lRU,210
@@ -224,14 +240,14 @@ eva/vision/models/wrappers/from_timm.py,sha256=Z38Nb1i6OPKkgvFZOvGx-O3AZQuscf1zR
  eva/vision/utils/__init__.py,sha256=vaUovprE743SmyFH8l6uk4pYSWpI4zxn7lN0EwePTJI,96
  eva/vision/utils/colormap.py,sha256=sP1F0JCX3abZfFgdxEjLJO-LhNYKjXZvXxs03ZgrEvI,2876
  eva/vision/utils/convert.py,sha256=fqGmKrg5-JJLrTkTXB4YDcWTudXPrO1gGjsckVRUesU,1881
- eva/vision/utils/io/__init__.py,sha256=XGJ_W94DVEYXJ_tVpr_20NMpR5JLWEWHGF3v9Low79A,610
+ eva/vision/utils/io/__init__.py,sha256=Oa4CjmqXN0wzkG1PW79zSsHrN1jlI7_VJ5NSXLKx0eA,652
  eva/vision/utils/io/_utils.py,sha256=JzOt7Frj6ScF_aNjFtfHBn4ROnl6NhUZucmQhLc4Cww,768
  eva/vision/utils/io/image.py,sha256=IdOkr5MYqhYHz8U9drZ7wULTM3YHwCWSjZlu_Qdl4GQ,2053
  eva/vision/utils/io/mat.py,sha256=qpGifyjmpE0Xhv567Si7-zxKrgkgE0sywP70cHiLFGU,808
- eva/vision/utils/io/nifti.py,sha256=4YoKjKuoNdE0qY7tYB_WlnSsYAx2oBzZRZXczc_8HAU,2555
+ eva/vision/utils/io/nifti.py,sha256=TFMgNhLqIK3sl3RjIRXEABM7FmSQjqVOwk1vXkuvX2w,4983
  eva/vision/utils/io/text.py,sha256=qYgfo_ZaDZWfG02NkVVYzo5QFySqdCCz5uLA9d-zXtI,701
- kaiko_eva-0.2.0.dist-info/METADATA,sha256=CTFbtAErERl1SU0--56Y5-d1tXrr1vBNtNVkaV3orrA,24899
- kaiko_eva-0.2.0.dist-info/WHEEL,sha256=thaaA2w1JzcGC48WYufAs8nrYZjJm8LqNfnXFOFyCC4,90
- kaiko_eva-0.2.0.dist-info/entry_points.txt,sha256=6CSLu9bmQYJSXEg8gbOzRhxH0AGs75BB-vPm3VvfcNE,88
- kaiko_eva-0.2.0.dist-info/licenses/LICENSE,sha256=e6AEzr7j_R-PYr2qLO-JwLn8y70jbVD3U2mxbRmwcI4,11338
- kaiko_eva-0.2.0.dist-info/RECORD,,
+ kaiko_eva-0.2.1.dist-info/METADATA,sha256=78-RgtBLumKmrWLlv6Q8iJ6JU-InxPCudfJcuy7pVUQ,24992
+ kaiko_eva-0.2.1.dist-info/WHEEL,sha256=tSfRZzRHthuv7vxpI4aehrdN9scLjk-dCJkPLzkHxGg,90
+ kaiko_eva-0.2.1.dist-info/entry_points.txt,sha256=6CSLu9bmQYJSXEg8gbOzRhxH0AGs75BB-vPm3VvfcNE,88
+ kaiko_eva-0.2.1.dist-info/licenses/LICENSE,sha256=e6AEzr7j_R-PYr2qLO-JwLn8y70jbVD3U2mxbRmwcI4,11338
+ kaiko_eva-0.2.1.dist-info/RECORD,,
@@ -1,4 +1,4 @@
  Wheel-Version: 1.0
- Generator: pdm-backend (2.4.3)
+ Generator: pdm-backend (2.4.4)
  Root-Is-Purelib: true
  Tag: py3-none-any
@@ -1,96 +0,0 @@
- """Base for image classification datasets."""
-
- import abc
- from typing import Any, Callable, Dict, List, Tuple
-
- import torch
- from torchvision import tv_tensors
- from typing_extensions import override
-
- from eva.vision.data.datasets import vision
-
-
- class ImageClassification(vision.VisionDataset[Tuple[tv_tensors.Image, torch.Tensor]], abc.ABC):
-     """Image classification abstract dataset."""
-
-     def __init__(
-         self,
-         transforms: Callable | None = None,
-     ) -> None:
-         """Initializes the image classification dataset.
-
-         Args:
-             transforms: A function/transform which returns a transformed
-                 version of the raw data samples.
-         """
-         super().__init__()
-
-         self._transforms = transforms
-
-     @property
-     def classes(self) -> List[str] | None:
-         """Returns the list with names of the dataset names."""
-
-     @property
-     def class_to_idx(self) -> Dict[str, int] | None:
-         """Returns a mapping of the class name to its target index."""
-
-     def load_metadata(self, index: int) -> Dict[str, Any] | None:
-         """Returns the dataset metadata.
-
-         Args:
-             index: The index of the data sample to return the metadata of.
-
-         Returns:
-             The sample metadata.
-         """
-
-     @abc.abstractmethod
-     def load_image(self, index: int) -> tv_tensors.Image:
-         """Returns the `index`'th image sample.
-
-         Args:
-             index: The index of the data sample to load.
-
-         Returns:
-             The image as a numpy array.
-         """
-
-     @abc.abstractmethod
-     def load_target(self, index: int) -> torch.Tensor:
-         """Returns the `index`'th target sample.
-
-         Args:
-             index: The index of the data sample to load.
-
-         Returns:
-             The sample target as an array.
-         """
-
-     @abc.abstractmethod
-     @override
-     def __len__(self) -> int:
-         raise NotImplementedError
-
-     @override
-     def __getitem__(self, index: int) -> Tuple[tv_tensors.Image, torch.Tensor, Dict[str, Any]]:
-         image = self.load_image(index)
-         target = self.load_target(index)
-         image, target = self._apply_transforms(image, target)
-         return image, target, self.load_metadata(index) or {}
-
-     def _apply_transforms(
-         self, image: tv_tensors.Image, target: torch.Tensor
-     ) -> Tuple[tv_tensors.Image, torch.Tensor]:
-         """Applies the transforms to the provided data and returns them.
-
-         Args:
-             image: The desired image.
-             target: The target of the image.
-
-         Returns:
-             A tuple with the image and the target transformed.
-         """
-         if self._transforms is not None:
-             image, target = self._transforms(image, target)
-         return image, target
@@ -1,96 +0,0 @@
- """Base for image segmentation datasets."""
-
- import abc
- from typing import Any, Callable, Dict, List, Tuple
-
- from torchvision import tv_tensors
- from typing_extensions import override
-
- from eva.vision.data.datasets import vision
-
-
- class ImageSegmentation(vision.VisionDataset[Tuple[tv_tensors.Image, tv_tensors.Mask]], abc.ABC):
-     """Image segmentation abstract dataset."""
-
-     def __init__(self, transforms: Callable | None = None) -> None:
-         """Initializes the image segmentation base class.
-
-         Args:
-             transforms: A function/transforms that takes in an
-                 image and a label and returns the transformed versions of both.
-         """
-         super().__init__()
-
-         self._transforms = transforms
-
-     @property
-     def classes(self) -> List[str] | None:
-         """Returns the list with names of the dataset names."""
-
-     @property
-     def class_to_idx(self) -> Dict[str, int] | None:
-         """Returns a mapping of the class name to its target index."""
-
-     @abc.abstractmethod
-     def load_image(self, index: int) -> tv_tensors.Image:
-         """Loads and returns the `index`'th image sample.
-
-         Args:
-             index: The index of the data sample to load.
-
-         Returns:
-             An image torchvision tensor (channels, height, width).
-         """
-
-     @abc.abstractmethod
-     def load_mask(self, index: int) -> tv_tensors.Mask:
-         """Returns the `index`'th target masks sample.
-
-         Args:
-             index: The index of the data sample target masks to load.
-
-         Returns:
-             The semantic mask as a (H x W) shaped tensor with integer
-             values which represent the pixel class id.
-         """
-
-     def load_metadata(self, index: int) -> Dict[str, Any] | None:
-         """Returns the dataset metadata.
-
-         Args:
-             index: The index of the data sample to return the metadata of.
-                 If `None`, it will return the metadata of the current dataset.
-
-         Returns:
-             The sample metadata.
-         """
-
-     @abc.abstractmethod
-     @override
-     def __len__(self) -> int:
-         raise NotImplementedError
-
-     @override
-     def __getitem__(self, index: int) -> Tuple[tv_tensors.Image, tv_tensors.Mask, Dict[str, Any]]:
-         image = self.load_image(index)
-         mask = self.load_mask(index)
-         metadata = self.load_metadata(index) or {}
-         image_tensor, mask_tensor = self._apply_transforms(image, mask)
-         return image_tensor, mask_tensor, metadata
-
-     def _apply_transforms(
-         self, image: tv_tensors.Image, mask: tv_tensors.Mask
-     ) -> Tuple[tv_tensors.Image, tv_tensors.Mask]:
-         """Applies the transforms to the provided data and returns them.
-
-         Args:
-             image: The desired image.
-             mask: The target segmentation mask.
-
-         Returns:
-             A tuple with the image and the masks transformed.
-         """
-         if self._transforms is not None:
-             image, mask = self._transforms(image, mask)
-
-         return image, mask
@@ -1,51 +0,0 @@
- """Specialized transforms for resizing, clamping and range normalizing."""
-
- from typing import Callable, Sequence, Tuple
-
- from torchvision.transforms import v2
-
- from eva.vision.data.transforms import normalization
-
-
- class ResizeAndClamp(v2.Compose):
-     """Resizes, crops, clamps and normalizes an input image."""
-
-     def __init__(
-         self,
-         size: int | Sequence[int] = 224,
-         clamp_range: Tuple[int, int] = (-1024, 1024),
-         mean: Sequence[float] = (0.0, 0.0, 0.0),
-         std: Sequence[float] = (1.0, 1.0, 1.0),
-     ) -> None:
-         """Initializes the transform object.
-
-         Args:
-             size: Desired output size of the crop. If size is an `int` instead
-                 of sequence like (h, w), a square crop (size, size) is made.
-             clamp_range: The lower and upper bound to clamp the pixel values.
-             mean: Sequence of means for each image channel.
-             std: Sequence of standard deviations for each image channel.
-         """
-         self._size = size
-         self._clamp_range = clamp_range
-         self._mean = mean
-         self._std = std
-
-         super().__init__(transforms=self._build_transforms())
-
-     def _build_transforms(self) -> Sequence[Callable]:
-         """Builds and returns the list of transforms."""
-         transforms = [
-             v2.Resize(size=self._size),
-             v2.CenterCrop(size=self._size),
-             normalization.Clamp(out_range=self._clamp_range),
-             normalization.RescaleIntensity(
-                 in_range=self._clamp_range,
-                 out_range=(0.0, 1.0),
-             ),
-             v2.Normalize(
-                 mean=self._mean,
-                 std=self._std,
-             ),
-         ]
-         return transforms
@@ -1,6 +0,0 @@
- """Normalization related transformations."""
-
- from eva.vision.data.transforms.normalization.clamp import Clamp
- from eva.vision.data.transforms.normalization.rescale_intensity import RescaleIntensity
-
- __all__ = ["Clamp", "RescaleIntensity"]
@@ -1,43 +0,0 @@
- """Image clamp transform."""
-
- import functools
- from typing import Any, Dict, Tuple
-
- import torch
- import torchvision.transforms.v2 as torch_transforms
- from torchvision import tv_tensors
- from typing_extensions import override
-
-
- class Clamp(torch_transforms.Transform):
-     """Clamps all elements in input into a specific range."""
-
-     def __init__(self, out_range: Tuple[int, int]) -> None:
-         """Initializes the transform.
-
-         Args:
-             out_range: The lower and upper bound of the range to
-                 be clamped to.
-         """
-         super().__init__()
-
-         self._out_range = out_range
-
-     @functools.singledispatchmethod
-     @override
-     def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any:
-         return inpt
-
-     @_transform.register(torch.Tensor)
-     def _(self, inpt: torch.Tensor, params: Dict[str, Any]) -> Any:
-         return torch.clamp(inpt, min=self._out_range[0], max=self._out_range[1])
-
-     @_transform.register(tv_tensors.Image)
-     def _(self, inpt: tv_tensors.Image, params: Dict[str, Any]) -> Any:
-         inpt_clamp = torch.clamp(inpt, min=self._out_range[0], max=self._out_range[1])
-         return tv_tensors.wrap(inpt_clamp, like=inpt)
-
-     @_transform.register(tv_tensors.BoundingBoxes)
-     @_transform.register(tv_tensors.Mask)
-     def _(self, inpt: tv_tensors.BoundingBoxes | tv_tensors.Mask, params: Dict[str, Any]) -> Any:
-         return inpt
@@ -1,5 +0,0 @@
- """Functional normalization related transformations API."""
-
- from eva.vision.data.transforms.normalization.functional.rescale_intensity import rescale_intensity
-
- __all__ = ["rescale_intensity"]
@@ -1,28 +0,0 @@
- """Intensity level functions."""
-
- import sys
- from typing import Tuple
-
- import torch
-
-
- def rescale_intensity(
-     image: torch.Tensor,
-     in_range: Tuple[float, float] | None = None,
-     out_range: Tuple[float, float] = (0.0, 1.0),
- ) -> torch.Tensor:
-     """Stretches or shrinks the image intensity levels.
-
-     Args:
-         image: The image tensor as float-type.
-         in_range: The input data range. If `None`, it will
-             fetch the min and max of the input image.
-         out_range: The desired intensity range of the output.
-
-     Returns:
-         The image tensor after stretching or shrinking its intensity levels.
-     """
-     imin, imax = in_range or (image.min(), image.max())
-     omin, omax = out_range
-     image_scaled = (image - imin) / (imax - imin + sys.float_info.epsilon)
-     return image_scaled * (omax - omin) + omin
@@ -1,53 +0,0 @@
- """Intensity level scaling transform."""
-
- import functools
- from typing import Any, Dict, Tuple
-
- import torch
- import torchvision.transforms.v2 as torch_transforms
- from torchvision import tv_tensors
- from typing_extensions import override
-
- from eva.vision.data.transforms.normalization import functional
-
-
- class RescaleIntensity(torch_transforms.Transform):
-     """Stretches or shrinks the image intensity levels."""
-
-     def __init__(
-         self,
-         in_range: Tuple[float, float] | None = None,
-         out_range: Tuple[float, float] = (0.0, 1.0),
-     ) -> None:
-         """Initializes the transform.
-
-         Args:
-             in_range: The input data range. If `None`, it will
-                 fetch the min and max of the input image.
-             out_range: The desired intensity range of the output.
-         """
-         super().__init__()
-
-         self._in_range = in_range
-         self._out_range = out_range
-
-     @functools.singledispatchmethod
-     @override
-     def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any:
-         return inpt
-
-     @_transform.register(torch.Tensor)
-     def _(self, inpt: torch.Tensor, params: Dict[str, Any]) -> Any:
-         return functional.rescale_intensity(
-             inpt, in_range=self._in_range, out_range=self._out_range
-         )
-
-     @_transform.register(tv_tensors.Image)
-     def _(self, inpt: tv_tensors.Image, params: Dict[str, Any]) -> Any:
-         scaled_inpt = functional.rescale_intensity(inpt, out_range=self._out_range)
-         return tv_tensors.wrap(scaled_inpt, like=inpt)
-
-     @_transform.register(tv_tensors.BoundingBoxes)
-     @_transform.register(tv_tensors.Mask)
-     def _(self, inpt: tv_tensors.BoundingBoxes | tv_tensors.Mask, params: Dict[str, Any]) -> Any:
-         return inpt
@@ -1 +0,0 @@
- python_sources()
@@ -1,5 +0,0 @@
- """torch.hub backbones API."""
-
- from eva.vision.models.networks.backbones.torchhub.backbones import torch_hub_model
-
- __all__ = ["torch_hub_model"]