fastMONAI 0.4.0.1__tar.gz → 0.5.0.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- fastmonai-0.5.0.0/PKG-INFO +149 -0
- fastmonai-0.5.0.0/README.md +99 -0
- fastmonai-0.5.0.0/fastMONAI/__init__.py +1 -0
- {fastMONAI-0.4.0.1 → fastmonai-0.5.0.0}/fastMONAI/_modidx.py +51 -2
- {fastMONAI-0.4.0.1 → fastmonai-0.5.0.0}/fastMONAI/dataset_info.py +4 -4
- fastmonai-0.5.0.0/fastMONAI/utils.py +237 -0
- {fastMONAI-0.4.0.1 → fastmonai-0.5.0.0}/fastMONAI/vision_core.py +156 -14
- {fastMONAI-0.4.0.1 → fastmonai-0.5.0.0}/fastMONAI/vision_data.py +20 -18
- {fastMONAI-0.4.0.1 → fastmonai-0.5.0.0}/fastMONAI/vision_inference.py +17 -3
- fastmonai-0.5.0.0/fastMONAI.egg-info/PKG-INFO +149 -0
- {fastMONAI-0.4.0.1 → fastmonai-0.5.0.0}/fastMONAI.egg-info/SOURCES.txt +1 -0
- fastmonai-0.5.0.0/fastMONAI.egg-info/requires.txt +18 -0
- fastmonai-0.5.0.0/pyproject.toml +11 -0
- {fastMONAI-0.4.0.1 → fastmonai-0.5.0.0}/settings.ini +5 -5
- {fastMONAI-0.4.0.1 → fastmonai-0.5.0.0}/setup.py +1 -1
- fastMONAI-0.4.0.1/PKG-INFO +0 -75
- fastMONAI-0.4.0.1/README.md +0 -53
- fastMONAI-0.4.0.1/fastMONAI/__init__.py +0 -1
- fastMONAI-0.4.0.1/fastMONAI/utils.py +0 -45
- fastMONAI-0.4.0.1/fastMONAI.egg-info/PKG-INFO +0 -75
- fastMONAI-0.4.0.1/fastMONAI.egg-info/requires.txt +0 -15
- {fastMONAI-0.4.0.1 → fastmonai-0.5.0.0}/CONTRIBUTING.md +0 -0
- {fastMONAI-0.4.0.1 → fastmonai-0.5.0.0}/LICENSE +0 -0
- {fastMONAI-0.4.0.1 → fastmonai-0.5.0.0}/MANIFEST.in +0 -0
- {fastMONAI-0.4.0.1 → fastmonai-0.5.0.0}/fastMONAI/external_data.py +0 -0
- {fastMONAI-0.4.0.1 → fastmonai-0.5.0.0}/fastMONAI/research_utils.py +0 -0
- {fastMONAI-0.4.0.1 → fastmonai-0.5.0.0}/fastMONAI/vision_all.py +0 -0
- {fastMONAI-0.4.0.1 → fastmonai-0.5.0.0}/fastMONAI/vision_augmentation.py +0 -0
- {fastMONAI-0.4.0.1 → fastmonai-0.5.0.0}/fastMONAI/vision_loss.py +0 -0
- {fastMONAI-0.4.0.1 → fastmonai-0.5.0.0}/fastMONAI/vision_metrics.py +0 -0
- {fastMONAI-0.4.0.1 → fastmonai-0.5.0.0}/fastMONAI/vision_plot.py +0 -0
- {fastMONAI-0.4.0.1 → fastmonai-0.5.0.0}/fastMONAI.egg-info/dependency_links.txt +0 -0
- {fastMONAI-0.4.0.1 → fastmonai-0.5.0.0}/fastMONAI.egg-info/entry_points.txt +0 -0
- {fastMONAI-0.4.0.1 → fastmonai-0.5.0.0}/fastMONAI.egg-info/not-zip-safe +0 -0
- {fastMONAI-0.4.0.1 → fastmonai-0.5.0.0}/fastMONAI.egg-info/top_level.txt +0 -0
- {fastMONAI-0.4.0.1 → fastmonai-0.5.0.0}/setup.cfg +0 -0
@@ -0,0 +1,149 @@
+Metadata-Version: 2.4
+Name: fastMONAI
+Version: 0.5.0.0
+Summary: fastMONAI library
+Home-page: https://github.com/MMIV-ML/fastMONAI
+Author: Satheshkumar Kaliyugarasan
+Author-email: skaliyugarasan@hotmail.com
+License: Apache Software License 2.0
+Keywords: deep learning,medical imaging
+Classifier: Development Status :: 3 - Alpha
+Classifier: Intended Audience :: Developers
+Classifier: Natural Language :: English
+Classifier: Programming Language :: Python :: 3.10
+Classifier: Programming Language :: Python :: 3.11
+Classifier: Programming Language :: Python :: 3.12
+Classifier: License :: OSI Approved :: Apache Software License
+Requires-Python: >=3.10
+Description-Content-Type: text/markdown
+License-File: LICENSE
+Requires-Dist: fastai==2.8.3
+Requires-Dist: monai==1.5.0
+Requires-Dist: torchio==0.20.19
+Requires-Dist: xlrd>=1.2.0
+Requires-Dist: scikit-image==0.25.2
+Requires-Dist: imagedata==3.8.4
+Requires-Dist: mlflow==3.3.1
+Requires-Dist: huggingface-hub
+Requires-Dist: gdown
+Requires-Dist: gradio
+Requires-Dist: opencv-python
+Requires-Dist: plum-dispatch
+Provides-Extra: dev
+Requires-Dist: ipywidgets; extra == "dev"
+Requires-Dist: nbdev; extra == "dev"
+Requires-Dist: tabulate; extra == "dev"
+Requires-Dist: quarto; extra == "dev"
+Dynamic: author
+Dynamic: author-email
+Dynamic: classifier
+Dynamic: description
+Dynamic: description-content-type
+Dynamic: home-page
+Dynamic: keywords
+Dynamic: license
+Dynamic: license-file
+Dynamic: provides-extra
+Dynamic: requires-dist
+Dynamic: requires-python
+Dynamic: summary
+
+# Overview
+
+
+<!-- WARNING: THIS FILE WAS AUTOGENERATED! DO NOT EDIT! -->
+
+
+
+
+[](https://fastmonai.no)
+[](https://pypi.org/project/fastMONAI)
+
+A low-code Python-based open source deep learning library built on top
+of [fastai](https://github.com/fastai/fastai),
+[MONAI](https://monai.io/), [TorchIO](https://torchio.readthedocs.io/),
+and [Imagedata](https://imagedata.readthedocs.io/).
+
+fastMONAI simplifies the use of state-of-the-art deep learning
+techniques in 3D medical image analysis for solving classification,
+regression, and segmentation tasks. fastMONAI provides users with
+functionality to step through data loading, preprocessing, training,
+and result interpretation.
+
+<b>Note:</b> This documentation is also available as interactive
+notebooks.
+
+## Requirements
+
+- **Python:** 3.10, 3.11, or 3.12 (Python 3.11 recommended)
+- **GPU:** CUDA-compatible GPU recommended for training (CPU supported
+  for inference)
+
+# Installation
+
+## Environment setup (recommended)
+
+We recommend using a conda environment to avoid dependency conflicts:
+
+`conda create -n fastmonai python=3.11`
+
+`conda activate fastmonai`
+
+## Quick Install [(PyPI)](https://pypi.org/project/fastMONAI/)
+
+`pip install fastMONAI`
+
+## Development install [(GitHub)](https://github.com/MMIV-ML/fastMONAI)
+
+If you want to install an editable version of fastMONAI for development:
+
+    git clone https://github.com/MMIV-ML/fastMONAI
+    cd fastMONAI
+
+    # Create development environment
+    conda create -n fastmonai-dev python=3.11
+    conda activate fastmonai-dev
+
+    # Install in development mode
+    pip install -e '.[dev]'
+
+# Getting started
+
+The best way to get started using fastMONAI is to read our
+[paper](https://www.sciencedirect.com/science/article/pii/S2665963823001203)
+and dive into our beginner-friendly [video
+tutorial](https://fastmonai.no/tutorial_beginner_video). For a deeper
+understanding and hands-on experience, our comprehensive instructional
+notebooks will walk you through model training for various tasks like
+classification, regression, and segmentation. See the docs at
+https://fastmonai.no for more information.
+
+| Notebook | 1-Click Notebook |
+|:---|----|
+| [10a_tutorial_classification.ipynb](https://nbviewer.org/github/MMIV-ML/fastMONAI/blob/master/nbs/10a_tutorial_classification.ipynb) <br>shows how to construct a binary classification model based on MRI data. | [](https://colab.research.google.com/github/MMIV-ML/fastMONAI/blob/master/nbs/10a_tutorial_classification.ipynb) |
+| [10b_tutorial_regression.ipynb](https://nbviewer.org/github/MMIV-ML/fastMONAI/blob/master/nbs/10b_tutorial_regression.ipynb) <br>shows how to construct a model to predict the age of a subject from MRI scans (“brain age”). | [](https://colab.research.google.com/github/MMIV-ML/fastMONAI/blob/master/nbs/10b_tutorial_regression.ipynb) |
+| [10c_tutorial_binary_segmentation.ipynb](https://nbviewer.org/github/MMIV-ML/fastMONAI/blob/master/nbs/10c_tutorial_binary_segmentation.ipynb) <br>shows how to do binary segmentation (extract the left atrium from monomodal cardiac MRI). | [](https://colab.research.google.com/github/MMIV-ML/fastMONAI/blob/master/nbs/10c_tutorial_binary_segmentation.ipynb) |
+| [10d_tutorial_multiclass_segmentation.ipynb](https://nbviewer.org/github/MMIV-ML/fastMONAI/blob/master/nbs/10d_tutorial_multiclass_segmentation.ipynb) <br>shows how to perform segmentation from multimodal MRI (brain tumor segmentation). | [](https://colab.research.google.com/github/MMIV-ML/fastMONAI/blob/master/nbs/10d_tutorial_multiclass_segmentation.ipynb) |
+
+# How to contribute
+
+We welcome contributions! See
+[CONTRIBUTING.md](https://github.com/MMIV-ML/fastMONAI/blob/master/CONTRIBUTING.md)
+
+# Citing fastMONAI
+
+If you are using fastMONAI in your research, please use the following
+citation:
+
+    @article{KALIYUGARASAN2023100583,
+      title = {fastMONAI: A low-code deep learning library for medical image analysis},
+      journal = {Software Impacts},
+      pages = {100583},
+      year = {2023},
+      issn = {2665-9638},
+      doi = {https://doi.org/10.1016/j.simpa.2023.100583},
+      url = {https://www.sciencedirect.com/science/article/pii/S2665963823001203},
+      author = {Satheshkumar Kaliyugarasan and Alexander S. Lundervold},
+      keywords = {Deep learning, Medical imaging, Radiology},
+      abstract = {We introduce fastMONAI, an open-source Python-based deep learning library for 3D medical imaging. Drawing upon the strengths of fastai, MONAI, and TorchIO, fastMONAI simplifies the use of advanced techniques for tasks like classification, regression, and segmentation. The library's design addresses domain-specific demands while promoting best practices, facilitating efficient model development. It offers newcomers an easier entry into the field while keeping the option to make advanced, lower-level customizations if needed. This paper describes the library's design, impact, limitations, and plans for future work.}
+    }
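The 0.5.0.0 metadata pins exact versions for the core stack (fastai 2.8.3, MONAI 1.5.0, TorchIO 0.20.19, MLflow 3.3.1). A minimal post-install sanity check against these pins, assuming only that each package exposes the conventional `__version__` attribute (the fastMONAI one is added in `fastMONAI/__init__.py` below):

```python
# Verify an installed environment against the 0.5.0.0 pins.
import fastMONAI, fastai, monai, torchio, mlflow

print(fastMONAI.__version__)  # expected: 0.5.0.0
print(fastai.__version__)     # expected: 2.8.3
print(monai.__version__)      # expected: 1.5.0
print(torchio.__version__)    # expected: 0.20.19
print(mlflow.__version__)     # expected: 3.3.1
```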
@@ -0,0 +1,99 @@
+# Overview
+
+
+<!-- WARNING: THIS FILE WAS AUTOGENERATED! DO NOT EDIT! -->
+
+
+
+
+[](https://fastmonai.no)
+[](https://pypi.org/project/fastMONAI)
+
+A low-code Python-based open source deep learning library built on top
+of [fastai](https://github.com/fastai/fastai),
+[MONAI](https://monai.io/), [TorchIO](https://torchio.readthedocs.io/),
+and [Imagedata](https://imagedata.readthedocs.io/).
+
+fastMONAI simplifies the use of state-of-the-art deep learning
+techniques in 3D medical image analysis for solving classification,
+regression, and segmentation tasks. fastMONAI provides users with
+functionality to step through data loading, preprocessing, training,
+and result interpretation.
+
+<b>Note:</b> This documentation is also available as interactive
+notebooks.
+
+## Requirements
+
+- **Python:** 3.10, 3.11, or 3.12 (Python 3.11 recommended)
+- **GPU:** CUDA-compatible GPU recommended for training (CPU supported
+  for inference)
+
+# Installation
+
+## Environment setup (recommended)
+
+We recommend using a conda environment to avoid dependency conflicts:
+
+`conda create -n fastmonai python=3.11`
+
+`conda activate fastmonai`
+
+## Quick Install [(PyPI)](https://pypi.org/project/fastMONAI/)
+
+`pip install fastMONAI`
+
+## Development install [(GitHub)](https://github.com/MMIV-ML/fastMONAI)
+
+If you want to install an editable version of fastMONAI for development:
+
+    git clone https://github.com/MMIV-ML/fastMONAI
+    cd fastMONAI
+
+    # Create development environment
+    conda create -n fastmonai-dev python=3.11
+    conda activate fastmonai-dev
+
+    # Install in development mode
+    pip install -e '.[dev]'
+
+# Getting started
+
+The best way to get started using fastMONAI is to read our
+[paper](https://www.sciencedirect.com/science/article/pii/S2665963823001203)
+and dive into our beginner-friendly [video
+tutorial](https://fastmonai.no/tutorial_beginner_video). For a deeper
+understanding and hands-on experience, our comprehensive instructional
+notebooks will walk you through model training for various tasks like
+classification, regression, and segmentation. See the docs at
+https://fastmonai.no for more information.
+
+| Notebook | 1-Click Notebook |
+|:---|----|
+| [10a_tutorial_classification.ipynb](https://nbviewer.org/github/MMIV-ML/fastMONAI/blob/master/nbs/10a_tutorial_classification.ipynb) <br>shows how to construct a binary classification model based on MRI data. | [](https://colab.research.google.com/github/MMIV-ML/fastMONAI/blob/master/nbs/10a_tutorial_classification.ipynb) |
+| [10b_tutorial_regression.ipynb](https://nbviewer.org/github/MMIV-ML/fastMONAI/blob/master/nbs/10b_tutorial_regression.ipynb) <br>shows how to construct a model to predict the age of a subject from MRI scans (“brain age”). | [](https://colab.research.google.com/github/MMIV-ML/fastMONAI/blob/master/nbs/10b_tutorial_regression.ipynb) |
+| [10c_tutorial_binary_segmentation.ipynb](https://nbviewer.org/github/MMIV-ML/fastMONAI/blob/master/nbs/10c_tutorial_binary_segmentation.ipynb) <br>shows how to do binary segmentation (extract the left atrium from monomodal cardiac MRI). | [](https://colab.research.google.com/github/MMIV-ML/fastMONAI/blob/master/nbs/10c_tutorial_binary_segmentation.ipynb) |
+| [10d_tutorial_multiclass_segmentation.ipynb](https://nbviewer.org/github/MMIV-ML/fastMONAI/blob/master/nbs/10d_tutorial_multiclass_segmentation.ipynb) <br>shows how to perform segmentation from multimodal MRI (brain tumor segmentation). | [](https://colab.research.google.com/github/MMIV-ML/fastMONAI/blob/master/nbs/10d_tutorial_multiclass_segmentation.ipynb) |
+
+# How to contribute
+
+We welcome contributions! See
+[CONTRIBUTING.md](https://github.com/MMIV-ML/fastMONAI/blob/master/CONTRIBUTING.md)
+
+# Citing fastMONAI
+
+If you are using fastMONAI in your research, please use the following
+citation:
+
+    @article{KALIYUGARASAN2023100583,
+      title = {fastMONAI: A low-code deep learning library for medical image analysis},
+      journal = {Software Impacts},
+      pages = {100583},
+      year = {2023},
+      issn = {2665-9638},
+      doi = {https://doi.org/10.1016/j.simpa.2023.100583},
+      url = {https://www.sciencedirect.com/science/article/pii/S2665963823001203},
+      author = {Satheshkumar Kaliyugarasan and Alexander S. Lundervold},
+      keywords = {Deep learning, Medical imaging, Radiology},
+      abstract = {We introduce fastMONAI, an open-source Python-based deep learning library for 3D medical imaging. Drawing upon the strengths of fastai, MONAI, and TorchIO, fastMONAI simplifies the use of advanced techniques for tasks like classification, regression, and segmentation. The library's design addresses domain-specific demands while promoting best practices, facilitating efficient model development. It offers newcomers an easier entry into the field while keeping the option to make advanced, lower-level customizations if needed. This paper describes the library's design, impact, limitations, and plans for future work.}
+    }
@@ -0,0 +1 @@
+__version__ = "0.5.0.0"
@@ -47,7 +47,28 @@ d = { 'settings': { 'branch': 'master',
     'fastMONAI/external_data.py')},
 'fastMONAI.research_utils': { 'fastMONAI.research_utils.pred_postprocess': ( 'research_utils.html#pred_postprocess',
     'fastMONAI/research_utils.py')},
-'fastMONAI.utils': { 'fastMONAI.utils.
+'fastMONAI.utils': { 'fastMONAI.utils.ModelTrackingCallback': ('utils.html#modeltrackingcallback', 'fastMONAI/utils.py'),
+    'fastMONAI.utils.ModelTrackingCallback.__init__': ( 'utils.html#modeltrackingcallback.__init__',
+        'fastMONAI/utils.py'),
+    'fastMONAI.utils.ModelTrackingCallback._build_config': ( 'utils.html#modeltrackingcallback._build_config',
+        'fastMONAI/utils.py'),
+    'fastMONAI.utils.ModelTrackingCallback._extract_epoch_metrics': ( 'utils.html#modeltrackingcallback._extract_epoch_metrics',
+        'fastMONAI/utils.py'),
+    'fastMONAI.utils.ModelTrackingCallback._extract_training_params': ( 'utils.html#modeltrackingcallback._extract_training_params',
+        'fastMONAI/utils.py'),
+    'fastMONAI.utils.ModelTrackingCallback._register_pytorch_model': ( 'utils.html#modeltrackingcallback._register_pytorch_model',
+        'fastMONAI/utils.py'),
+    'fastMONAI.utils.ModelTrackingCallback._save_model_artifacts': ( 'utils.html#modeltrackingcallback._save_model_artifacts',
+        'fastMONAI/utils.py'),
+    'fastMONAI.utils.ModelTrackingCallback.after_epoch': ( 'utils.html#modeltrackingcallback.after_epoch',
+        'fastMONAI/utils.py'),
+    'fastMONAI.utils.ModelTrackingCallback.after_fit': ( 'utils.html#modeltrackingcallback.after_fit',
+        'fastMONAI/utils.py'),
+    'fastMONAI.utils.ModelTrackingCallback.before_fit': ( 'utils.html#modeltrackingcallback.before_fit',
+        'fastMONAI/utils.py'),
+    'fastMONAI.utils.ModelTrackingCallback.extract_all_params': ( 'utils.html#modeltrackingcallback.extract_all_params',
+        'fastMONAI/utils.py'),
+    'fastMONAI.utils.load_variables': ('utils.html#load_variables', 'fastMONAI/utils.py'),
 'fastMONAI.utils.print_colab_gpu_info': ('utils.html#print_colab_gpu_info', 'fastMONAI/utils.py'),
 'fastMONAI.utils.store_variables': ('utils.html#store_variables', 'fastMONAI/utils.py')},
 'fastMONAI.vision_all': {},
@@ -138,23 +159,51 @@ d = { 'settings': { 'branch': 'master',
 'fastMONAI.vision_augmentation.do_pad_or_crop': ( 'vision_augment.html#do_pad_or_crop',
     'fastMONAI/vision_augmentation.py')},
 'fastMONAI.vision_core': { 'fastMONAI.vision_core.MedBase': ('vision_core.html#medbase', 'fastMONAI/vision_core.py'),
+    'fastMONAI.vision_core.MedBase.__copy__': ( 'vision_core.html#medbase.__copy__',
+        'fastMONAI/vision_core.py'),
+    'fastMONAI.vision_core.MedBase.__deepcopy__': ( 'vision_core.html#medbase.__deepcopy__',
+        'fastMONAI/vision_core.py'),
+    'fastMONAI.vision_core.MedBase.__new__': ( 'vision_core.html#medbase.__new__',
+        'fastMONAI/vision_core.py'),
     'fastMONAI.vision_core.MedBase.__repr__': ( 'vision_core.html#medbase.__repr__',
         'fastMONAI/vision_core.py'),
     'fastMONAI.vision_core.MedBase.create': ( 'vision_core.html#medbase.create',
         'fastMONAI/vision_core.py'),
     'fastMONAI.vision_core.MedBase.item_preprocessing': ( 'vision_core.html#medbase.item_preprocessing',
         'fastMONAI/vision_core.py'),
+    'fastMONAI.vision_core.MedBase.new_empty': ( 'vision_core.html#medbase.new_empty',
+        'fastMONAI/vision_core.py'),
     'fastMONAI.vision_core.MedBase.show': ('vision_core.html#medbase.show', 'fastMONAI/vision_core.py'),
     'fastMONAI.vision_core.MedImage': ('vision_core.html#medimage', 'fastMONAI/vision_core.py'),
     'fastMONAI.vision_core.MedMask': ('vision_core.html#medmask', 'fastMONAI/vision_core.py'),
     'fastMONAI.vision_core.MetaResolver': ('vision_core.html#metaresolver', 'fastMONAI/vision_core.py'),
+    'fastMONAI.vision_core.VSCodeProgressCallback': ( 'vision_core.html#vscodeprogresscallback',
+        'fastMONAI/vision_core.py'),
+    'fastMONAI.vision_core.VSCodeProgressCallback.__init__': ( 'vision_core.html#vscodeprogresscallback.__init__',
+        'fastMONAI/vision_core.py'),
+    'fastMONAI.vision_core.VSCodeProgressCallback._detect_vscode_environment': ( 'vision_core.html#vscodeprogresscallback._detect_vscode_environment',
+        'fastMONAI/vision_core.py'),
+    'fastMONAI.vision_core.VSCodeProgressCallback.after_batch': ( 'vision_core.html#vscodeprogresscallback.after_batch',
+        'fastMONAI/vision_core.py'),
+    'fastMONAI.vision_core.VSCodeProgressCallback.after_fit': ( 'vision_core.html#vscodeprogresscallback.after_fit',
+        'fastMONAI/vision_core.py'),
+    'fastMONAI.vision_core.VSCodeProgressCallback.after_validate': ( 'vision_core.html#vscodeprogresscallback.after_validate',
+        'fastMONAI/vision_core.py'),
+    'fastMONAI.vision_core.VSCodeProgressCallback.before_epoch': ( 'vision_core.html#vscodeprogresscallback.before_epoch',
+        'fastMONAI/vision_core.py'),
+    'fastMONAI.vision_core.VSCodeProgressCallback.before_fit': ( 'vision_core.html#vscodeprogresscallback.before_fit',
+        'fastMONAI/vision_core.py'),
+    'fastMONAI.vision_core.VSCodeProgressCallback.before_validate': ( 'vision_core.html#vscodeprogresscallback.before_validate',
+        'fastMONAI/vision_core.py'),
     'fastMONAI.vision_core._load_and_preprocess': ( 'vision_core.html#_load_and_preprocess',
         'fastMONAI/vision_core.py'),
     'fastMONAI.vision_core._multi_channel': ( 'vision_core.html#_multi_channel',
         'fastMONAI/vision_core.py'),
     'fastMONAI.vision_core._preprocess': ('vision_core.html#_preprocess', 'fastMONAI/vision_core.py'),
     'fastMONAI.vision_core.med_img_reader': ( 'vision_core.html#med_img_reader',
-        'fastMONAI/vision_core.py')
+        'fastMONAI/vision_core.py'),
+    'fastMONAI.vision_core.setup_vscode_progress': ( 'vision_core.html#setup_vscode_progress',
+        'fastMONAI/vision_core.py')},
 'fastMONAI.vision_data': { 'fastMONAI.vision_data.MedDataBlock': ('vision_data.html#meddatablock', 'fastMONAI/vision_data.py'),
     'fastMONAI.vision_data.MedDataBlock.__init__': ( 'vision_data.html#meddatablock.__init__',
         'fastMONAI/vision_data.py'),
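Beyond the `MedBase` copy/`new_empty` plumbing, the index reveals the other vision_core addition: `VSCodeProgressCallback` and a `setup_vscode_progress` helper. Only the symbol names are visible in this diff, so the call pattern below is an assumption rather than a documented API:

```python
# Hedged sketch: the diff exposes these names but not their signatures.
from fastMONAI.vision_core import setup_vscode_progress

# Assumed to be a zero-argument, module-level switch that routes fastai's
# progress output through VSCodeProgressCallback when running in VS Code.
setup_vscode_progress()
```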
@@ -69,12 +69,12 @@ class MedDataset:
     def suggestion(self):
         """Voxel value that appears most often in dim_0, dim_1 and dim_2, and whether the data should be reoriented."""

-        resample = [self.df.voxel_0.mode()[0], self.df.voxel_1.mode()[0], self.df.voxel_2.mode()[0]]
+        resample = [float(self.df.voxel_0.mode()[0]), float(self.df.voxel_1.mode()[0]), float(self.df.voxel_2.mode()[0])]
         return resample, self.reorder

     def _get_data_info(self, fn: str):
         """Private method to collect information about an image file."""
-        _, o, _ = med_img_reader(fn,
+        _, o, _ = med_img_reader(fn, reorder=self.reorder, only_tensor=False, dtype=self.dtype)

         info_dict = {'path': fn, 'dim_0': o.shape[1], 'dim_1': o.shape[2], 'dim_2': o.shape[3],
                      'voxel_0': round(o.spacing[0], 4), 'voxel_1': round(o.spacing[1], 4), 'voxel_2': round(o.spacing[2], 4),
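With these casts, `suggestion()` returns plain Python floats that pickle and log cleanly. A hedged sketch of the usual flow; the `MedDataset` constructor arguments are assumed from the fastMONAI tutorials:

```python
# Hedged sketch: MedDataset.suggestion() now yields native floats.
from fastMONAI.vision_all import *

img_paths = [...]  # assumed: a list of image file paths
med_dataset = MedDataset(img_list=img_paths, max_workers=4)  # assumed args

resample, reorder = med_dataset.suggestion()
print(all(isinstance(v, float) for v in resample))  # True after this change
```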
@@ -98,10 +98,10 @@ class MedDataset:

             ratio = org_voxels/resample
             new_dims = (org_dims * ratio).T
-            dims = [new_dims[0].max().round(), new_dims[1].max().round(), new_dims[2].max().round()]
+            dims = [float(new_dims[0].max().round()), float(new_dims[1].max().round()), float(new_dims[2].max().round())]

         else:
-            dims = [df.dim_0.max(), df.dim_1.max(), df.dim_2.max()]
+            dims = [float(self.df.dim_0.max()), float(self.df.dim_1.max()), float(self.df.dim_2.max())]

         return dims

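Both hunks address the same issue (and the second one also swaps an undefined `df` for `self.df`): pandas reductions such as `.mode()` and `.max()` return NumPy scalar types rather than plain Python numbers, and those can leak into pickled settings and logged parameters. A standalone illustration:

```python
# pandas reductions return NumPy scalars, not built-in numbers.
import pandas as pd

df = pd.DataFrame({"dim_0": [160, 192, 192]})
m = df.dim_0.max()
print(type(m))          # <class 'numpy.int64'>
print(type(float(m)))   # <class 'float'>
```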
@@ -0,0 +1,237 @@
+# AUTOGENERATED! DO NOT EDIT! File to edit: ../nbs/07_utils.ipynb.
+
+# %% auto 0
+__all__ = ['store_variables', 'load_variables', 'print_colab_gpu_info', 'ModelTrackingCallback']
+
+# %% ../nbs/07_utils.ipynb 1
+import pickle
+import torch
+from pathlib import Path
+import mlflow
+import mlflow.pytorch
+import os
+import tempfile
+import json
+from fastai.callback.core import Callback
+from typing import Any
+
+# %% ../nbs/07_utils.ipynb 3
+def store_variables(pkl_fn: str | Path, size: list, reorder: bool, resample: int | list):
+    """Save variable values in a pickle file."""
+
+    var_vals = [size, reorder, resample]
+
+    with open(pkl_fn, 'wb') as f:
+        pickle.dump(var_vals, f)
+
+# %% ../nbs/07_utils.ipynb 4
+def load_variables(pkl_fn: str | Path):
+    """Loads stored variable values from a pickle file.
+
+    Args:
+        pkl_fn: File path of the pickle file to be loaded.
+
+    Returns:
+        The deserialized value of the pickled data.
+    """
+    with open(pkl_fn, 'rb') as f:
+        return pickle.load(f)
+
+# %% ../nbs/07_utils.ipynb 5
+def print_colab_gpu_info():
+    """Check if we have a GPU attached to the runtime."""
+
+    colab_gpu_msg = (f"{'#'*80}\n"
+                     "Remember to attach a GPU to your Colab Runtime:"
+                     "\n1. From the **Runtime** menu select **Change Runtime Type**"
+                     "\n2. Choose **GPU** from the drop-down menu"
+                     "\n3. Click **'SAVE'**\n"
+                     f"{'#'*80}")
+
+    if torch.cuda.is_available(): print('GPU attached.')
+    else: print(colab_gpu_msg)
+
+# %% ../nbs/07_utils.ipynb 6
+class ModelTrackingCallback(Callback):
+    """
+    A fastai callback for comprehensive MLflow experiment tracking.
+
+    This callback automatically logs hyperparameters, metrics, model artifacts,
+    and configuration to MLflow during training.
+    """
+
+    def __init__(
+        self,
+        model_name: str,
+        loss_function: str,
+        item_tfms: list[Any],
+        size: list[int],
+        resample: list[float],
+        reorder: bool
+    ):
+        """
+        Initialize the MLflow tracking callback.
+
+        Args:
+            model_name: Name of the model architecture for registration
+            loss_function: Name of the loss function being used
+            item_tfms: Item transforms applied to the data
+            size: Model input dimensions
+            resample: Resampling dimensions
+            reorder: Whether the data should be reoriented
+        """
+        self.model_name = model_name
+        self.loss_function = loss_function
+        self.item_tfms = item_tfms
+        self.size = size
+        self.resample = resample
+        self.reorder = reorder
+
+        self.config = self._build_config()
+
+    def extract_all_params(self, tfm):
+        """
+        Extract all parameters from a transform object for detailed logging.
+
+        Args:
+            tfm: Transform object to extract parameters from
+
+        Returns:
+            dict: Dictionary with 'name' and 'params' keys containing transform details
+        """
+        class_name = tfm.__class__.__name__
+        params = {}
+
+        for key, value in tfm.__dict__.items():
+            if not key.startswith('_') and key != '__signature__':
+                if hasattr(value, '__dict__') and hasattr(value, 'target_shape'):
+                    params['target_shape'] = value.target_shape
+                elif hasattr(value, '__dict__') and not key.startswith('_'):
+                    nested_params = {k: v for k, v in value.__dict__.items()
+                                     if not k.startswith('_') and isinstance(v, (int, float, str, bool, tuple, list))}
+                    params.update(nested_params)
+                elif isinstance(value, (int, float, str, bool, tuple, list)):
+                    params[key] = value
+
+        return {
+            'name': class_name,
+            'params': params
+        }
+
+    def _build_config(self) -> dict[str, Any]:
+        """Build configuration dictionary from initialization parameters."""
+        # Extract detailed transform information
+        transform_details = [self.extract_all_params(tfm) for tfm in self.item_tfms]
+
+        return {
+            "model_name": self.model_name,
+            "loss_function": self.loss_function,
+            "transform_details": transform_details,
+            "size": self.size,
+            "resample": self.resample,
+            "reorder": self.reorder,
+        }
+
+    def _extract_training_params(self) -> dict[str, Any]:
+        """Extract training hyperparameters from the learner."""
+        params = {}
+
+        params["epochs"] = self.learn.n_epoch
+        params["learning_rate"] = float(self.learn.lr)
+        params["optimizer"] = self.learn.opt_func.__name__
+        params["batch_size"] = self.learn.dls.bs
+
+        params["loss_function"] = self.config["loss_function"]
+        params["size"] = self.config["size"]
+        params["resample"] = self.config["resample"]
+        params["reorder"] = self.config["reorder"]
+
+        params["transformations"] = json.dumps(
+            self.config["transform_details"],
+            indent=2,
+            separators=(',', ': ')
+        )
+
+        return params
+
+    def _extract_epoch_metrics(self) -> dict[str, float]:
+        """Extract metrics from the current epoch."""
+        recorder = self.learn.recorder
+
+        # Get custom metric names and values (skip 'epoch' and 'time')
+        metric_names = recorder.metric_names[2:]
+        raw_metric_values = recorder.log[2:]
+
+        metrics = {}
+
+        # Process each metric, handling both scalars and tensors
+        for name, val in zip(metric_names, raw_metric_values):
+            if isinstance(val, torch.Tensor):
+                if val.numel() == 1:
+                    # Single value tensor (like binary dice score)
+                    metrics[name] = float(val)
+                else:
+                    # Multi-element tensor (like multiclass dice scores)
+                    val_list = val.tolist() if hasattr(val, 'tolist') else list(val)
+                    # Log individual class scores
+                    for i, class_score in enumerate(val_list):
+                        metrics[f"{name}_class_{i+1}"] = float(class_score)
+                    # Log mean across classes
+                    metrics[f"{name}_mean"] = float(torch.mean(val))
+            else:
+                metrics[name] = float(val)
+
+        # Handle loss values
+        if len(recorder.log) >= 2:
+            metrics['train_loss'] = float(recorder.log[1])
+        if len(recorder.log) >= 3:
+            metrics['valid_loss'] = float(recorder.log[2])
+
+        return metrics
+
+    def _save_model_artifacts(self, temp_dir: Path) -> None:
+        """Save model weights, learner, and configuration as artifacts."""
+        weights_path = temp_dir / "weights"
+        self.learn.save(str(weights_path))
+
+        weights_file = f"{weights_path}.pth"
+        if os.path.exists(weights_file):
+            mlflow.log_artifact(weights_file, "model")
+
+        learner_path = temp_dir / "learner.pkl"
+        self.learn.export(str(learner_path))
+        mlflow.log_artifact(str(learner_path), "model")
+
+        config_path = temp_dir / "inference_settings.pkl"
+        store_variables(config_path, self.size, self.reorder, self.resample)
+        mlflow.log_artifact(str(config_path), "config")
+
+    def _register_pytorch_model(self) -> None:
+        """Register the PyTorch model with MLflow."""
+        mlflow.pytorch.log_model(
+            pytorch_model=self.learn.model,
+            registered_model_name=self.model_name
+        )
+
+    def before_fit(self) -> None:
+        """Log hyperparameters before training starts."""
+        params = self._extract_training_params()
+        mlflow.log_params(params)
+
+    def after_epoch(self) -> None:
+        """Log metrics after each epoch."""
+        metrics = self._extract_epoch_metrics()
+        if metrics:
+            mlflow.log_metrics(metrics, step=self.learn.epoch)
+
+    def after_fit(self) -> None:
+        """Log model artifacts after training completion."""
+        print("\nTraining finished. Logging model artifacts to MLflow...")
+
+        with tempfile.TemporaryDirectory() as temp_dir:
+            temp_path = Path(temp_dir)
+
+            self._save_model_artifacts(temp_path)
+
+            self._register_pytorch_model()
+
+        print(f"MLflow run completed. Run ID: {mlflow.active_run().info.run_id}")
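The constructor signature above is complete, so wiring the callback into a fastai `Learner` is direct; since `before_fit` calls `mlflow.log_params` and `after_fit` reads `mlflow.active_run()`, an MLflow run should be open around `fit`. A minimal sketch, assuming an existing fastMONAI `learn`, its `item_tfms`, and `size`/`resample`/`reorder` values such as those stored by `store_variables`:

```python
# Hedged sketch: tracking a fastMONAI training run with ModelTrackingCallback.
import mlflow
from fastMONAI.utils import ModelTrackingCallback

cb = ModelTrackingCallback(
    model_name="unet3d-demo",    # hypothetical registered-model name
    loss_function="DiceCELoss",  # logged as a string; the callback never calls it
    item_tfms=item_tfms,         # assumed: the transforms used to build the DataLoaders
    size=size,                   # assumed: from MedDataset / store_variables
    resample=resample,
    reorder=reorder,
)

mlflow.set_experiment("fastmonai-demo")  # hypothetical experiment name
with mlflow.start_run():                 # keep the run open through after_fit
    learn.fit_one_cycle(2, cbs=cb)       # learn: an existing fastai Learner
```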