fastMONAI 0.4.0.2__py3-none-any.whl → 0.5.1__py3-none-any.whl
This diff compares the contents of two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
- fastMONAI/__init__.py +1 -1
- fastMONAI/_modidx.py +64 -2
- fastMONAI/dataset_info.py +4 -4
- fastMONAI/utils.py +368 -2
- fastMONAI/vision_core.py +156 -14
- fastMONAI/vision_data.py +20 -18
- fastMONAI/vision_inference.py +17 -3
- fastmonai-0.5.1.dist-info/METADATA +149 -0
- fastmonai-0.5.1.dist-info/RECORD +20 -0
- {fastMONAI-0.4.0.2.dist-info → fastmonai-0.5.1.dist-info}/WHEEL +1 -1
- fastMONAI-0.4.0.2.dist-info/METADATA +0 -104
- fastMONAI-0.4.0.2.dist-info/RECORD +0 -20
- {fastMONAI-0.4.0.2.dist-info → fastmonai-0.5.1.dist-info}/entry_points.txt +0 -0
- {fastMONAI-0.4.0.2.dist-info → fastmonai-0.5.1.dist-info/licenses}/LICENSE +0 -0
- {fastMONAI-0.4.0.2.dist-info → fastmonai-0.5.1.dist-info}/top_level.txt +0 -0
fastMONAI/__init__.py
CHANGED
@@ -1 +1 @@
-__version__ = "0.4.0.2"
+__version__ = "0.5.1"
fastMONAI/_modidx.py
CHANGED
@@ -47,7 +47,41 @@ d = { 'settings': { 'branch': 'master',
                           'fastMONAI/external_data.py')},
   'fastMONAI.research_utils': { 'fastMONAI.research_utils.pred_postprocess': ( 'research_utils.html#pred_postprocess',
                                                                                'fastMONAI/research_utils.py')},
-  'fastMONAI.utils': { 'fastMONAI.utils.
+  'fastMONAI.utils': { 'fastMONAI.utils.MLflowUIManager': ('utils.html#mlflowuimanager', 'fastMONAI/utils.py'),
+                       'fastMONAI.utils.MLflowUIManager.__init__': ('utils.html#mlflowuimanager.__init__', 'fastMONAI/utils.py'),
+                       'fastMONAI.utils.MLflowUIManager.check_mlflow_installed': ( 'utils.html#mlflowuimanager.check_mlflow_installed',
+                                                                                   'fastMONAI/utils.py'),
+                       'fastMONAI.utils.MLflowUIManager.find_available_port': ( 'utils.html#mlflowuimanager.find_available_port',
+                                                                                'fastMONAI/utils.py'),
+                       'fastMONAI.utils.MLflowUIManager.is_mlflow_running': ( 'utils.html#mlflowuimanager.is_mlflow_running',
+                                                                              'fastMONAI/utils.py'),
+                       'fastMONAI.utils.MLflowUIManager.is_port_available': ( 'utils.html#mlflowuimanager.is_port_available',
+                                                                              'fastMONAI/utils.py'),
+                       'fastMONAI.utils.MLflowUIManager.start_ui': ('utils.html#mlflowuimanager.start_ui', 'fastMONAI/utils.py'),
+                       'fastMONAI.utils.MLflowUIManager.status': ('utils.html#mlflowuimanager.status', 'fastMONAI/utils.py'),
+                       'fastMONAI.utils.MLflowUIManager.stop': ('utils.html#mlflowuimanager.stop', 'fastMONAI/utils.py'),
+                       'fastMONAI.utils.ModelTrackingCallback': ('utils.html#modeltrackingcallback', 'fastMONAI/utils.py'),
+                       'fastMONAI.utils.ModelTrackingCallback.__init__': ( 'utils.html#modeltrackingcallback.__init__',
+                                                                           'fastMONAI/utils.py'),
+                       'fastMONAI.utils.ModelTrackingCallback._build_config': ( 'utils.html#modeltrackingcallback._build_config',
+                                                                                'fastMONAI/utils.py'),
+                       'fastMONAI.utils.ModelTrackingCallback._extract_epoch_metrics': ( 'utils.html#modeltrackingcallback._extract_epoch_metrics',
+                                                                                         'fastMONAI/utils.py'),
+                       'fastMONAI.utils.ModelTrackingCallback._extract_training_params': ( 'utils.html#modeltrackingcallback._extract_training_params',
+                                                                                           'fastMONAI/utils.py'),
+                       'fastMONAI.utils.ModelTrackingCallback._register_pytorch_model': ( 'utils.html#modeltrackingcallback._register_pytorch_model',
+                                                                                          'fastMONAI/utils.py'),
+                       'fastMONAI.utils.ModelTrackingCallback._save_model_artifacts': ( 'utils.html#modeltrackingcallback._save_model_artifacts',
+                                                                                        'fastMONAI/utils.py'),
+                       'fastMONAI.utils.ModelTrackingCallback.after_epoch': ( 'utils.html#modeltrackingcallback.after_epoch',
+                                                                              'fastMONAI/utils.py'),
+                       'fastMONAI.utils.ModelTrackingCallback.after_fit': ( 'utils.html#modeltrackingcallback.after_fit',
+                                                                            'fastMONAI/utils.py'),
+                       'fastMONAI.utils.ModelTrackingCallback.before_fit': ( 'utils.html#modeltrackingcallback.before_fit',
+                                                                             'fastMONAI/utils.py'),
+                       'fastMONAI.utils.ModelTrackingCallback.extract_all_params': ( 'utils.html#modeltrackingcallback.extract_all_params',
+                                                                                     'fastMONAI/utils.py'),
+                       'fastMONAI.utils.load_variables': ('utils.html#load_variables', 'fastMONAI/utils.py'),
                        'fastMONAI.utils.print_colab_gpu_info': ('utils.html#print_colab_gpu_info', 'fastMONAI/utils.py'),
                        'fastMONAI.utils.store_variables': ('utils.html#store_variables', 'fastMONAI/utils.py')},
   'fastMONAI.vision_all': {},
@@ -138,23 +172,51 @@ d = { 'settings': { 'branch': 'master',
                                  'fastMONAI.vision_augmentation.do_pad_or_crop': ( 'vision_augment.html#do_pad_or_crop',
                                                                                    'fastMONAI/vision_augmentation.py')},
   'fastMONAI.vision_core': { 'fastMONAI.vision_core.MedBase': ('vision_core.html#medbase', 'fastMONAI/vision_core.py'),
+                             'fastMONAI.vision_core.MedBase.__copy__': ( 'vision_core.html#medbase.__copy__',
+                                                                         'fastMONAI/vision_core.py'),
+                             'fastMONAI.vision_core.MedBase.__deepcopy__': ( 'vision_core.html#medbase.__deepcopy__',
+                                                                             'fastMONAI/vision_core.py'),
+                             'fastMONAI.vision_core.MedBase.__new__': ( 'vision_core.html#medbase.__new__',
+                                                                        'fastMONAI/vision_core.py'),
                              'fastMONAI.vision_core.MedBase.__repr__': ( 'vision_core.html#medbase.__repr__',
                                                                          'fastMONAI/vision_core.py'),
                              'fastMONAI.vision_core.MedBase.create': ( 'vision_core.html#medbase.create',
                                                                        'fastMONAI/vision_core.py'),
                              'fastMONAI.vision_core.MedBase.item_preprocessing': ( 'vision_core.html#medbase.item_preprocessing',
                                                                                    'fastMONAI/vision_core.py'),
+                             'fastMONAI.vision_core.MedBase.new_empty': ( 'vision_core.html#medbase.new_empty',
+                                                                          'fastMONAI/vision_core.py'),
                              'fastMONAI.vision_core.MedBase.show': ('vision_core.html#medbase.show', 'fastMONAI/vision_core.py'),
                              'fastMONAI.vision_core.MedImage': ('vision_core.html#medimage', 'fastMONAI/vision_core.py'),
                              'fastMONAI.vision_core.MedMask': ('vision_core.html#medmask', 'fastMONAI/vision_core.py'),
                              'fastMONAI.vision_core.MetaResolver': ('vision_core.html#metaresolver', 'fastMONAI/vision_core.py'),
+                             'fastMONAI.vision_core.VSCodeProgressCallback': ( 'vision_core.html#vscodeprogresscallback',
+                                                                               'fastMONAI/vision_core.py'),
+                             'fastMONAI.vision_core.VSCodeProgressCallback.__init__': ( 'vision_core.html#vscodeprogresscallback.__init__',
+                                                                                        'fastMONAI/vision_core.py'),
+                             'fastMONAI.vision_core.VSCodeProgressCallback._detect_vscode_environment': ( 'vision_core.html#vscodeprogresscallback._detect_vscode_environment',
+                                                                                                          'fastMONAI/vision_core.py'),
+                             'fastMONAI.vision_core.VSCodeProgressCallback.after_batch': ( 'vision_core.html#vscodeprogresscallback.after_batch',
+                                                                                           'fastMONAI/vision_core.py'),
+                             'fastMONAI.vision_core.VSCodeProgressCallback.after_fit': ( 'vision_core.html#vscodeprogresscallback.after_fit',
+                                                                                         'fastMONAI/vision_core.py'),
+                             'fastMONAI.vision_core.VSCodeProgressCallback.after_validate': ( 'vision_core.html#vscodeprogresscallback.after_validate',
+                                                                                              'fastMONAI/vision_core.py'),
+                             'fastMONAI.vision_core.VSCodeProgressCallback.before_epoch': ( 'vision_core.html#vscodeprogresscallback.before_epoch',
+                                                                                            'fastMONAI/vision_core.py'),
+                             'fastMONAI.vision_core.VSCodeProgressCallback.before_fit': ( 'vision_core.html#vscodeprogresscallback.before_fit',
+                                                                                          'fastMONAI/vision_core.py'),
+                             'fastMONAI.vision_core.VSCodeProgressCallback.before_validate': ( 'vision_core.html#vscodeprogresscallback.before_validate',
+                                                                                               'fastMONAI/vision_core.py'),
                              'fastMONAI.vision_core._load_and_preprocess': ( 'vision_core.html#_load_and_preprocess',
                                                                              'fastMONAI/vision_core.py'),
                              'fastMONAI.vision_core._multi_channel': ( 'vision_core.html#_multi_channel',
                                                                        'fastMONAI/vision_core.py'),
                              'fastMONAI.vision_core._preprocess': ('vision_core.html#_preprocess', 'fastMONAI/vision_core.py'),
                              'fastMONAI.vision_core.med_img_reader': ( 'vision_core.html#med_img_reader',
-                                                                       'fastMONAI/vision_core.py')
+                                                                       'fastMONAI/vision_core.py'),
+                             'fastMONAI.vision_core.setup_vscode_progress': ( 'vision_core.html#setup_vscode_progress',
+                                                                              'fastMONAI/vision_core.py')},
   'fastMONAI.vision_data': { 'fastMONAI.vision_data.MedDataBlock': ('vision_data.html#meddatablock', 'fastMONAI/vision_data.py'),
                              'fastMONAI.vision_data.MedDataBlock.__init__': ( 'vision_data.html#meddatablock.__init__',
                                                                               'fastMONAI/vision_data.py'),
fastMONAI/dataset_info.py
CHANGED
@@ -69,12 +69,12 @@ class MedDataset:
     def suggestion(self):
         """Voxel value that appears most often in dim_0, dim_1 and dim_2, and whether the data should be reoriented."""

-        resample = [self.df.voxel_0.mode()[0], self.df.voxel_1.mode()[0], self.df.voxel_2.mode()[0]]
+        resample = [float(self.df.voxel_0.mode()[0]), float(self.df.voxel_1.mode()[0]), float(self.df.voxel_2.mode()[0])]
         return resample, self.reorder

     def _get_data_info(self, fn: str):
         """Private method to collect information about an image file."""
-        _, o, _ = med_img_reader(fn,
+        _, o, _ = med_img_reader(fn, reorder=self.reorder, only_tensor=False, dtype=self.dtype)

         info_dict = {'path': fn, 'dim_0': o.shape[1], 'dim_1': o.shape[2], 'dim_2': o.shape[3],
                      'voxel_0': round(o.spacing[0], 4), 'voxel_1': round(o.spacing[1], 4), 'voxel_2': round(o.spacing[2], 4),
@@ -98,10 +98,10 @@ class MedDataset:

             ratio = org_voxels/resample
             new_dims = (org_dims * ratio).T
-            dims = [new_dims[0].max().round(), new_dims[1].max().round(), new_dims[2].max().round()]
+            dims = [float(new_dims[0].max().round()), float(new_dims[1].max().round()), float(new_dims[2].max().round())]

         else:
-            dims = [df.dim_0.max(), df.dim_1.max(), df.dim_2.max()]
+            dims = [float(self.df.dim_0.max()), float(self.df.dim_1.max()), float(self.df.dim_2.max())]

         return dims

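Note on the float() casts above: the dataset suggestions now come back as plain Python floats rather than NumPy scalars, which pickle and print cleanly, and _get_data_info() now forwards the dataset's reorder and dtype settings to med_img_reader. A hedged usage sketch under assumed names (the data path is a placeholder, and get_largest_img_size is assumed to be the helper whose body this second hunk patches):

from fastMONAI.vision_all import MedDataset

med_dataset = MedDataset(path='../data/images', max_workers=4)   # placeholder path
resample, reorder = med_dataset.suggestion()                     # e.g. [1.0, 1.0, 1.0], True
img_size = med_dataset.get_largest_img_size(resample=resample)   # e.g. [256.0, 256.0, 160.0]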
fastMONAI/utils.py
CHANGED
@@ -1,15 +1,23 @@
 # AUTOGENERATED! DO NOT EDIT! File to edit: ../nbs/07_utils.ipynb.

 # %% auto 0
-__all__ = ['store_variables', 'load_variables', 'print_colab_gpu_info']
+__all__ = ['store_variables', 'load_variables', 'print_colab_gpu_info', 'ModelTrackingCallback', 'MLflowUIManager']

 # %% ../nbs/07_utils.ipynb 1
 import pickle
 import torch
 from pathlib import Path
+import mlflow
+import mlflow.pytorch
+import os
+import tempfile
+import json
+from fastai.callback.core import Callback
+from fastcore.foundation import L
+from typing import Any

 # %% ../nbs/07_utils.ipynb 3
-def store_variables(pkl_fn:
+def store_variables(pkl_fn: str | Path, size: list, reorder: bool, resample: int | list):
     """Save variable values in a pickle file."""

     var_vals = [size, reorder, resample]
@@ -43,3 +51,361 @@ def print_colab_gpu_info():

     if torch.cuda.is_available(): print('GPU attached.')
     else: print(colab_gpu_msg)
+
+# %% ../nbs/07_utils.ipynb 6
+class ModelTrackingCallback(Callback):
+    """
+    A FastAI callback for comprehensive MLflow experiment tracking.
+
+    This callback automatically logs hyperparameters, metrics, model artifacts,
+    and configuration to MLflow during training.
+    """
+
+    def __init__(
+        self,
+        model_name: str,
+        loss_function: str,
+        item_tfms: list[Any],
+        size: list[int],
+        resample: list[float],
+        reorder: bool
+    ):
+        """
+        Initialize the MLflow tracking callback.
+
+        Args:
+            model_name: Name of the model architecture for registration
+            loss_function: Name of the loss function being used
+            size: Model input dimensions
+            resample: Resampling dimensions
+            reorder: Whether reordering augmentation is applied
+        """
+        self.model_name = model_name
+        self.loss_function = loss_function
+        self.item_tfms = item_tfms
+        self.size = size
+        self.resample = resample
+        self.reorder = reorder
+
+        self.config = self._build_config()
+
+    def extract_all_params(self, tfm):
+        """
+        Extract all parameters from a transform object for detailed logging.
+
+        Args:
+            tfm: Transform object to extract parameters from
+
+        Returns:
+            dict: Dictionary with 'name' and 'params' keys containing transform details
+        """
+        class_name = tfm.__class__.__name__
+        params = {}
+
+        for key, value in tfm.__dict__.items():
+            if not key.startswith('_') and key != '__signature__':
+                if hasattr(value, '__dict__') and hasattr(value, 'target_shape'):
+                    params['target_shape'] = value.target_shape
+                elif hasattr(value, '__dict__') and not key.startswith('_'):
+                    nested_params = {k: v for k, v in value.__dict__.items()
+                                     if not k.startswith('_') and isinstance(v, (int, float, str, bool, tuple, list))}
+                    params.update(nested_params)
+                elif isinstance(value, (int, float, str, bool, tuple, list)):
+                    params[key] = value
+
+        return {
+            'name': class_name,
+            'params': params
+        }
+
+    def _build_config(self) -> dict[str, Any]:
+        """Build configuration dictionary from initialization parameters."""
+        # Extract detailed transform information
+        transform_details = [self.extract_all_params(tfm) for tfm in self.item_tfms]
+
+        return {
+            "model_name": self.model_name,
+            "loss_function": self.loss_function,
+            "transform_details": transform_details,
+            "size": self.size,
+            "resample": self.resample,
+            "reorder": self.reorder,
+        }
+
+    def _extract_training_params(self) -> dict[str, Any]:
+        """Extract training hyperparameters from the learner."""
+        params = {}
+
+        params["epochs"] = self.learn.n_epoch
+        params["learning_rate"] = float(self.learn.lr)
+        params["optimizer"] = self.learn.opt_func.__name__
+        params["batch_size"] = self.learn.dls.bs
+
+        params["loss_function"] = self.config["loss_function"]
+        params["size"] = self.config["size"]
+        params["resample"] = self.config["resample"]
+        params["reorder"] = self.config["reorder"]
+
+        params["transformations"] = json.dumps(
+            self.config["transform_details"],
+            indent=2,
+            separators=(',', ': ')
+        )
+
+        return params
+
+    def _extract_epoch_metrics(self) -> dict[str, float]:
+        """Extract metrics from the current epoch."""
+        recorder = self.learn.recorder
+
+        # Get custom metric names and values (skip 'epoch' and 'time')
+        metric_names = recorder.metric_names[2:]
+        raw_metric_values = recorder.log[2:]
+
+        metrics = {}
+
+        # Process each metric, handling both scalars and tensors
+        for name, val in zip(metric_names, raw_metric_values):
+            if val is None:
+                continue  # Skip None values during inference
+            if isinstance(val, torch.Tensor):
+                if val.numel() == 1:
+                    # Single value tensor (like binary dice score)
+                    metrics[name] = float(val)
+                else:
+                    # Multi-element tensor (like multiclass dice scores)
+                    val_list = val.tolist() if hasattr(val, 'tolist') else list(val)
+                    # Log individual class scores
+                    for i, class_score in enumerate(val_list):
+                        metrics[f"{name}_class_{i+1}"] = float(class_score)
+                    # Log mean across classes
+                    metrics[f"{name}_mean"] = float(torch.mean(val))
+            else:
+                metrics[name] = float(val)
+
+        # Handle loss values
+        if len(recorder.log) >= 2:
+            if recorder.log[1] is not None:
+                metrics['train_loss'] = float(recorder.log[1])
+            if len(recorder.log) >= 3 and recorder.log[2] is not None:
+                metrics['valid_loss'] = float(recorder.log[2])
+
+        return metrics
+
+    def _save_model_artifacts(self, temp_dir: Path) -> None:
+        """Save model weights, learner, and configuration as artifacts."""
+        weights_path = temp_dir / "weights"
+        self.learn.save(str(weights_path))
+
+        weights_file = f"{weights_path}.pth"
+        if os.path.exists(weights_file):
+            mlflow.log_artifact(weights_file, "model")
+
+        # Remove MLflow callbacks before exporting learner for inference
+        # This prevents the callback from being triggered during inference
+        original_cbs = self.learn.cbs.copy()  # Save original callbacks
+
+        # Remove ModelTrackingCallback instances from learner using proper collection type
+        filtered_cbs = L([cb for cb in self.learn.cbs if not isinstance(cb, ModelTrackingCallback)])
+        self.learn.cbs = filtered_cbs
+
+        # Export clean learner without MLflow callbacks
+        learner_path = temp_dir / "learner.pkl"
+        self.learn.export(str(learner_path))
+        mlflow.log_artifact(str(learner_path), "model")
+
+        # Restore original callbacks for current session
+        self.learn.cbs = original_cbs
+
+        config_path = temp_dir / "inference_settings.pkl"
+        store_variables(config_path, self.size, self.reorder, self.resample)
+        mlflow.log_artifact(str(config_path), "config")
+
+    def _register_pytorch_model(self) -> None:
+        """Register the PyTorch model with MLflow."""
+        mlflow.pytorch.log_model(
+            pytorch_model=self.learn.model,
+            registered_model_name=self.model_name
+        )
+
+    def before_fit(self) -> None:
+        """Log hyperparameters before training starts."""
+        params = self._extract_training_params()
+        mlflow.log_params(params)
+
+    def after_epoch(self) -> None:
+        """Log metrics after each epoch."""
+        metrics = self._extract_epoch_metrics()
+        if metrics:
+            mlflow.log_metrics(metrics, step=self.learn.epoch)
+
+    def after_fit(self) -> None:
+        """Log model artifacts after training completion."""
+        print("\nTraining finished. Logging model artifacts to MLflow...")
+
+        with tempfile.TemporaryDirectory() as temp_dir:
+            temp_path = Path(temp_dir)
+
+            self._save_model_artifacts(temp_path)
+
+            self._register_pytorch_model()
+
+        print(f"MLflow run completed. Run ID: {mlflow.active_run().info.run_id}")
+
+# %% ../nbs/07_utils.ipynb 7
+import subprocess
+import threading
+import time
+import socket
+import os
+from IPython.display import display, HTML, clear_output
+from IPython.core.magic import register_line_magic
+from IPython import get_ipython
+import requests
+import shutil
+
+class MLflowUIManager:
+    def __init__(self):
+        self.process = None
+        self.thread = None
+        self.port = 5001
+        self.host = '0.0.0.0'
+        self.backend_store_uri = './mlruns'
+
+    def is_port_available(self, port):
+        """Check if a port is available."""
+        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
+            try:
+                s.bind(('localhost', port))
+                return True
+            except OSError:
+                return False
+
+    def is_mlflow_running(self):
+        """Check if MLflow UI is actually responding."""
+        try:
+            response = requests.get(f'http://localhost:{self.port}', timeout=2)
+            return response.status_code == 200
+        except:
+            return False
+
+    def find_available_port(self, start_port=5001):
+        """Find an available port starting from start_port."""
+        for port in range(start_port, start_port + 10):
+            if self.is_port_available(port):
+                return port
+        return None
+
+    def check_mlflow_installed(self):
+        """Check if MLflow is installed."""
+        return shutil.which('mlflow') is not None
+
+    def start_ui(self, auto_open=True, quiet=False):
+        """Start MLflow UI with better error handling and user feedback."""
+
+        # Check if MLflow is installed
+        if not self.check_mlflow_installed():
+            if not quiet:
+                display(HTML('<div style="color: #d32f2f; font-weight: bold; font-size: 14px;">❌ MLflow not installed. Run: pip install mlflow</div>'))
+            return False
+
+        # Find available port
+        available_port = self.find_available_port(self.port)
+        if available_port is None:
+            if not quiet:
+                display(HTML('<div style="color: #d32f2f; font-weight: bold; font-size: 14px;">❌ No available ports found (5001-5010)</div>'))
+            return False
+
+        self.port = available_port
+
+        # Start MLflow UI in a separate thread
+        def run_mlflow():
+            try:
+                self.process = subprocess.Popen([
+                    'mlflow', 'ui',
+                    '--host', self.host,
+                    '--port', str(self.port),
+                    '--backend-store-uri', self.backend_store_uri
+                ], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
+                self.process.wait()
+            except Exception as e:
+                if not quiet:
+                    display(HTML(f'<div style="color: #d32f2f; font-weight: bold; font-size: 14px;">❌ Error: {str(e)}</div>'))
+
+        self.thread = threading.Thread(target=run_mlflow, daemon=True)
+        self.thread.start()
+
+        # Wait and check if server started successfully
+        max_wait = 10
+        for i in range(max_wait):
+            time.sleep(1)
+            if self.is_mlflow_running():
+                if quiet:
+                    # Bright, visible link for quiet mode
+                    display(HTML(f'''
+                        <a href="http://localhost:{self.port}" target="_blank"
+                           style="color: #1976d2; font-weight: bold; font-size: 16px; text-decoration: underline;">
+                            🔗 MLflow UI (Port {self.port})
+                        </a>
+                    '''))
+                else:
+                    # Success message with high contrast colors
+                    display(HTML(f'''
+                        <div style="background-color: #c8e6c9; border: 2px solid #388e3c; padding: 15px; border-radius: 8px; margin: 10px 0;">
+                            <div style="color: #1b5e20; font-weight: bold; font-size: 16px; margin-bottom: 10px;">
+                                ✅ MLflow UI is running successfully!
+                            </div>
+                            <a href="http://localhost:{self.port}" target="_blank"
+                               style="background-color: #1976d2; color: white; padding: 12px 24px; text-decoration: none; border-radius: 6px; font-weight: bold; font-size: 14px; display: inline-block; margin: 5px 0;">
+                                🔗 Open MLflow UI
+                            </a>
+                            <div style="margin-top: 10px;">
+                                <div style="color: #424242; font-size: 13px;">URL: http://localhost:{self.port}</div>
+                            </div>
+                        </div>
+                    '''))
+                return True
+
+        # If we get here, server didn't start properly
+        if not quiet:
+            display(HTML('<div style="color: #d32f2f; font-weight: bold; font-size: 14px;">❌ Failed to start MLflow UI</div>'))
+        return False
+
+    def stop(self):
+        """Stop the MLflow UI server."""
+        if self.process:
+            self.process.terminate()
+            self.process = None
+            display(HTML('''
+                <div style="background-color: #ffecb3; border: 2px solid #f57c00; padding: 10px; border-radius: 6px;">
+                    <span style="color: #e65100; font-weight: bold; font-size: 14px;">🛑 MLflow UI stopped</span>
+                </div>
+            '''))
+        else:
+            display(HTML('''
+                <div style="background-color: #f0f0f0; border: 2px solid #757575; padding: 10px; border-radius: 6px;">
+                    <span style="color: #424242; font-weight: bold; font-size: 14px;">ℹ️ MLflow UI is not currently running</span>
+                </div>
+            '''))
+
+    def status(self):
+        """Check MLflow UI status."""
+        if self.is_mlflow_running():
+            display(HTML(f'''
+                <div style="background-color: #c8e6c9; border: 2px solid #388e3c; padding: 10px; border-radius: 6px;">
+                    <div style="color: #1b5e20; font-weight: bold; font-size: 14px;">✅ MLflow UI is running</div>
+                    <a href="http://localhost:{self.port}" target="_blank"
+                       style="color: #1976d2; font-weight: bold; text-decoration: underline;">
+                        http://localhost:{self.port}
+                    </a>
+                </div>
+            '''))
+        else:
+            display(HTML('''
+                <div style="background-color: #ffcdd2; border: 2px solid #d32f2f; padding: 10px; border-radius: 6px;">
+                    <div style="color: #b71c1c; font-weight: bold; font-size: 14px;">❌ MLflow UI is not running</div>
+                    <div style="color: #424242; font-size: 13px; margin-top: 5px;">
+                        Run <code style="background-color: #f5f5f5; padding: 2px 4px; border-radius: 3px;">mlflow_ui.start_ui()</code> to start it.
+                    </div>
+                </div>
+            '''))
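The intended notebook workflow for these two additions: build a ModelTrackingCallback with the same size/resample/reorder values used for preprocessing, pass it to a fit call inside an active MLflow run (before_fit logs params, after_epoch logs metrics, after_fit logs the weights, the exported learner, and inference_settings.pkl), then browse results with MLflowUIManager. A hedged wiring sketch; learn, item_tfms, and the experiment name are placeholders, not taken from this diff:

import mlflow
from fastMONAI.utils import ModelTrackingCallback, MLflowUIManager

mlflow.set_experiment('demo-experiment')        # placeholder name

tracking_cb = ModelTrackingCallback(
    model_name='unet3d',                        # also used for MLflow model registry
    loss_function='DiceLoss',
    item_tfms=item_tfms,                        # the transforms handed to your DataBlock
    size=size, resample=resample, reorder=reorder,
)

with mlflow.start_run():                        # after_fit reads mlflow.active_run()
    learn.fit_one_cycle(2, cbs=[tracking_cb])

mlflow_ui = MLflowUIManager()
mlflow_ui.start_ui()                            # probes ports 5001-5010 and renders a link
mlflow_ui.status()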
fastMONAI/vision_core.py
CHANGED
@@ -1,12 +1,13 @@
 # AUTOGENERATED! DO NOT EDIT! File to edit: ../nbs/01_vision_core.ipynb.

 # %% auto 0
-__all__ = ['med_img_reader', 'MetaResolver', 'MedBase', 'MedImage', 'MedMask']
+__all__ = ['med_img_reader', 'MetaResolver', 'MedBase', 'MedImage', 'MedMask', 'VSCodeProgressCallback', 'setup_vscode_progress']

 # %% ../nbs/01_vision_core.ipynb 2
 from .vision_plot import *
 from fastai.data.all import *
 from torchio import ScalarImage, LabelMap, ToCanonical, Resample
+import copy

 # %% ../nbs/01_vision_core.ipynb 5
 def _preprocess(obj, reorder, resample):
@@ -56,7 +57,7 @@ def _load_and_preprocess(file_path, reorder, resample, dtype):
     return org_img, input_img, org_size

 # %% ../nbs/01_vision_core.ipynb 7
-def _multi_channel(image_paths:
+def _multi_channel(image_paths: L | list, reorder: bool, resample: list, only_tensor: bool, dtype):
     """
     Load and preprocess multisequence data.

@@ -64,8 +65,8 @@ def _multi_channel(image_paths: (L, list), reorder: bool, resample: list, dtype,
         image_paths: List of image paths (e.g., T1, T2, T1CE, DWI).
         reorder: Whether to reorder data for canonical (RAS+) orientation.
         resample: Whether to resample image to different voxel sizes and dimensions.
-        dtype: Desired datatype for output.
         only_tensor: Whether to return only image tensor.
+        dtype: Desired datatype for output.

     Returns:
         torch.Tensor: A stacked 4D tensor, if `only_tensor` is True.
@@ -82,33 +83,28 @@ def _multi_channel(image_paths: (L, list), reorder: bool, resample: list, dtype,
         input_img.set_data(tensor)
     return org_img, input_img, org_size

-
 # %% ../nbs/01_vision_core.ipynb 8
-def med_img_reader(file_path:
-
-):
+def med_img_reader(file_path: str | Path | L | list, reorder: bool = False, resample: list = None,
+                   only_tensor: bool = True, dtype = torch.Tensor):
     """Loads and preprocesses a medical image.

     Args:
         file_path: Path to the image. Can be a string, Path object or a list.
-        dtype: Datatype for the return value. Defaults to torch.Tensor.
         reorder: Whether to reorder the data to be closest to canonical
             (RAS+) orientation. Defaults to False.
         resample: Whether to resample image to different voxel sizes and
            image dimensions. Defaults to None.
         only_tensor: Whether to return only image tensor. Defaults to True.
+        dtype: Datatype for the return value. Defaults to torch.Tensor.

     Returns:
         The preprocessed image. Returns only the image tensor if
         only_tensor is True, otherwise returns original image,
         preprocessed image, and original size.
     """
-    # if isinstance(file_path, str) and ';' in file_path:
-    #     return _multi_channel(
-    #         file_path.split(';'), reorder, resample, dtype, only_tensor)

-    if isinstance(file_path, (
-        return _multi_channel(file_path, reorder, resample,
+    if isinstance(file_path, (list, L)):
+        return _multi_channel(file_path, reorder, resample, only_tensor, dtype)

     org_img, input_img, org_size = _load_and_preprocess(
         file_path, reorder, resample, dtype)
@@ -154,7 +150,50 @@ class MedBase(torch.Tensor, metaclass=MetaResolver):
         if isinstance(fn, torch.Tensor):
             return cls(fn)

-        return med_img_reader(fn,
+        return med_img_reader(fn, resample=cls.resample, reorder=cls.reorder, dtype=cls)
+
+    def __new__(cls, x, **kwargs):
+        """Creates a new instance of MedBase from a tensor."""
+        if isinstance(x, torch.Tensor):
+            # Create tensor of the same type and copy data
+            res = torch.Tensor._make_subclass(cls, x.data, x.requires_grad)
+            # Copy any additional attributes
+            if hasattr(x, 'affine_matrix'):
+                res.affine_matrix = x.affine_matrix
+            return res
+        else:
+            # Handle other types by converting to tensor first
+            tensor = torch.as_tensor(x, **kwargs)
+            return cls.__new__(cls, tensor)
+
+    def new_empty(self, size, **kwargs):
+        """Create a new empty tensor of the same type."""
+        # Create new tensor with same type and device/dtype
+        kwargs.setdefault('dtype', self.dtype)
+        kwargs.setdefault('device', self.device)
+        new_tensor = torch.empty(size, **kwargs)
+        # Use __new__ to create proper subclass instance
+        return self.__class__.__new__(self.__class__, new_tensor)
+
+    def __copy__(self):
+        """Shallow copy implementation."""
+        copied = self.__class__.__new__(self.__class__, self.clone())
+        # Copy class attributes
+        if hasattr(self, 'affine_matrix'):
+            copied.affine_matrix = self.affine_matrix
+        return copied
+
+    def __deepcopy__(self, memo):
+        """Deep copy implementation."""
+        # Create a deep copy of the tensor data
+        copied_data = self.clone()
+        copied = self.__class__.__new__(self.__class__, copied_data)
+        # Deep copy class attributes
+        if hasattr(self, 'affine_matrix') and self.affine_matrix is not None:
+            copied.affine_matrix = copy.deepcopy(self.affine_matrix, memo)
+        else:
+            copied.affine_matrix = None
+        return copied

     @classmethod
     def item_preprocessing(cls, resample: (list, int, tuple), reorder: bool):
@@ -208,3 +247,106 @@ class MedImage(MedBase):
 class MedMask(MedBase):
     """Subclass of MedBase that represents an mask object."""
     _show_args = {'alpha':0.5, 'cmap':'tab20'}
+
+# %% ../nbs/01_vision_core.ipynb 14
+import os
+from fastai.callback.progress import ProgressCallback
+from fastai.callback.core import Callback
+import sys
+from IPython import get_ipython
+
+class VSCodeProgressCallback(ProgressCallback):
+    """Enhanced progress callback that works better in VS Code notebooks."""
+
+    def __init__(self, **kwargs):
+        super().__init__(**kwargs)
+        self.is_vscode = self._detect_vscode_environment()
+        self.lr_find_progress = None
+
+    def _detect_vscode_environment(self):
+        """Detect if running in VS Code Jupyter environment."""
+        ipython = get_ipython()
+        if ipython is None:
+            return True  # Assume VS Code if no IPython (safer default)
+        # VS Code detection - more comprehensive check
+        kernel_name = str(type(ipython.kernel)).lower() if hasattr(ipython, 'kernel') else ''
+        return ('vscode' in kernel_name or
+                'zmq' in kernel_name or  # VS Code often uses ZMQInteractiveShell
+                not hasattr(ipython, 'display_pub'))  # Missing display publisher often indicates VS Code

+    def before_fit(self):
+        """Initialize progress tracking before training."""
+        if self.is_vscode:
+            if hasattr(self.learn, 'lr_finder') and self.learn.lr_finder:
+                # This is lr_find, handle differently
+                print("🔍 Starting Learning Rate Finder...")
+                self.lr_find_progress = 0
+            else:
+                # Regular training
+                print(f"🚀 Training for {self.learn.n_epoch} epochs...")
+        super().before_fit()
+
+    def before_epoch(self):
+        """Initialize epoch progress."""
+        if self.is_vscode:
+            if hasattr(self.learn, 'lr_finder') and self.learn.lr_finder:
+                print(f"📊 LR Find - Testing learning rates...")
+            else:
+                print(f"📈 Epoch {self.epoch+1}/{self.learn.n_epoch}")
+            sys.stdout.flush()
+        super().before_epoch()
+
+    def after_batch(self):
+        """Update progress after each batch."""
+        super().after_batch()
+        if self.is_vscode:
+            if hasattr(self.learn, 'lr_finder') and self.learn.lr_finder:
+                # Special handling for lr_find
+                self.lr_find_progress = getattr(self, 'iter', 0) + 1
+                total = getattr(self, 'n_iter', 100)
+                if self.lr_find_progress % max(1, total // 10) == 0:
+                    progress = (self.lr_find_progress / total) * 100
+                    print(f"⏳ LR Find Progress: {self.lr_find_progress}/{total} ({progress:.1f}%)")
+                    sys.stdout.flush()
+            else:
+                # Regular training progress
+                if hasattr(self, 'iter') and hasattr(self, 'n_iter'):
+                    if self.iter % max(1, self.n_iter // 20) == 0:
+                        progress = (self.iter / self.n_iter) * 100
+                        print(f"⏳ Batch {self.iter}/{self.n_iter} ({progress:.1f}%)")
+                        sys.stdout.flush()
+
+    def after_fit(self):
+        """Complete progress tracking after training."""
+        if self.is_vscode:
+            if hasattr(self.learn, 'lr_finder') and self.learn.lr_finder:
+                print("✅ Learning Rate Finder completed!")
+            else:
+                print("✅ Training completed!")
+            sys.stdout.flush()
+        super().after_fit()
+
+    def before_validate(self):
+        """Update before validation."""
+        if self.is_vscode and not (hasattr(self.learn, 'lr_finder') and self.learn.lr_finder):
+            print("🔄 Validating...")
+            sys.stdout.flush()
+        super().before_validate()
+
+    def after_validate(self):
+        """Update after validation."""
+        if self.is_vscode and not (hasattr(self.learn, 'lr_finder') and self.learn.lr_finder):
+            print("✅ Validation completed")
+            sys.stdout.flush()
+        super().after_validate()
+
+def setup_vscode_progress():
+    """Configure fastai to use VS Code-compatible progress callback."""
+    from fastai.learner import defaults
+
+    # Replace default ProgressCallback with VSCodeProgressCallback
+    if ProgressCallback in defaults.callbacks:
+        defaults.callbacks = [cb if cb != ProgressCallback else VSCodeProgressCallback
+                              for cb in defaults.callbacks]
+
+    print("✅ Configured VS Code-compatible progress callback")
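setup_vscode_progress() rewrites fastai's defaults.callbacks list, so it only affects Learner objects created after the call. A short sketch of the assumed usage:

from fastMONAI.vision_core import setup_vscode_progress

setup_vscode_progress()   # swap ProgressCallback for VSCodeProgressCallback globally
# learn = ...             # Learners built from here on print plain-text progress
# learn.fit(1)            # e.g. "🚀 Training for 1 epochs..." plus batch percentages

The callback falls back to plain print() output when it detects a VS Code/ZMQ kernel (or no IPython at all), environments where fastai's HTML progress widgets often fail to render.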
fastMONAI/vision_data.py
CHANGED
@@ -5,10 +5,12 @@ __all__ = ['pred_to_multiclass_mask', 'batch_pred_to_multiclass_mask', 'pred_to_
            'MedImageDataLoaders', 'show_batch', 'show_results', 'plot_top_losses']

 # %% ../nbs/02_vision_data.ipynb 2
+import torch
 from fastai.data.all import *
 from fastai.vision.data import *
 from .vision_core import *
 from .vision_plot import find_max_slice
+from plum import dispatch

 # %% ../nbs/02_vision_data.ipynb 5
 def pred_to_multiclass_mask(pred: torch.Tensor) -> torch.Tensor:
@@ -65,7 +67,7 @@ class MedDataBlock(DataBlock):
     """Container to quickly build dataloaders."""
     #TODO add get_x
     def __init__(self, blocks: list = None, dl_type: TfmdDL = None, getters: list = None,
-                 n_inp: int = None, item_tfms: list = None, batch_tfms: list = None,
+                 n_inp: int | None = None, item_tfms: list = None, batch_tfms: list = None,
                  reorder: bool = False, resample: (int, list) = None, **kwargs):

         super().__init__(blocks, dl_type, getters, n_inp, item_tfms,
@@ -109,7 +111,7 @@ class MedImageDataLoaders(DataLoaders):
         return cls.from_dblock(dblock, df, **kwargs)

 # %% ../nbs/02_vision_data.ipynb 16
-@
+@dispatch
 def show_batch(x: MedImage, y, samples, ctxs=None, max_n=6, nrows=None,
                ncols=None, figsize=None, channel=0, slice_index=None,
                anatomical_plane=0, **kwargs):
@@ -131,7 +133,7 @@ def show_batch(x: MedImage, y, samples, ctxs=None, max_n=6, nrows=None,
     return ctxs

 # %% ../nbs/02_vision_data.ipynb 17
-@
+@dispatch
 def show_batch(x: MedImage, y: MedMask, samples, ctxs=None, max_n=6, nrows=None,
                ncols=None, figsize=None, channel=0, slice_index=None,
                anatomical_plane=0, **kwargs):
@@ -162,10 +164,10 @@ def show_batch(x: MedImage, y: MedMask, samples, ctxs=None, max_n=6, nrows=None,
     return ctxs

 # %% ../nbs/02_vision_data.ipynb 19
-@
+@dispatch
 def show_results(x: MedImage, y: torch.Tensor, samples, outs, ctxs=None, max_n: int = 6,
-                 nrows: int = None, ncols: int = None, figsize=None, channel: int = 0,
-                 slice_index: int = None, anatomical_plane: int = 0, **kwargs):
+                 nrows: int | None = None, ncols: int | None = None, figsize=None, channel: int = 0,
+                 slice_index: int | None = None, anatomical_plane: int = 0, **kwargs):
     """Showing samples and their corresponding predictions for regression tasks."""

     if ctxs is None:
@@ -188,10 +190,10 @@ def show_results(x: MedImage, y: torch.Tensor, samples, outs, ctxs=None, max_n:
     return ctxs

 # %% ../nbs/02_vision_data.ipynb 20
-@
+@dispatch
 def show_results(x: MedImage, y: TensorCategory, samples, outs, ctxs=None,
-                 max_n: int = 6, nrows: int = None, ncols: int = None, figsize=None, channel: int = 0,
-                 slice_index: int = None, anatomical_plane: int = 0, **kwargs):
+                 max_n: int = 6, nrows: int | None = None, ncols: int | None = None, figsize=None, channel: int = 0,
+                 slice_index: int | None = None, anatomical_plane: int = 0, **kwargs):
     """Showing samples and their corresponding predictions for classification tasks."""

     if ctxs is None:
@@ -209,10 +211,10 @@ def show_results(x: MedImage, y: TensorCategory, samples, outs, ctxs=None,
     return ctxs

 # %% ../nbs/02_vision_data.ipynb 21
-@
+@dispatch
 def show_results(x: MedImage, y: MedMask, samples, outs, ctxs=None, max_n: int = 6,
-                 nrows: int = None, ncols: int = 3, figsize=None, channel: int = 0,
-                 slice_index: int = None, anatomical_plane: int = 0, **kwargs):
+                 nrows: int | None = None, ncols: int = 3, figsize=None, channel: int = 0,
+                 slice_index: int | None = None, anatomical_plane: int = 0, **kwargs):
     """Showing decoded samples and their corresponding predictions for segmentation tasks."""

     if ctxs is None:
@@ -240,9 +242,9 @@ def show_results(x: MedImage, y: MedMask, samples, outs, ctxs=None, max_n: int =
     return ctxs

 # %% ../nbs/02_vision_data.ipynb 23
-@
-def plot_top_losses(x: MedImage, y: TensorCategory, samples, outs, raws, losses, nrows: int = None,
-                    ncols: int = None, figsize=None, channel: int = 0, slice_index: int = None,
+@dispatch
+def plot_top_losses(x: MedImage, y: TensorCategory, samples, outs, raws, losses, nrows: int | None = None,
+                    ncols: int | None = None, figsize=None, channel: int = 0, slice_index: int | None = None,
                     anatomical_plane: int = 0, **kwargs):
     """Show images in top_losses along with their prediction, actual, loss, and probability of actual class."""

@@ -258,10 +260,10 @@ def plot_top_losses(x: MedImage, y: TensorCategory, samples, outs, raws, losses,
         ax.set_title(f'{o[0]}/{s[1]} / {l.item():.2f} / {r.max().item():.2f}')

 # %% ../nbs/02_vision_data.ipynb 24
-@
+@dispatch
 def plot_top_losses(x: MedImage, y: TensorMultiCategory, samples, outs, raws,
-                    losses, nrows: int = None, ncols: int = None, figsize=None,
-                    channel: int = 0, slice_index: int = None,
+                    losses, nrows: int | None = None, ncols: int | None = None, figsize=None,
+                    channel: int = 0, slice_index: int | None = None,
                     anatomical_plane: int = 0, **kwargs):
     # TODO: not tested yet
     axs = get_grid(len(samples), nrows=nrows, ncols=ncols, figsize=figsize)
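The bare `@` lines on the removed side are decorators whose names were truncated in this view (fastai's @typedispatch is the likely predecessor, though the diff does not show it); the added side standardizes on plum's @dispatch, matching the new plum-dispatch requirement in the 0.5.1 metadata. The mechanism it relies on, sketched standalone:

from plum import dispatch

@dispatch
def describe(x: int) -> str:
    return f'int {x}'

@dispatch
def describe(x: str) -> str:
    return f'str {x!r}'

print(describe(3))        # 'int 3': the overload is picked from the runtime type
print(describe('mask'))   # "str 'mask'"

This is the same selection rule that lets show_batch(x: MedImage, y: MedMask, ...) take precedence over the generic show_batch(x: MedImage, y, ...) overload when the targets are masks.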
fastMONAI/vision_inference.py
CHANGED
@@ -94,14 +94,28 @@ def inference(learn_inf, reorder, resample, fn: (str, Path) = '',
     reoriented_array = _to_original_orientation(input_img.as_sitk(),
                                                 ('').join(org_img.orientation))

-
+    # Create new TorchIO image with proper affine matrix
+    # Spatial properties (spacing, origin, direction) are automatically derived from affine
+    from torchio import ScalarImage
+    import numpy as np
+
+    # Ensure we have a valid affine matrix
+    if hasattr(org_img, 'affine') and org_img.affine is not None:
+        affine_matrix = org_img.affine.copy()
+    elif MedBase.affine_matrix is not None:
+        affine_matrix = MedBase.affine_matrix.copy()
+    else:
+        # Fallback to identity affine if no affine available
+        affine_matrix = np.eye(4, dtype=np.float64)
+
+    pred_img = ScalarImage(tensor=reoriented_array, affine=affine_matrix)

     if save_path:
         save_fn = Path(save_path)/('pred_' + Path(fn).parts[-1])
-
+        pred_img.save(save_fn)
         return save_fn

-    return
+    return pred_img

 # %% ../nbs/06_vision_inference.ipynb 9
 def compute_binary_tumor_volume(mask_data: Image):
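Behaviorally, inference() now always materializes the prediction as a torchio ScalarImage with a resolved affine (source image affine, then the stored MedBase.affine_matrix, then an identity fallback) and returns it; the removed code path fell through to a bare return, i.e. None, when no save_path was given. A hedged call sketch, where the filename is a placeholder and learn_inf/reorder/resample follow the signature in the hunk header:

pred_img = inference(learn_inf, reorder, resample, fn='subject01.nii.gz')
print(pred_img.affine.shape)       # (4, 4); identity if no source affine was found

saved_path = inference(learn_inf, reorder, resample, fn='subject01.nii.gz',
                       save_path='predictions')   # writes predictions/pred_subject01.nii.gz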
fastmonai-0.5.1.dist-info/METADATA
ADDED
@@ -0,0 +1,149 @@
+Metadata-Version: 2.4
+Name: fastMONAI
+Version: 0.5.1
+Summary: fastMONAI library
+Home-page: https://github.com/MMIV-ML/fastMONAI
+Author: Satheshkumar Kaliyugarasan
+Author-email: skaliyugarasan@hotmail.com
+License: Apache Software License 2.0
+Keywords: deep learning,medical imaging
+Classifier: Development Status :: 3 - Alpha
+Classifier: Intended Audience :: Developers
+Classifier: Natural Language :: English
+Classifier: Programming Language :: Python :: 3.10
+Classifier: Programming Language :: Python :: 3.11
+Classifier: Programming Language :: Python :: 3.12
+Classifier: License :: OSI Approved :: Apache Software License
+Requires-Python: >=3.10
+Description-Content-Type: text/markdown
+License-File: LICENSE
+Requires-Dist: fastai==2.8.3
+Requires-Dist: monai==1.5.0
+Requires-Dist: torchio==0.20.19
+Requires-Dist: xlrd>=1.2.0
+Requires-Dist: scikit-image==0.25.2
+Requires-Dist: imagedata==3.8.4
+Requires-Dist: mlflow==3.3.1
+Requires-Dist: huggingface-hub
+Requires-Dist: gdown
+Requires-Dist: gradio
+Requires-Dist: opencv-python
+Requires-Dist: plum-dispatch
+Provides-Extra: dev
+Requires-Dist: ipywidgets; extra == "dev"
+Requires-Dist: nbdev; extra == "dev"
+Requires-Dist: tabulate; extra == "dev"
+Requires-Dist: quarto; extra == "dev"
+Dynamic: author
+Dynamic: author-email
+Dynamic: classifier
+Dynamic: description
+Dynamic: description-content-type
+Dynamic: home-page
+Dynamic: keywords
+Dynamic: license
+Dynamic: license-file
+Dynamic: provides-extra
+Dynamic: requires-dist
+Dynamic: requires-python
+Dynamic: summary
+
+# Overview
+
+
+<!-- WARNING: THIS FILE WAS AUTOGENERATED! DO NOT EDIT! -->
+
+
+
+
+[](https://fastmonai.no)
+[](https://pypi.org/project/fastMONAI)
+
+A low-code Python-based open source deep learning library built on top
+of [fastai](https://github.com/fastai/fastai),
+[MONAI](https://monai.io/), [TorchIO](https://torchio.readthedocs.io/),
+and [Imagedata](https://imagedata.readthedocs.io/).
+
+fastMONAI simplifies the use of state-of-the-art deep learning
+techniques in 3D medical image analysis for solving classification,
+regression, and segmentation tasks. fastMONAI provides the users with
+functionalities to step through data loading, preprocessing, training,
+and result interpretations.
+
+<b>Note:</b> This documentation is also available as interactive
+notebooks.
+
+## Requirements
+
+- **Python:** 3.10, 3.11, or 3.12 (Python 3.11 recommended)
+- **GPU:** CUDA-compatible GPU recommended for training (CPU supported
+  for inference)
+
+# Installation
+
+## Environment setup (recommended)
+
+We recommend using a conda environment to avoid dependency conflicts:
+
+`conda create -n fastmonai python=3.11`
+
+`conda activate fastmonai`
+
+## Quick Install [(PyPI)](https://pypi.org/project/fastMONAI/)
+
+`pip install fastMONAI`
+
+## Development install [(GitHub)](https://github.com/MMIV-ML/fastMONAI)
+
+If you want to install an editable version of fastMONAI for development:
+
+    git clone https://github.com/MMIV-ML/fastMONAI
+    cd fastMONAI
+
+    # Create development environment
+    conda create -n fastmonai-dev python=3.11
+    conda activate fastmonai-dev
+
+    # Install in development mode
+    pip install -e '.[dev]'
+
+# Getting started
+
+The best way to get started using fastMONAI is to read our
+[paper](https://www.sciencedirect.com/science/article/pii/S2665963823001203)
+and dive into our beginner-friendly [video
+tutorial](https://fastmonai.no/tutorial_beginner_video). For a deeper
+understanding and hands-on experience, our comprehensive instructional
+notebooks will walk you through model training for various tasks like
+classification, regression, and segmentation. See the docs at
+https://fastmonai.no for more information.
+
+| Notebook | 1-Click Notebook |
+|:---|----|
+| [10a_tutorial_classification.ipynb](https://nbviewer.org/github/MMIV-ML/fastMONAI/blob/master/nbs/10a_tutorial_classification.ipynb) <br>shows how to construct a binary classification model based on MRI data. | [](https://colab.research.google.com/github/MMIV-ML/fastMONAI/blob/master/nbs/10a_tutorial_classification.ipynb) |
+| [10b_tutorial_regression.ipynb](https://nbviewer.org/github/MMIV-ML/fastMONAI/blob/master/nbs/10b_tutorial_regression.ipynb) <br>shows how to construct a model to predict the age of a subject from MRI scans (“brain age”). | [](https://colab.research.google.com/github/MMIV-ML/fastMONAI/blob/master/nbs/10b_tutorial_regression.ipynb) |
+| [10c_tutorial_binary_segmentation.ipynb](https://nbviewer.org/github/MMIV-ML/fastMONAI/blob/master/nbs/10c_tutorial_binary_segmentation.ipynb) <br>shows how to do binary segmentation (extract the left atrium from monomodal cardiac MRI). | [](https://colab.research.google.com/github/MMIV-ML/fastMONAI/blob/master/nbs/10c_tutorial_binary_segmentation.ipynb) |
+| [10d_tutorial_multiclass_segmentation.ipynb](https://nbviewer.org/github/MMIV-ML/fastMONAI/blob/master/nbs/10d_tutorial_multiclass_segmentation.ipynb) <br>shows how to perform segmentation from multimodal MRI (brain tumor segmentation). | [](https://colab.research.google.com/github/MMIV-ML/fastMONAI/blob/master/nbs/10d_tutorial_multiclass_segmentation.ipynb) |
+
+# How to contribute
+
+We welcome contributions! See
+[CONTRIBUTING.md](https://github.com/MMIV-ML/fastMONAI/blob/master/CONTRIBUTING.md)
+
+# Citing fastMONAI
+
+If you are using fastMONAI in your research, please use the following
+citation:
+
+    @article{KALIYUGARASAN2023100583,
+    title = {fastMONAI: A low-code deep learning library for medical image analysis},
+    journal = {Software Impacts},
+    pages = {100583},
+    year = {2023},
+    issn = {2665-9638},
+    doi = {https://doi.org/10.1016/j.simpa.2023.100583},
+    url = {https://www.sciencedirect.com/science/article/pii/S2665963823001203},
+    author = {Satheshkumar Kaliyugarasan and Alexander S. Lundervold},
+    keywords = {Deep learning, Medical imaging, Radiology},
+    abstract = {We introduce fastMONAI, an open-source Python-based deep learning library for 3D medical imaging. Drawing upon the strengths of fastai, MONAI, and TorchIO, fastMONAI simplifies the use of advanced techniques for tasks like classification, regression, and segmentation. The library's design addresses domain-specific demands while promoting best practices, facilitating efficient model development. It offers newcomers an easier entry into the field while keeping the option to make advanced, lower-level customizations if needed. This paper describes the library's design, impact, limitations, and plans for future work.}
+    }
fastmonai-0.5.1.dist-info/RECORD
ADDED
@@ -0,0 +1,20 @@
+fastMONAI/__init__.py,sha256=eZ1bOun1DDVV0YLOBW4wj2FP1ajReLjbIrGmzN7ASBw,22
+fastMONAI/_modidx.py,sha256=iuYWVoaM3bmpb_Dv6lMQU8zOVPA76hYfnyFxVgKyuBg,37759
+fastMONAI/dataset_info.py,sha256=aJ-utYZ1OrA32RIQbF7jHxcDE8SgOZE3Vt1AojxnvZc,5026
+fastMONAI/external_data.py,sha256=IVj9GbIRFh9bTFkIa2wySUObSnNfZiaVtuzFxOFAi0Q,12219
+fastMONAI/research_utils.py,sha256=LZu62g8BQAVYS4dD7qDsKHJXZnDd1uLkJ6LoaMDhUhk,590
+fastMONAI/utils.py,sha256=jG8SiYebcrPJsmnmMZh4SokWRj7McdJ_gftINnfcE1A,16590
+fastMONAI/vision_all.py,sha256=_l6F8ZlUaPYcplNG6mg1-1xssYforByEe4zECbPzTck,359
+fastMONAI/vision_augmentation.py,sha256=lAlrLm8jbXRmk9a6e8_o_CNTS6Pyp-KKNXwjpelUUJc,9070
+fastMONAI/vision_core.py,sha256=k4RUBzZuh9W8J4zbcVzXCKfJxkKCsBDG0oSRMwiCNp0,13848
+fastMONAI/vision_data.py,sha256=VCB3hyBN7dYuLiYGSGeuWlBTMvb2cLVo_sbENrRWe5Q,11510
+fastMONAI/vision_inference.py,sha256=3SaJbKGbgaf9ON9PH5DtvfNlhAurov_Idnrlp4jyU9w,6625
+fastMONAI/vision_loss.py,sha256=NrHnk1yD4EBKsp6aippppXU4l-mwmsZOqE_bsZP3ZNI,3591
+fastMONAI/vision_metrics.py,sha256=CVxdOBPaMJT6Mo5jF3WoQj6a3C-_FsnBicMAU_ZrFS8,3549
+fastMONAI/vision_plot.py,sha256=-X_nNBXx7lYCZSFBIN1587ZTA3T_-2ASBM4K31wU660,3792
+fastmonai-0.5.1.dist-info/licenses/LICENSE,sha256=xV8xoN4VOL0uw9X8RSs2IMuD_Ss_a9yAbtGNeBWZwnw,11337
+fastmonai-0.5.1.dist-info/METADATA,sha256=VZcJOlNQR7g3mJZkMnnoukPPJZ0yt_B5T9r5yPdCLBk,7094
+fastmonai-0.5.1.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+fastmonai-0.5.1.dist-info/entry_points.txt,sha256=mVBsykSXMairzzk3hJaQ8c-UiwUZqGnn4aFZ24CpsBM,40
+fastmonai-0.5.1.dist-info/top_level.txt,sha256=o8y7SWF9odtnIT3jvYtUn9okbJRlaAMCy7oPFCeQvQ8,10
+fastmonai-0.5.1.dist-info/RECORD,,

fastMONAI-0.4.0.2.dist-info/METADATA
DELETED
@@ -1,104 +0,0 @@
-Metadata-Version: 2.1
-Name: fastMONAI
-Version: 0.4.0.2
-Summary: fastMONAI library
-Home-page: https://github.com/MMIV-ML/fastMONAI
-Author: Satheshkumar Kaliyugarasan
-Author-email: skaliyugarasan@hotmail.com
-License: Apache Software License 2.0
-Keywords: deep learning,medical imaging
-Classifier: Development Status :: 3 - Alpha
-Classifier: Intended Audience :: Developers
-Classifier: Natural Language :: English
-Classifier: Programming Language :: Python :: 3.7
-Classifier: Programming Language :: Python :: 3.8
-Classifier: Programming Language :: Python :: 3.9
-Classifier: Programming Language :: Python :: 3.10
-Classifier: License :: OSI Approved :: Apache Software License
-Requires-Python: >=3.7
-Description-Content-Type: text/markdown
-License-File: LICENSE
-Requires-Dist: fastai (==2.7.12)
-Requires-Dist: monai (==1.2.0)
-Requires-Dist: torchio (==0.18.91)
-Requires-Dist: xlrd (>=1.2.0)
-Requires-Dist: scikit-image (==0.19.3)
-Requires-Dist: imagedata (==2.1.3)
-Requires-Dist: huggingface-hub
-Requires-Dist: gdown
-Requires-Dist: gradio
-Requires-Dist: opencv-python
-Provides-Extra: dev
-Requires-Dist: ipywidgets ; extra == 'dev'
-Requires-Dist: nbdev ; extra == 'dev'
-Requires-Dist: tabulate ; extra == 'dev'
-
-Overview
-================
-
-<!-- WARNING: THIS FILE WAS AUTOGENERATED! DO NOT EDIT! -->
-
-
-
-
-[](https://fastmonai.no)
-[](https://pypi.org/project/fastMONAI)
-
-A low-code Python-based open source deep learning library built on top of [fastai](https://github.com/fastai/fastai), [MONAI](https://monai.io/), [TorchIO](https://torchio.readthedocs.io/), and [Imagedata](https://imagedata.readthedocs.io/).
-
-fastMONAI simplifies the use of state-of-the-art deep learning
-techniques in 3D medical image analysis for solving classification,
-regression, and segmentation tasks. fastMONAI provides the users with
-functionalities to step through data loading, preprocessing, training,
-and result interpretations.
-
-<b>Note:</b> This documentation is also available as interactive
-notebooks.
-
-# Installing
-
-## From [PyPI](https://pypi.org/project/fastMONAI/)
-
-`pip install fastMONAI`
-
-## From [GitHub](https://github.com/MMIV-ML/fastMONAI)
-
-If you want to install an editable version of fastMONAI run:
-
-- `git clone https://github.com/MMIV-ML/fastMONAI`
-- `pip install -e 'fastMONAI[dev]'`
-
-# Getting started
-
-The best way to get started using fastMONAI is to read our [paper](https://www.sciencedirect.com/science/article/pii/S2665963823001203) and dive into our beginner-friendly [video tutorial](https://fastmonai.no/tutorial_beginner_video). For a deeper understanding and hands-on experience, our comprehensive instructional notebooks will walk you through model training for various tasks like classification, regression, and segmentation. See the docs at https://fastmonai.no for more information.
-
-| Notebook | 1-Click Notebook |
-|:---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
-| [10a_tutorial_classification.ipynb](https://nbviewer.org/github/MMIV-ML/fastMONAI/blob/master/nbs/10a_tutorial_classification.ipynb) <br>shows how to construct a binary classification model based on MRI data. | [](https://colab.research.google.com/github/MMIV-ML/fastMONAI/blob/master/nbs/10a_tutorial_classification.ipynb) |
-| [10b_tutorial_regression.ipynb](https://nbviewer.org/github/MMIV-ML/fastMONAI/blob/master/nbs/10b_tutorial_regression.ipynb) <br>shows how to construct a model to predict the age of a subject from MRI scans (“brain age”). | [](https://colab.research.google.com/github/MMIV-ML/fastMONAI/blob/master/nbs/10b_tutorial_regression.ipynb) |
-| [10c_tutorial_binary_segmentation.ipynb](https://nbviewer.org/github/MMIV-ML/fastMONAI/blob/master/nbs/10c_tutorial_binary_segmentation.ipynb) <br>shows how to do binary segmentation (extract the left atrium from monomodal cardiac MRI). | [](https://colab.research.google.com/github/MMIV-ML/fastMONAI/blob/master/nbs/10c_tutorial_binary_segmentation.ipynb) |
-| [10d_tutorial_multiclass_segmentation.ipynb](https://nbviewer.org/github/MMIV-ML/fastMONAI/blob/master/nbs/10d_tutorial_multiclass_segmentation.ipynb) <br>shows how to perform segmentation from multimodal MRI (brain tumor segmentation). | [](https://colab.research.google.com/github/MMIV-ML/fastMONAI/blob/master/nbs/10d_tutorial_multiclass_segmentation.ipynb) |
-
-# How to contribute
-
-See
-[CONTRIBUTING.md](https://github.com/MMIV-ML/fastMONAI/blob/master/CONTRIBUTING.md)
-
-# Citing fastMONAI
-
-If you are using fastMONAI in your research, please use the following citation:
-
-```
-@article{KALIYUGARASAN2023100583,
-title = {fastMONAI: A low-code deep learning library for medical image analysis},
-journal = {Software Impacts},
-pages = {100583},
-year = {2023},
-issn = {2665-9638},
-doi = {https://doi.org/10.1016/j.simpa.2023.100583},
-url = {https://www.sciencedirect.com/science/article/pii/S2665963823001203},
-author = {Satheshkumar Kaliyugarasan and Alexander S. Lundervold},
-keywords = {Deep learning, Medical imaging, Radiology},
-abstract = {We introduce fastMONAI, an open-source Python-based deep learning library for 3D medical imaging. Drawing upon the strengths of fastai, MONAI, and TorchIO, fastMONAI simplifies the use of advanced techniques for tasks like classification, regression, and segmentation. The library's design addresses domain-specific demands while promoting best practices, facilitating efficient model development. It offers newcomers an easier entry into the field while keeping the option to make advanced, lower-level customizations if needed. This paper describes the library's design, impact, limitations, and plans for future work.}
-}
-```
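The core-metadata fields deleted above (Name, Version, Requires-Dist, Classifier, and so on) are what pip and the standard library expose at runtime. A minimal sketch of reading them back with `importlib.metadata` (Python 3.8+), assuming fastMONAI is installed in the current environment:

```
from importlib.metadata import metadata, requires

md = metadata("fastMONAI")
print(md["Name"], md["Version"])  # e.g. "fastMONAI 0.5.1" once the new wheel is installed

# Requires-Dist entries come back as PEP 508 requirement strings.
for req in requires("fastMONAI") or []:
    print(req)
```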

fastMONAI-0.4.0.2.dist-info/RECORD
DELETED
@@ -1,20 +0,0 @@
-fastMONAI/__init__.py,sha256=q8C3BwoPX4eeCOynkPZhIOcrFKxXKQQU16YGyth8EAs,24
-fastMONAI/_modidx.py,sha256=qlr-dyFcDjvXOZgM6-r-pYRHtodH07QRsioHD8n72cc,29553
-fastMONAI/dataset_info.py,sha256=w5LKGmEzFtlqhxqzhFLWUfyVkUL0EYypZQCH9Ay6jgg,4948
-fastMONAI/external_data.py,sha256=IVj9GbIRFh9bTFkIa2wySUObSnNfZiaVtuzFxOFAi0Q,12219
-fastMONAI/research_utils.py,sha256=LZu62g8BQAVYS4dD7qDsKHJXZnDd1uLkJ6LoaMDhUhk,590
-fastMONAI/utils.py,sha256=9I5nl6Sb0NbTxhr6FDnW4dapDhzPtmxGcJeXkkY4v3E,1406
-fastMONAI/vision_all.py,sha256=_l6F8ZlUaPYcplNG6mg1-1xssYforByEe4zECbPzTck,359
-fastMONAI/vision_augmentation.py,sha256=lAlrLm8jbXRmk9a6e8_o_CNTS6Pyp-KKNXwjpelUUJc,9070
-fastMONAI/vision_core.py,sha256=PBovhzMzzE16t5V-DYLdpWDqYCKPZdmTUQErsev8kmg,7603
-fastMONAI/vision_data.py,sha256=I7i1rzCaubfZVmU3CyJ0aITcin-fMO9jrb50Edpiro0,11394
-fastMONAI/vision_inference.py,sha256=7sX_ZCE_6myW_hTpJbnPG6s78WN8V7flVRRke1g__jc,5995
-fastMONAI/vision_loss.py,sha256=NrHnk1yD4EBKsp6aippppXU4l-mwmsZOqE_bsZP3ZNI,3591
-fastMONAI/vision_metrics.py,sha256=CVxdOBPaMJT6Mo5jF3WoQj6a3C-_FsnBicMAU_ZrFS8,3549
-fastMONAI/vision_plot.py,sha256=-X_nNBXx7lYCZSFBIN1587ZTA3T_-2ASBM4K31wU660,3792
-fastMONAI-0.4.0.2.dist-info/LICENSE,sha256=xV8xoN4VOL0uw9X8RSs2IMuD_Ss_a9yAbtGNeBWZwnw,11337
-fastMONAI-0.4.0.2.dist-info/METADATA,sha256=kaDdvzXsqWr1x2qIxo8gtFnuQsdaMwkQ4bZWTqhbSFI,7095
-fastMONAI-0.4.0.2.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
-fastMONAI-0.4.0.2.dist-info/entry_points.txt,sha256=mVBsykSXMairzzk3hJaQ8c-UiwUZqGnn4aFZ24CpsBM,40
-fastMONAI-0.4.0.2.dist-info/top_level.txt,sha256=o8y7SWF9odtnIT3jvYtUn9okbJRlaAMCy7oPFCeQvQ8,10
-fastMONAI-0.4.0.2.dist-info/RECORD,,

fastMONAI-0.4.0.2.dist-info/entry_points.txt → fastmonai-0.5.1.dist-info/entry_points.txt
RENAMED
File without changes

fastMONAI-0.4.0.2.dist-info/LICENSE → fastmonai-0.5.1.dist-info/licenses/LICENSE
RENAMED
File without changes

fastMONAI-0.4.0.2.dist-info/top_level.txt → fastmonai-0.5.1.dist-info/top_level.txt
RENAMED
File without changes
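A comparison like the one above can be reproduced offline, since wheels are plain zip archives. A minimal sketch, assuming local copies of both wheels under the hypothetical filenames below:

```
import zipfile

# Hypothetical local copies of the two wheels being compared.
old_whl = zipfile.ZipFile("fastMONAI-0.4.0.2-py3-none-any.whl")
new_whl = zipfile.ZipFile("fastmonai-0.5.1-py3-none-any.whl")

old_names, new_names = set(old_whl.namelist()), set(new_whl.namelist())
print("added:  ", sorted(new_names - old_names))
print("removed:", sorted(old_names - new_names))
print("common: ", len(old_names & new_names), "files unchanged or modified")
```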