fastMONAI 0.4.0.1__py3-none-any.whl → 0.5.0.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
fastMONAI/__init__.py CHANGED
@@ -1 +1 @@
- __version__ = "0.4.0.1"
+ __version__ = "0.5.0.0"
fastMONAI/_modidx.py CHANGED
@@ -47,7 +47,28 @@ d = { 'settings': { 'branch': 'master',
  'fastMONAI/external_data.py')},
  'fastMONAI.research_utils': { 'fastMONAI.research_utils.pred_postprocess': ( 'research_utils.html#pred_postprocess',
  'fastMONAI/research_utils.py')},
- 'fastMONAI.utils': { 'fastMONAI.utils.load_variables': ('utils.html#load_variables', 'fastMONAI/utils.py'),
+ 'fastMONAI.utils': { 'fastMONAI.utils.ModelTrackingCallback': ('utils.html#modeltrackingcallback', 'fastMONAI/utils.py'),
+ 'fastMONAI.utils.ModelTrackingCallback.__init__': ( 'utils.html#modeltrackingcallback.__init__',
+ 'fastMONAI/utils.py'),
+ 'fastMONAI.utils.ModelTrackingCallback._build_config': ( 'utils.html#modeltrackingcallback._build_config',
+ 'fastMONAI/utils.py'),
+ 'fastMONAI.utils.ModelTrackingCallback._extract_epoch_metrics': ( 'utils.html#modeltrackingcallback._extract_epoch_metrics',
+ 'fastMONAI/utils.py'),
+ 'fastMONAI.utils.ModelTrackingCallback._extract_training_params': ( 'utils.html#modeltrackingcallback._extract_training_params',
+ 'fastMONAI/utils.py'),
+ 'fastMONAI.utils.ModelTrackingCallback._register_pytorch_model': ( 'utils.html#modeltrackingcallback._register_pytorch_model',
+ 'fastMONAI/utils.py'),
+ 'fastMONAI.utils.ModelTrackingCallback._save_model_artifacts': ( 'utils.html#modeltrackingcallback._save_model_artifacts',
+ 'fastMONAI/utils.py'),
+ 'fastMONAI.utils.ModelTrackingCallback.after_epoch': ( 'utils.html#modeltrackingcallback.after_epoch',
+ 'fastMONAI/utils.py'),
+ 'fastMONAI.utils.ModelTrackingCallback.after_fit': ( 'utils.html#modeltrackingcallback.after_fit',
+ 'fastMONAI/utils.py'),
+ 'fastMONAI.utils.ModelTrackingCallback.before_fit': ( 'utils.html#modeltrackingcallback.before_fit',
+ 'fastMONAI/utils.py'),
+ 'fastMONAI.utils.ModelTrackingCallback.extract_all_params': ( 'utils.html#modeltrackingcallback.extract_all_params',
+ 'fastMONAI/utils.py'),
+ 'fastMONAI.utils.load_variables': ('utils.html#load_variables', 'fastMONAI/utils.py'),
  'fastMONAI.utils.print_colab_gpu_info': ('utils.html#print_colab_gpu_info', 'fastMONAI/utils.py'),
  'fastMONAI.utils.store_variables': ('utils.html#store_variables', 'fastMONAI/utils.py')},
  'fastMONAI.vision_all': {},
@@ -138,23 +159,51 @@ d = { 'settings': { 'branch': 'master',
  'fastMONAI.vision_augmentation.do_pad_or_crop': ( 'vision_augment.html#do_pad_or_crop',
  'fastMONAI/vision_augmentation.py')},
  'fastMONAI.vision_core': { 'fastMONAI.vision_core.MedBase': ('vision_core.html#medbase', 'fastMONAI/vision_core.py'),
+ 'fastMONAI.vision_core.MedBase.__copy__': ( 'vision_core.html#medbase.__copy__',
+ 'fastMONAI/vision_core.py'),
+ 'fastMONAI.vision_core.MedBase.__deepcopy__': ( 'vision_core.html#medbase.__deepcopy__',
+ 'fastMONAI/vision_core.py'),
+ 'fastMONAI.vision_core.MedBase.__new__': ( 'vision_core.html#medbase.__new__',
+ 'fastMONAI/vision_core.py'),
  'fastMONAI.vision_core.MedBase.__repr__': ( 'vision_core.html#medbase.__repr__',
  'fastMONAI/vision_core.py'),
  'fastMONAI.vision_core.MedBase.create': ( 'vision_core.html#medbase.create',
  'fastMONAI/vision_core.py'),
  'fastMONAI.vision_core.MedBase.item_preprocessing': ( 'vision_core.html#medbase.item_preprocessing',
  'fastMONAI/vision_core.py'),
+ 'fastMONAI.vision_core.MedBase.new_empty': ( 'vision_core.html#medbase.new_empty',
+ 'fastMONAI/vision_core.py'),
  'fastMONAI.vision_core.MedBase.show': ('vision_core.html#medbase.show', 'fastMONAI/vision_core.py'),
  'fastMONAI.vision_core.MedImage': ('vision_core.html#medimage', 'fastMONAI/vision_core.py'),
  'fastMONAI.vision_core.MedMask': ('vision_core.html#medmask', 'fastMONAI/vision_core.py'),
  'fastMONAI.vision_core.MetaResolver': ('vision_core.html#metaresolver', 'fastMONAI/vision_core.py'),
+ 'fastMONAI.vision_core.VSCodeProgressCallback': ( 'vision_core.html#vscodeprogresscallback',
+ 'fastMONAI/vision_core.py'),
+ 'fastMONAI.vision_core.VSCodeProgressCallback.__init__': ( 'vision_core.html#vscodeprogresscallback.__init__',
+ 'fastMONAI/vision_core.py'),
+ 'fastMONAI.vision_core.VSCodeProgressCallback._detect_vscode_environment': ( 'vision_core.html#vscodeprogresscallback._detect_vscode_environment',
+ 'fastMONAI/vision_core.py'),
+ 'fastMONAI.vision_core.VSCodeProgressCallback.after_batch': ( 'vision_core.html#vscodeprogresscallback.after_batch',
+ 'fastMONAI/vision_core.py'),
+ 'fastMONAI.vision_core.VSCodeProgressCallback.after_fit': ( 'vision_core.html#vscodeprogresscallback.after_fit',
+ 'fastMONAI/vision_core.py'),
+ 'fastMONAI.vision_core.VSCodeProgressCallback.after_validate': ( 'vision_core.html#vscodeprogresscallback.after_validate',
+ 'fastMONAI/vision_core.py'),
+ 'fastMONAI.vision_core.VSCodeProgressCallback.before_epoch': ( 'vision_core.html#vscodeprogresscallback.before_epoch',
+ 'fastMONAI/vision_core.py'),
+ 'fastMONAI.vision_core.VSCodeProgressCallback.before_fit': ( 'vision_core.html#vscodeprogresscallback.before_fit',
+ 'fastMONAI/vision_core.py'),
+ 'fastMONAI.vision_core.VSCodeProgressCallback.before_validate': ( 'vision_core.html#vscodeprogresscallback.before_validate',
+ 'fastMONAI/vision_core.py'),
  'fastMONAI.vision_core._load_and_preprocess': ( 'vision_core.html#_load_and_preprocess',
  'fastMONAI/vision_core.py'),
  'fastMONAI.vision_core._multi_channel': ( 'vision_core.html#_multi_channel',
  'fastMONAI/vision_core.py'),
  'fastMONAI.vision_core._preprocess': ('vision_core.html#_preprocess', 'fastMONAI/vision_core.py'),
  'fastMONAI.vision_core.med_img_reader': ( 'vision_core.html#med_img_reader',
- 'fastMONAI/vision_core.py')},
+ 'fastMONAI/vision_core.py'),
+ 'fastMONAI.vision_core.setup_vscode_progress': ( 'vision_core.html#setup_vscode_progress',
+ 'fastMONAI/vision_core.py')},
  'fastMONAI.vision_data': { 'fastMONAI.vision_data.MedDataBlock': ('vision_data.html#meddatablock', 'fastMONAI/vision_data.py'),
  'fastMONAI.vision_data.MedDataBlock.__init__': ( 'vision_data.html#meddatablock.__init__',
  'fastMONAI/vision_data.py'),
fastMONAI/dataset_info.py CHANGED
@@ -69,12 +69,12 @@ class MedDataset:
      def suggestion(self):
          """Voxel value that appears most often in dim_0, dim_1 and dim_2, and whether the data should be reoriented."""

-         resample = [self.df.voxel_0.mode()[0], self.df.voxel_1.mode()[0], self.df.voxel_2.mode()[0]]
+         resample = [float(self.df.voxel_0.mode()[0]), float(self.df.voxel_1.mode()[0]), float(self.df.voxel_2.mode()[0])]

          return resample, self.reorder

      def _get_data_info(self, fn: str):
          """Private method to collect information about an image file."""
-         _, o, _ = med_img_reader(fn, dtype=self.dtype, reorder=self.reorder, only_tensor=False)
+         _, o, _ = med_img_reader(fn, reorder=self.reorder, only_tensor=False, dtype=self.dtype)

          info_dict = {'path': fn, 'dim_0': o.shape[1], 'dim_1': o.shape[2], 'dim_2': o.shape[3],
                       'voxel_0': round(o.spacing[0], 4), 'voxel_1': round(o.spacing[1], 4), 'voxel_2': round(o.spacing[2], 4),
@@ -98,10 +98,10 @@ class MedDataset:
 
              ratio = org_voxels/resample
              new_dims = (org_dims * ratio).T
-             dims = [new_dims[0].max().round(), new_dims[1].max().round(), new_dims[2].max().round()]
+             dims = [float(new_dims[0].max().round()), float(new_dims[1].max().round()), float(new_dims[2].max().round())]

          else:
-             dims = [df.dim_0.max(), df.dim_1.max(), df.dim_2.max()]
+             dims = [float(self.df.dim_0.max()), float(self.df.dim_1.max()), float(self.df.dim_2.max())]

          return dims

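With these `float(...)` casts, `suggestion()` and the size helpers now return plain Python floats rather than numpy scalars, so the values print and serialize cleanly. A usage sketch (the data path is illustrative):

```python
from fastMONAI.dataset_info import MedDataset

med_dataset = MedDataset(path='data/images')  # illustrative path to a folder of volumes
resample, reorder = med_dataset.suggestion()
print(resample, reorder)  # e.g. [1.0, 1.0, 1.0] False -- plain floats, not numpy scalars
```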
fastMONAI/utils.py CHANGED
@@ -1,15 +1,22 @@
  # AUTOGENERATED! DO NOT EDIT! File to edit: ../nbs/07_utils.ipynb.

  # %% auto 0
- __all__ = ['store_variables', 'load_variables', 'print_colab_gpu_info']
+ __all__ = ['store_variables', 'load_variables', 'print_colab_gpu_info', 'ModelTrackingCallback']

  # %% ../nbs/07_utils.ipynb 1
  import pickle
  import torch
  from pathlib import Path
+ import mlflow
+ import mlflow.pytorch
+ import os
+ import tempfile
+ import json
+ from fastai.callback.core import Callback
+ from typing import Any

  # %% ../nbs/07_utils.ipynb 3
- def store_variables(pkl_fn: (str, Path), size: list, reorder: bool, resample: (int, list)):
+ def store_variables(pkl_fn: str | Path, size: list, reorder: bool, resample: int | list):
      """Save variable values in a pickle file."""

      var_vals = [size, reorder, resample]
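For context, the store/load round trip these helpers implement, under the new `str | Path` signature. A sketch (file name and values are illustrative, and it assumes `load_variables` returns the pickled `[size, reorder, resample]` list):

```python
from fastMONAI.utils import store_variables, load_variables

# Persist the preprocessing settings used at training time...
store_variables(pkl_fn='inference_settings.pkl', size=[96, 96, 96],
                reorder=True, resample=[1.0, 1.0, 1.0])

# ...and unpack them later, in the same order they were stored.
size, reorder, resample = load_variables('inference_settings.pkl')
```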
@@ -43,3 +50,188 @@ def print_colab_gpu_info():
 
      if torch.cuda.is_available(): print('GPU attached.')
      else: print(colab_gpu_msg)
+
+ # %% ../nbs/07_utils.ipynb 6
+ class ModelTrackingCallback(Callback):
+     """
+     A FastAI callback for comprehensive MLflow experiment tracking.
+
+     This callback automatically logs hyperparameters, metrics, model artifacts,
+     and configuration to MLflow during training.
+     """
+
+     def __init__(
+         self,
+         model_name: str,
+         loss_function: str,
+         item_tfms: list[Any],
+         size: list[int],
+         resample: list[float],
+         reorder: bool
+     ):
+         """
+         Initialize the MLflow tracking callback.
+
+         Args:
+             model_name: Name of the model architecture for registration
+             loss_function: Name of the loss function being used
+             item_tfms: Item transforms whose parameters are logged
+             size: Model input dimensions
+             resample: Resampling dimensions
+             reorder: Whether reordering augmentation is applied
+         """
+         self.model_name = model_name
+         self.loss_function = loss_function
+         self.item_tfms = item_tfms
+         self.size = size
+         self.resample = resample
+         self.reorder = reorder
+
+         self.config = self._build_config()
+
+     def extract_all_params(self, tfm):
+         """
+         Extract all parameters from a transform object for detailed logging.
+
+         Args:
+             tfm: Transform object to extract parameters from
+
+         Returns:
+             dict: Dictionary with 'name' and 'params' keys containing transform details
+         """
+         class_name = tfm.__class__.__name__
+         params = {}
+
+         for key, value in tfm.__dict__.items():
+             if not key.startswith('_') and key != '__signature__':
+                 if hasattr(value, '__dict__') and hasattr(value, 'target_shape'):
+                     params['target_shape'] = value.target_shape
+                 elif hasattr(value, '__dict__') and not key.startswith('_'):
+                     nested_params = {k: v for k, v in value.__dict__.items()
+                                      if not k.startswith('_') and isinstance(v, (int, float, str, bool, tuple, list))}
+                     params.update(nested_params)
+                 elif isinstance(value, (int, float, str, bool, tuple, list)):
+                     params[key] = value
+
+         return {
+             'name': class_name,
+             'params': params
+         }
+
+     def _build_config(self) -> dict[str, Any]:
+         """Build configuration dictionary from initialization parameters."""
+         # Extract detailed transform information
+         transform_details = [self.extract_all_params(tfm) for tfm in self.item_tfms]
+
+         return {
+             "model_name": self.model_name,
+             "loss_function": self.loss_function,
+             "transform_details": transform_details,
+             "size": self.size,
+             "resample": self.resample,
+             "reorder": self.reorder,
+         }
+
+     def _extract_training_params(self) -> dict[str, Any]:
+         """Extract training hyperparameters from the learner."""
+         params = {}
+
+         params["epochs"] = self.learn.n_epoch
+         params["learning_rate"] = float(self.learn.lr)
+         params["optimizer"] = self.learn.opt_func.__name__
+         params["batch_size"] = self.learn.dls.bs
+
+         params["loss_function"] = self.config["loss_function"]
+         params["size"] = self.config["size"]
+         params["resample"] = self.config["resample"]
+         params["reorder"] = self.config["reorder"]
+
+         params["transformations"] = json.dumps(
+             self.config["transform_details"],
+             indent=2,
+             separators=(',', ': ')
+         )
+
+         return params
+
+     def _extract_epoch_metrics(self) -> dict[str, float]:
+         """Extract metrics from the current epoch."""
+         recorder = self.learn.recorder
+
+         # Get custom metric names and values (skip 'epoch' and 'time')
+         metric_names = recorder.metric_names[2:]
+         raw_metric_values = recorder.log[2:]
+
+         metrics = {}
+
+         # Process each metric, handling both scalars and tensors
+         for name, val in zip(metric_names, raw_metric_values):
+             if isinstance(val, torch.Tensor):
+                 if val.numel() == 1:
+                     # Single value tensor (like binary dice score)
+                     metrics[name] = float(val)
+                 else:
+                     # Multi-element tensor (like multiclass dice scores)
+                     val_list = val.tolist() if hasattr(val, 'tolist') else list(val)
+                     # Log individual class scores
+                     for i, class_score in enumerate(val_list):
+                         metrics[f"{name}_class_{i+1}"] = float(class_score)
+                     # Log mean across classes
+                     metrics[f"{name}_mean"] = float(torch.mean(val))
+             else:
+                 metrics[name] = float(val)
+
+         # Handle loss values
+         if len(recorder.log) >= 2:
+             metrics['train_loss'] = float(recorder.log[1])
+         if len(recorder.log) >= 3:
+             metrics['valid_loss'] = float(recorder.log[2])
+
+         return metrics
+
+     def _save_model_artifacts(self, temp_dir: Path) -> None:
+         """Save model weights, learner, and configuration as artifacts."""
+         weights_path = temp_dir / "weights"
+         self.learn.save(str(weights_path))
+
+         weights_file = f"{weights_path}.pth"
+         if os.path.exists(weights_file):
+             mlflow.log_artifact(weights_file, "model")
+
+         learner_path = temp_dir / "learner.pkl"
+         self.learn.export(str(learner_path))
+         mlflow.log_artifact(str(learner_path), "model")
+
+         config_path = temp_dir / "inference_settings.pkl"
+         store_variables(config_path, self.size, self.reorder, self.resample)
+         mlflow.log_artifact(str(config_path), "config")
+
+     def _register_pytorch_model(self) -> None:
+         """Register the PyTorch model with MLflow."""
+         mlflow.pytorch.log_model(
+             pytorch_model=self.learn.model,
+             registered_model_name=self.model_name
+         )
+
+     def before_fit(self) -> None:
+         """Log hyperparameters before training starts."""
+         params = self._extract_training_params()
+         mlflow.log_params(params)
+
+     def after_epoch(self) -> None:
+         """Log metrics after each epoch."""
+         metrics = self._extract_epoch_metrics()
+         if metrics:
+             mlflow.log_metrics(metrics, step=self.learn.epoch)
+
+     def after_fit(self) -> None:
+         """Log model artifacts after training completion."""
+         print("\nTraining finished. Logging model artifacts to MLflow...")
+
+         with tempfile.TemporaryDirectory() as temp_dir:
+             temp_path = Path(temp_dir)
+
+             self._save_model_artifacts(temp_path)
+
+             self._register_pytorch_model()
+
+         print(f"MLflow run completed. Run ID: {mlflow.active_run().info.run_id}")
fastMONAI/vision_core.py CHANGED
@@ -1,12 +1,13 @@
  # AUTOGENERATED! DO NOT EDIT! File to edit: ../nbs/01_vision_core.ipynb.

  # %% auto 0
- __all__ = ['med_img_reader', 'MetaResolver', 'MedBase', 'MedImage', 'MedMask']
+ __all__ = ['med_img_reader', 'MetaResolver', 'MedBase', 'MedImage', 'MedMask', 'VSCodeProgressCallback', 'setup_vscode_progress']

  # %% ../nbs/01_vision_core.ipynb 2
  from .vision_plot import *
  from fastai.data.all import *
  from torchio import ScalarImage, LabelMap, ToCanonical, Resample
+ import copy

  # %% ../nbs/01_vision_core.ipynb 5
  def _preprocess(obj, reorder, resample):
@@ -56,7 +57,7 @@ def _load_and_preprocess(file_path, reorder, resample, dtype):
      return org_img, input_img, org_size

  # %% ../nbs/01_vision_core.ipynb 7
- def _multi_channel(image_paths: (L, list), reorder: bool, resample: list, dtype, only_tensor: bool):
+ def _multi_channel(image_paths: L | list, reorder: bool, resample: list, only_tensor: bool, dtype):
      """
      Load and preprocess multisequence data.

@@ -64,8 +65,8 @@ def _multi_channel(image_paths: (L, list), reorder: bool, resample: list, dtype,
          image_paths: List of image paths (e.g., T1, T2, T1CE, DWI).
          reorder: Whether to reorder data for canonical (RAS+) orientation.
          resample: Whether to resample image to different voxel sizes and dimensions.
-         dtype: Desired datatype for output.
          only_tensor: Whether to return only image tensor.
+         dtype: Desired datatype for output.

      Returns:
          torch.Tensor: A stacked 4D tensor, if `only_tensor` is True.
@@ -82,33 +83,28 @@ def _multi_channel(image_paths: (L, list), reorder: bool, resample: list, dtype,
      input_img.set_data(tensor)
      return org_img, input_img, org_size

-
  # %% ../nbs/01_vision_core.ipynb 8
- def med_img_reader(file_path: (str, Path, L, list), dtype=torch.Tensor, reorder: bool = False,
-                    resample: list = None, only_tensor: bool = True
-                    ):
+ def med_img_reader(file_path: str | Path | L | list, reorder: bool = False, resample: list = None,
+                    only_tensor: bool = True, dtype = torch.Tensor):
      """Loads and preprocesses a medical image.

      Args:
          file_path: Path to the image. Can be a string, Path object or a list.
-         dtype: Datatype for the return value. Defaults to torch.Tensor.
          reorder: Whether to reorder the data to be closest to canonical
              (RAS+) orientation. Defaults to False.
          resample: Whether to resample image to different voxel sizes and
              image dimensions. Defaults to None.
          only_tensor: Whether to return only image tensor. Defaults to True.
+         dtype: Datatype for the return value. Defaults to torch.Tensor.

      Returns:
          The preprocessed image. Returns only the image tensor if
          only_tensor is True, otherwise returns original image,
          preprocessed image, and original size.
      """
-     # if isinstance(file_path, str) and ';' in file_path:
-     #     return _multi_channel(
-     #         file_path.split(';'), reorder, resample, dtype, only_tensor)

-     if isinstance(file_path, (L, list)):
-         return _multi_channel(file_path, reorder, resample, dtype, only_tensor)
+     if isinstance(file_path, (list, L)):
+         return _multi_channel(file_path, reorder, resample, only_tensor, dtype)

      org_img, input_img, org_size = _load_and_preprocess(
          file_path, reorder, resample, dtype)
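Note that `dtype` moved from the second position to the last; calls that passed it positionally need updating. A call sketch under the new signature (paths are illustrative):

```python
from fastMONAI.vision_core import med_img_reader, MedImage

# Single volume, returned as a MedImage tensor.
img = med_img_reader('data/sub-01_T1w.nii.gz', reorder=True,
                     resample=[1, 1, 1], dtype=MedImage)

# A list of paths is treated as multisequence input and stacked channel-wise.
multi = med_img_reader(['t1.nii.gz', 't2.nii.gz'], reorder=True,
                       resample=[1, 1, 1], dtype=MedImage)
```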
@@ -154,7 +150,50 @@ class MedBase(torch.Tensor, metaclass=MetaResolver):
          if isinstance(fn, torch.Tensor):
              return cls(fn)

-         return med_img_reader(fn, dtype=cls, resample=cls.resample, reorder=cls.reorder)
+         return med_img_reader(fn, resample=cls.resample, reorder=cls.reorder, dtype=cls)
+
+     def __new__(cls, x, **kwargs):
+         """Creates a new instance of MedBase from a tensor."""
+         if isinstance(x, torch.Tensor):
+             # Create tensor of the same type and copy data
+             res = torch.Tensor._make_subclass(cls, x.data, x.requires_grad)
+             # Copy any additional attributes
+             if hasattr(x, 'affine_matrix'):
+                 res.affine_matrix = x.affine_matrix
+             return res
+         else:
+             # Handle other types by converting to tensor first
+             tensor = torch.as_tensor(x, **kwargs)
+             return cls.__new__(cls, tensor)
+
+     def new_empty(self, size, **kwargs):
+         """Create a new empty tensor of the same type."""
+         # Create new tensor with same type and device/dtype
+         kwargs.setdefault('dtype', self.dtype)
+         kwargs.setdefault('device', self.device)
+         new_tensor = torch.empty(size, **kwargs)
+         # Use __new__ to create proper subclass instance
+         return self.__class__.__new__(self.__class__, new_tensor)
+
+     def __copy__(self):
+         """Shallow copy implementation."""
+         copied = self.__class__.__new__(self.__class__, self.clone())
+         # Copy class attributes
+         if hasattr(self, 'affine_matrix'):
+             copied.affine_matrix = self.affine_matrix
+         return copied
+
+     def __deepcopy__(self, memo):
+         """Deep copy implementation."""
+         # Create a deep copy of the tensor data
+         copied_data = self.clone()
+         copied = self.__class__.__new__(self.__class__, copied_data)
+         # Deep copy class attributes
+         if hasattr(self, 'affine_matrix') and self.affine_matrix is not None:
+             copied.affine_matrix = copy.deepcopy(self.affine_matrix, memo)
+         else:
+             copied.affine_matrix = None
+         return copied

      @classmethod
      def item_preprocessing(cls, resample: (list, int, tuple), reorder: bool):
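With `__copy__` and `__deepcopy__` defined, the standard `copy` module should now preserve both the subclass and the affine. A small sketch (tensor values are illustrative):

```python
import copy
import torch
from fastMONAI.vision_core import MedImage

img = MedImage(torch.rand(1, 8, 8, 8))
img.affine_matrix = torch.eye(4)

dup = copy.deepcopy(img)
assert isinstance(dup, MedImage)                          # subclass survives the copy
assert torch.equal(dup.affine_matrix, img.affine_matrix)  # affine is carried over
```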
@@ -208,3 +247,106 @@ class MedImage(MedBase):
  class MedMask(MedBase):
      """Subclass of MedBase that represents a mask object."""
      _show_args = {'alpha':0.5, 'cmap':'tab20'}
+
+ # %% ../nbs/01_vision_core.ipynb 14
+ import os
+ from fastai.callback.progress import ProgressCallback
+ from fastai.callback.core import Callback
+ import sys
+ from IPython import get_ipython
+
+ class VSCodeProgressCallback(ProgressCallback):
+     """Enhanced progress callback that works better in VS Code notebooks."""
+
+     def __init__(self, **kwargs):
+         super().__init__(**kwargs)
+         self.is_vscode = self._detect_vscode_environment()
+         self.lr_find_progress = None
+
+     def _detect_vscode_environment(self):
+         """Detect if running in VS Code Jupyter environment."""
+         ipython = get_ipython()
+         if ipython is None:
+             return True  # Assume VS Code if no IPython (safer default)
+         # VS Code detection - more comprehensive check
+         kernel_name = str(type(ipython.kernel)).lower() if hasattr(ipython, 'kernel') else ''
+         return ('vscode' in kernel_name or
+                 'zmq' in kernel_name or  # VS Code often uses ZMQInteractiveShell
+                 not hasattr(ipython, 'display_pub'))  # Missing display publisher often indicates VS Code
+
+     def before_fit(self):
+         """Initialize progress tracking before training."""
+         if self.is_vscode:
+             if hasattr(self.learn, 'lr_finder') and self.learn.lr_finder:
+                 # This is lr_find, handle differently
+                 print("🔍 Starting Learning Rate Finder...")
+                 self.lr_find_progress = 0
+             else:
+                 # Regular training
+                 print(f"🚀 Training for {self.learn.n_epoch} epochs...")
+         super().before_fit()
+
+     def before_epoch(self):
+         """Initialize epoch progress."""
+         if self.is_vscode:
+             if hasattr(self.learn, 'lr_finder') and self.learn.lr_finder:
+                 print(f"📊 LR Find - Testing learning rates...")
+             else:
+                 print(f"📈 Epoch {self.epoch+1}/{self.learn.n_epoch}")
+             sys.stdout.flush()
+         super().before_epoch()
+
+     def after_batch(self):
+         """Update progress after each batch."""
+         super().after_batch()
+         if self.is_vscode:
+             if hasattr(self.learn, 'lr_finder') and self.learn.lr_finder:
+                 # Special handling for lr_find
+                 self.lr_find_progress = getattr(self, 'iter', 0) + 1
+                 total = getattr(self, 'n_iter', 100)
+                 if self.lr_find_progress % max(1, total // 10) == 0:
+                     progress = (self.lr_find_progress / total) * 100
+                     print(f"⏳ LR Find Progress: {self.lr_find_progress}/{total} ({progress:.1f}%)")
+                     sys.stdout.flush()
+             else:
+                 # Regular training progress
+                 if hasattr(self, 'iter') and hasattr(self, 'n_iter'):
+                     if self.iter % max(1, self.n_iter // 20) == 0:
+                         progress = (self.iter / self.n_iter) * 100
+                         print(f"⏳ Batch {self.iter}/{self.n_iter} ({progress:.1f}%)")
+                         sys.stdout.flush()
+
+     def after_fit(self):
+         """Complete progress tracking after training."""
+         if self.is_vscode:
+             if hasattr(self.learn, 'lr_finder') and self.learn.lr_finder:
+                 print("✅ Learning Rate Finder completed!")
+             else:
+                 print("✅ Training completed!")
+             sys.stdout.flush()
+         super().after_fit()
+
+     def before_validate(self):
+         """Update before validation."""
+         if self.is_vscode and not (hasattr(self.learn, 'lr_finder') and self.learn.lr_finder):
+             print("🔄 Validating...")
+             sys.stdout.flush()
+         super().before_validate()
+
+     def after_validate(self):
+         """Update after validation."""
+         if self.is_vscode and not (hasattr(self.learn, 'lr_finder') and self.learn.lr_finder):
+             print("✅ Validation completed")
+             sys.stdout.flush()
+         super().after_validate()
+
+ def setup_vscode_progress():
+     """Configure fastai to use VS Code-compatible progress callback."""
+     from fastai.learner import defaults
+
+     # Replace default ProgressCallback with VSCodeProgressCallback
+     if ProgressCallback in defaults.callbacks:
+         defaults.callbacks = [cb if cb != ProgressCallback else VSCodeProgressCallback
+                               for cb in defaults.callbacks]
+
+     print("✅ Configured VS Code-compatible progress callback")
fastMONAI/vision_data.py CHANGED
@@ -5,10 +5,12 @@ __all__ = ['pred_to_multiclass_mask', 'batch_pred_to_multiclass_mask', 'pred_to_
  'MedImageDataLoaders', 'show_batch', 'show_results', 'plot_top_losses']

  # %% ../nbs/02_vision_data.ipynb 2
+ import torch
  from fastai.data.all import *
  from fastai.vision.data import *
  from .vision_core import *
  from .vision_plot import find_max_slice
+ from plum import dispatch

  # %% ../nbs/02_vision_data.ipynb 5
  def pred_to_multiclass_mask(pred: torch.Tensor) -> torch.Tensor:
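The `@typedispatch` → `@dispatch` replacements below switch from fastcore's type dispatch to plum (hence the new `plum-dispatch` dependency). A self-contained sketch of plum's multiple dispatch, with hypothetical functions:

```python
from plum import dispatch

@dispatch
def describe(x: int) -> str:
    return f"int: {x}"

@dispatch
def describe(x: str) -> str:
    return f"str: {x}"

print(describe(3), describe('a'))  # the overload is chosen from the runtime argument types
```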
@@ -65,7 +67,7 @@ class MedDataBlock(DataBlock):
      """Container to quickly build dataloaders."""
      #TODO add get_x
      def __init__(self, blocks: list = None, dl_type: TfmdDL = None, getters: list = None,
-                  n_inp: int = None, item_tfms: list = None, batch_tfms: list = None,
+                  n_inp: int | None = None, item_tfms: list = None, batch_tfms: list = None,
                   reorder: bool = False, resample: (int, list) = None, **kwargs):

          super().__init__(blocks, dl_type, getters, n_inp, item_tfms,
@@ -109,7 +111,7 @@ class MedImageDataLoaders(DataLoaders):
          return cls.from_dblock(dblock, df, **kwargs)

  # %% ../nbs/02_vision_data.ipynb 16
- @typedispatch
+ @dispatch
  def show_batch(x: MedImage, y, samples, ctxs=None, max_n=6, nrows=None,
                 ncols=None, figsize=None, channel=0, slice_index=None,
                 anatomical_plane=0, **kwargs):
@@ -131,7 +133,7 @@ def show_batch(x: MedImage, y, samples, ctxs=None, max_n=6, nrows=None,
      return ctxs

  # %% ../nbs/02_vision_data.ipynb 17
- @typedispatch
+ @dispatch
  def show_batch(x: MedImage, y: MedMask, samples, ctxs=None, max_n=6, nrows=None,
                 ncols=None, figsize=None, channel=0, slice_index=None,
                 anatomical_plane=0, **kwargs):
@@ -162,10 +164,10 @@ def show_batch(x: MedImage, y: MedMask, samples, ctxs=None, max_n=6, nrows=None,
      return ctxs

  # %% ../nbs/02_vision_data.ipynb 19
- @typedispatch
+ @dispatch
  def show_results(x: MedImage, y: torch.Tensor, samples, outs, ctxs=None, max_n: int = 6,
-                  nrows: int = None, ncols: int = None, figsize=None, channel: int = 0,
-                  slice_index: int = None, anatomical_plane: int = 0, **kwargs):
+                  nrows: int | None = None, ncols: int | None = None, figsize=None, channel: int = 0,
+                  slice_index: int | None = None, anatomical_plane: int = 0, **kwargs):
      """Showing samples and their corresponding predictions for regression tasks."""

      if ctxs is None:
@@ -188,10 +190,10 @@ def show_results(x: MedImage, y: torch.Tensor, samples, outs, ctxs=None, max_n:
      return ctxs

  # %% ../nbs/02_vision_data.ipynb 20
- @typedispatch
+ @dispatch
  def show_results(x: MedImage, y: TensorCategory, samples, outs, ctxs=None,
-                  max_n: int = 6, nrows: int = None, ncols: int = None, figsize=None, channel: int = 0,
-                  slice_index: int = None, anatomical_plane: int = 0, **kwargs):
+                  max_n: int = 6, nrows: int | None = None, ncols: int | None = None, figsize=None, channel: int = 0,
+                  slice_index: int | None = None, anatomical_plane: int = 0, **kwargs):
      """Showing samples and their corresponding predictions for classification tasks."""

      if ctxs is None:
@@ -209,10 +211,10 @@ def show_results(x: MedImage, y: TensorCategory, samples, outs, ctxs=None,
      return ctxs

  # %% ../nbs/02_vision_data.ipynb 21
- @typedispatch
+ @dispatch
  def show_results(x: MedImage, y: MedMask, samples, outs, ctxs=None, max_n: int = 6,
-                  nrows: int = None, ncols: int = 3, figsize=None, channel: int = 0,
-                  slice_index: int = None, anatomical_plane: int = 0, **kwargs):
+                  nrows: int | None = None, ncols: int = 3, figsize=None, channel: int = 0,
+                  slice_index: int | None = None, anatomical_plane: int = 0, **kwargs):
      """Showing decoded samples and their corresponding predictions for segmentation tasks."""

      if ctxs is None:
@@ -240,9 +242,9 @@ def show_results(x: MedImage, y: MedMask, samples, outs, ctxs=None, max_n: int =
      return ctxs

  # %% ../nbs/02_vision_data.ipynb 23
- @typedispatch
- def plot_top_losses(x: MedImage, y: TensorCategory, samples, outs, raws, losses, nrows: int = None,
-                     ncols: int = None, figsize=None, channel: int = 0, slice_index: int = None,
+ @dispatch
+ def plot_top_losses(x: MedImage, y: TensorCategory, samples, outs, raws, losses, nrows: int | None = None,
+                     ncols: int | None = None, figsize=None, channel: int = 0, slice_index: int | None = None,
                      anatomical_plane: int = 0, **kwargs):
      """Show images in top_losses along with their prediction, actual, loss, and probability of actual class."""

@@ -258,10 +260,10 @@ def plot_top_losses(x: MedImage, y: TensorCategory, samples, outs, raws, losses,
          ax.set_title(f'{o[0]}/{s[1]} / {l.item():.2f} / {r.max().item():.2f}')

  # %% ../nbs/02_vision_data.ipynb 24
- @typedispatch
+ @dispatch
  def plot_top_losses(x: MedImage, y: TensorMultiCategory, samples, outs, raws,
-                     losses, nrows: int = None, ncols: int = None, figsize=None,
-                     channel: int = 0, slice_index: int = None,
+                     losses, nrows: int | None = None, ncols: int | None = None, figsize=None,
+                     channel: int = 0, slice_index: int | None = None,
                      anatomical_plane: int = 0, **kwargs):
      # TODO: not tested yet
      axs = get_grid(len(samples), nrows=nrows, ncols=ncols, figsize=figsize)
fastMONAI/vision_inference.py CHANGED
@@ -94,14 +94,28 @@ def inference(learn_inf, reorder, resample, fn: (str, Path) = '',
      reoriented_array = _to_original_orientation(input_img.as_sitk(),
                                                  ('').join(org_img.orientation))

-     org_img.set_data(reoriented_array)
+     # Create new TorchIO image with proper affine matrix
+     # Spatial properties (spacing, origin, direction) are automatically derived from affine
+     from torchio import ScalarImage
+     import numpy as np
+
+     # Ensure we have a valid affine matrix
+     if hasattr(org_img, 'affine') and org_img.affine is not None:
+         affine_matrix = org_img.affine.copy()
+     elif MedBase.affine_matrix is not None:
+         affine_matrix = MedBase.affine_matrix.copy()
+     else:
+         # Fallback to identity affine if no affine available
+         affine_matrix = np.eye(4, dtype=np.float64)
+
+     pred_img = ScalarImage(tensor=reoriented_array, affine=affine_matrix)

      if save_path:
          save_fn = Path(save_path)/('pred_' + Path(fn).parts[-1])
-         org_img.save(save_fn)
+         pred_img.save(save_fn)
          return save_fn

-     return org_img
+     return pred_img

  # %% ../nbs/06_vision_inference.ipynb 9
  def compute_binary_tumor_volume(mask_data: Image):
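The substantive change above: instead of mutating `org_img` in place, the prediction is wrapped in a fresh `torchio.ScalarImage` that carries an explicit affine. A standalone sketch of that construction (shape and affine are illustrative):

```python
import numpy as np
from torchio import ScalarImage

pred = np.random.rand(1, 64, 64, 64).astype(np.float32)  # channel-first volume
affine = np.eye(4)                                        # voxel-to-world mapping

pred_img = ScalarImage(tensor=pred, affine=affine)  # spacing/origin/direction derive from the affine
pred_img.save('pred_example.nii.gz')
```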
fastmonai-0.5.0.0.dist-info/METADATA ADDED
@@ -0,0 +1,149 @@
+ Metadata-Version: 2.4
+ Name: fastMONAI
+ Version: 0.5.0.0
+ Summary: fastMONAI library
+ Home-page: https://github.com/MMIV-ML/fastMONAI
+ Author: Satheshkumar Kaliyugarasan
+ Author-email: skaliyugarasan@hotmail.com
+ License: Apache Software License 2.0
+ Keywords: deep learning,medical imaging
+ Classifier: Development Status :: 3 - Alpha
+ Classifier: Intended Audience :: Developers
+ Classifier: Natural Language :: English
+ Classifier: Programming Language :: Python :: 3.10
+ Classifier: Programming Language :: Python :: 3.11
+ Classifier: Programming Language :: Python :: 3.12
+ Classifier: License :: OSI Approved :: Apache Software License
+ Requires-Python: >=3.10
+ Description-Content-Type: text/markdown
+ License-File: LICENSE
+ Requires-Dist: fastai==2.8.3
+ Requires-Dist: monai==1.5.0
+ Requires-Dist: torchio==0.20.19
+ Requires-Dist: xlrd>=1.2.0
+ Requires-Dist: scikit-image==0.25.2
+ Requires-Dist: imagedata==3.8.4
+ Requires-Dist: mlflow==3.3.1
+ Requires-Dist: huggingface-hub
+ Requires-Dist: gdown
+ Requires-Dist: gradio
+ Requires-Dist: opencv-python
+ Requires-Dist: plum-dispatch
+ Provides-Extra: dev
+ Requires-Dist: ipywidgets; extra == "dev"
+ Requires-Dist: nbdev; extra == "dev"
+ Requires-Dist: tabulate; extra == "dev"
+ Requires-Dist: quarto; extra == "dev"
+ Dynamic: author
+ Dynamic: author-email
+ Dynamic: classifier
+ Dynamic: description
+ Dynamic: description-content-type
+ Dynamic: home-page
+ Dynamic: keywords
+ Dynamic: license
+ Dynamic: license-file
+ Dynamic: provides-extra
+ Dynamic: requires-dist
+ Dynamic: requires-python
+ Dynamic: summary
+
+ # Overview
+
+
+ <!-- WARNING: THIS FILE WAS AUTOGENERATED! DO NOT EDIT! -->
+
+ ![](https://raw.githubusercontent.com/skaliy/skaliy.github.io/master/assets/fastmonai_v1.png)
+
+ ![CI](https://github.com/MMIV-ML/fastMONAI/workflows/CI/badge.svg)
+ [![Docs](https://github.com/MMIV-ML/fastMONAI/actions/workflows/deploy.yaml/badge.svg)](https://fastmonai.no)
+ [![PyPI](https://img.shields.io/pypi/v/fastMONAI?color=blue&label=PyPI%20version&logo=python&logoColor=white.png)](https://pypi.org/project/fastMONAI)
+
+ A low-code Python-based open source deep learning library built on top
+ of [fastai](https://github.com/fastai/fastai),
+ [MONAI](https://monai.io/), [TorchIO](https://torchio.readthedocs.io/),
+ and [Imagedata](https://imagedata.readthedocs.io/).
+
+ fastMONAI simplifies the use of state-of-the-art deep learning
+ techniques in 3D medical image analysis for solving classification,
+ regression, and segmentation tasks. fastMONAI provides the users with
+ functionalities to step through data loading, preprocessing, training,
+ and result interpretations.
+
+ <b>Note:</b> This documentation is also available as interactive
+ notebooks.
+
+ ## Requirements
+
+ - **Python:** 3.10, 3.11, or 3.12 (Python 3.11 recommended)
+ - **GPU:** CUDA-compatible GPU recommended for training (CPU supported
+   for inference)
+
+ # Installation
+
+ ## Environment setup (recommended)
+
+ We recommend using a conda environment to avoid dependency conflicts:
+
+ `conda create -n fastmonai python=3.11`
+
+ `conda activate fastmonai`
+
+ ## Quick Install [(PyPI)](https://pypi.org/project/fastMONAI/)
+
+ `pip install fastMONAI`
+
+ ## Development install [(GitHub)](https://github.com/MMIV-ML/fastMONAI)
+
+ If you want to install an editable version of fastMONAI for development:
+
+     git clone https://github.com/MMIV-ML/fastMONAI
+     cd fastMONAI
+
+     # Create development environment
+     conda create -n fastmonai-dev python=3.11
+     conda activate fastmonai-dev
+
+     # Install in development mode
+     pip install -e '.[dev]'
+
+ # Getting started
+
+ The best way to get started using fastMONAI is to read our
+ [paper](https://www.sciencedirect.com/science/article/pii/S2665963823001203)
+ and dive into our beginner-friendly [video
+ tutorial](https://fastmonai.no/tutorial_beginner_video). For a deeper
+ understanding and hands-on experience, our comprehensive instructional
+ notebooks will walk you through model training for various tasks like
+ classification, regression, and segmentation. See the docs at
+ https://fastmonai.no for more information.
+
+ | Notebook | 1-Click Notebook |
+ |:---|----|
+ | [10a_tutorial_classification.ipynb](https://nbviewer.org/github/MMIV-ML/fastMONAI/blob/master/nbs/10a_tutorial_classification.ipynb) <br>shows how to construct a binary classification model based on MRI data. | [![Google Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/MMIV-ML/fastMONAI/blob/master/nbs/10a_tutorial_classification.ipynb) |
+ | [10b_tutorial_regression.ipynb](https://nbviewer.org/github/MMIV-ML/fastMONAI/blob/master/nbs/10b_tutorial_regression.ipynb) <br>shows how to construct a model to predict the age of a subject from MRI scans (“brain age”). | [![Google Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/MMIV-ML/fastMONAI/blob/master/nbs/10b_tutorial_regression.ipynb) |
+ | [10c_tutorial_binary_segmentation.ipynb](https://nbviewer.org/github/MMIV-ML/fastMONAI/blob/master/nbs/10c_tutorial_binary_segmentation.ipynb) <br>shows how to do binary segmentation (extract the left atrium from monomodal cardiac MRI). | [![Google Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/MMIV-ML/fastMONAI/blob/master/nbs/10c_tutorial_binary_segmentation.ipynb) |
+ | [10d_tutorial_multiclass_segmentation.ipynb](https://nbviewer.org/github/MMIV-ML/fastMONAI/blob/master/nbs/10d_tutorial_multiclass_segmentation.ipynb) <br>shows how to perform segmentation from multimodal MRI (brain tumor segmentation). | [![Google Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/MMIV-ML/fastMONAI/blob/master/nbs/10d_tutorial_multiclass_segmentation.ipynb) |
+
+ # How to contribute
+
+ We welcome contributions! See
+ [CONTRIBUTING.md](https://github.com/MMIV-ML/fastMONAI/blob/master/CONTRIBUTING.md)
+
+ # Citing fastMONAI
+
+ If you are using fastMONAI in your research, please use the following
+ citation:
+
+     @article{KALIYUGARASAN2023100583,
+         title = {fastMONAI: A low-code deep learning library for medical image analysis},
+         journal = {Software Impacts},
+         pages = {100583},
+         year = {2023},
+         issn = {2665-9638},
+         doi = {https://doi.org/10.1016/j.simpa.2023.100583},
+         url = {https://www.sciencedirect.com/science/article/pii/S2665963823001203},
+         author = {Satheshkumar Kaliyugarasan and Alexander S. Lundervold},
+         keywords = {Deep learning, Medical imaging, Radiology},
+         abstract = {We introduce fastMONAI, an open-source Python-based deep learning library for 3D medical imaging. Drawing upon the strengths of fastai, MONAI, and TorchIO, fastMONAI simplifies the use of advanced techniques for tasks like classification, regression, and segmentation. The library's design addresses domain-specific demands while promoting best practices, facilitating efficient model development. It offers newcomers an easier entry into the field while keeping the option to make advanced, lower-level customizations if needed. This paper describes the library's design, impact, limitations, and plans for future work.}
+     }
fastmonai-0.5.0.0.dist-info/RECORD ADDED
@@ -0,0 +1,20 @@
+ fastMONAI/__init__.py,sha256=T19ge-mjTQc59d5RqcDcri4stPYuQEaLR3cFKwPhDfI,24
+ fastMONAI/_modidx.py,sha256=sVdaZs4ZogSisNkxAN1n96jaayoJqsrODsf9xodrL14,36080
+ fastMONAI/dataset_info.py,sha256=aJ-utYZ1OrA32RIQbF7jHxcDE8SgOZE3Vt1AojxnvZc,5026
+ fastMONAI/external_data.py,sha256=IVj9GbIRFh9bTFkIa2wySUObSnNfZiaVtuzFxOFAi0Q,12219
+ fastMONAI/research_utils.py,sha256=LZu62g8BQAVYS4dD7qDsKHJXZnDd1uLkJ6LoaMDhUhk,590
+ fastMONAI/utils.py,sha256=Gr5IGb3v-tfpdFotoIaSAECPXDZS3ECOBSdvQx5vb-A,8647
+ fastMONAI/vision_all.py,sha256=_l6F8ZlUaPYcplNG6mg1-1xssYforByEe4zECbPzTck,359
+ fastMONAI/vision_augmentation.py,sha256=lAlrLm8jbXRmk9a6e8_o_CNTS6Pyp-KKNXwjpelUUJc,9070
+ fastMONAI/vision_core.py,sha256=k4RUBzZuh9W8J4zbcVzXCKfJxkKCsBDG0oSRMwiCNp0,13848
+ fastMONAI/vision_data.py,sha256=VCB3hyBN7dYuLiYGSGeuWlBTMvb2cLVo_sbENrRWe5Q,11510
+ fastMONAI/vision_inference.py,sha256=3SaJbKGbgaf9ON9PH5DtvfNlhAurov_Idnrlp4jyU9w,6625
+ fastMONAI/vision_loss.py,sha256=NrHnk1yD4EBKsp6aippppXU4l-mwmsZOqE_bsZP3ZNI,3591
+ fastMONAI/vision_metrics.py,sha256=CVxdOBPaMJT6Mo5jF3WoQj6a3C-_FsnBicMAU_ZrFS8,3549
+ fastMONAI/vision_plot.py,sha256=-X_nNBXx7lYCZSFBIN1587ZTA3T_-2ASBM4K31wU660,3792
+ fastmonai-0.5.0.0.dist-info/licenses/LICENSE,sha256=xV8xoN4VOL0uw9X8RSs2IMuD_Ss_a9yAbtGNeBWZwnw,11337
+ fastmonai-0.5.0.0.dist-info/METADATA,sha256=eO5N4RAweItcte4WhnzMkrfn4d-Vbbs19hZjqxU_TZI,7096
+ fastmonai-0.5.0.0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ fastmonai-0.5.0.0.dist-info/entry_points.txt,sha256=mVBsykSXMairzzk3hJaQ8c-UiwUZqGnn4aFZ24CpsBM,40
+ fastmonai-0.5.0.0.dist-info/top_level.txt,sha256=o8y7SWF9odtnIT3jvYtUn9okbJRlaAMCy7oPFCeQvQ8,10
+ fastmonai-0.5.0.0.dist-info/RECORD,,
fastmonai-0.5.0.0.dist-info/WHEEL CHANGED
@@ -1,5 +1,5 @@
  Wheel-Version: 1.0
- Generator: bdist_wheel (0.38.4)
+ Generator: setuptools (80.9.0)
  Root-Is-Purelib: true
  Tag: py3-none-any

fastMONAI-0.4.0.1.dist-info/METADATA DELETED
@@ -1,88 +0,0 @@
- Metadata-Version: 2.1
- Name: fastMONAI
- Version: 0.4.0.1
- Summary: fastMONAI library
- Home-page: https://github.com/MMIV-ML/fastMONAI
- Author: Satheshkumar Kaliyugarasan
- Author-email: skka@hvl.no
- License: Apache Software License 2.0
- Keywords: deep learning,medical imaging
- Classifier: Development Status :: 3 - Alpha
- Classifier: Intended Audience :: Developers
- Classifier: Natural Language :: English
- Classifier: Programming Language :: Python :: 3.7
- Classifier: Programming Language :: Python :: 3.8
- Classifier: Programming Language :: Python :: 3.9
- Classifier: Programming Language :: Python :: 3.10
- Classifier: License :: OSI Approved :: Apache Software License
- Requires-Python: >=3.7
- Description-Content-Type: text/markdown
- License-File: LICENSE
- Requires-Dist: fastai (==2.7.12)
- Requires-Dist: monai (==1.2.0)
- Requires-Dist: torchio (==0.18.91)
- Requires-Dist: xlrd (>=1.2.0)
- Requires-Dist: scikit-image (==0.19.3)
- Requires-Dist: imagedata (==2.1.3)
- Requires-Dist: huggingface-hub
- Requires-Dist: gdown
- Requires-Dist: gradio
- Requires-Dist: opencv-python
- Provides-Extra: dev
- Requires-Dist: ipywidgets ; extra == 'dev'
- Requires-Dist: nbdev ; extra == 'dev'
- Requires-Dist: tabulate ; extra == 'dev'
-
- Overview
- ================
-
- <!-- WARNING: THIS FILE WAS AUTOGENERATED! DO NOT EDIT! -->
-
- ![](https://raw.githubusercontent.com/skaliy/skaliy.github.io/master/assets/fastmonai_v1.png)
-
- ![CI](https://github.com/MMIV-ML/fastMONAI/workflows/CI/badge.svg)
- [![Docs](https://github.com/MMIV-ML/fastMONAI/actions/workflows/deploy.yaml/badge.svg)](https://fastmonai.no)
- [![PyPI](https://img.shields.io/pypi/v/fastMONAI?color=blue&label=PyPI%20version&logo=python&logoColor=white.png)](https://pypi.org/project/fastMONAI)
-
- A low-code Python-based open source deep learning library built on top
- of [fastai](https://github.com/fastai/fastai),
- [MONAI](https://monai.io/), and
- [TorchIO](https://torchio.readthedocs.io/).
-
- fastMONAI simplifies the use of state-of-the-art deep learning
- techniques in 3D medical image analysis for solving classification,
- regression, and segmentation tasks. fastMONAI provides the users with
- functionalities to step through data loading, preprocessing, training,
- and result interpretations.
-
- <b>Note:</b> This documentation is also available as interactive
- notebooks.
-
- # Installing
-
- ## From [PyPI](https://pypi.org/project/fastMONAI/)
-
- `pip install fastMONAI`
-
- ## From [GitHub](https://github.com/MMIV-ML/fastMONAI)
-
- If you want to install an editable version of fastMONAI run:
-
- - `git clone https://github.com/MMIV-ML/fastMONAI`
- - `pip install -e 'fastMONAI[dev]'`
-
- # Getting started
-
- The best way to get started using fastMONAI is to dive into our beginner-friendly [video](https://fastmonai.no/tutorial_beginner_video). For a deeper understanding and hands-on experience, our comprehensive instructional notebooks will walk you through model training for various tasks like classification, regression, and segmentation. See the docs at https://fastmonai.no for more information.
-
- | Notebook | 1-Click Notebook |
- |:---|----|
- | [10a_tutorial_classification.ipynb](https://nbviewer.org/github/MMIV-ML/fastMONAI/blob/master/nbs/10a_tutorial_classification.ipynb) <br>shows how to construct a binary classification model based on MRI data. | [![Google Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/MMIV-ML/fastMONAI/blob/master/nbs/10a_tutorial_classification.ipynb) |
- | [10b_tutorial_regression.ipynb](https://nbviewer.org/github/MMIV-ML/fastMONAI/blob/master/nbs/10b_tutorial_regression.ipynb) <br>shows how to construct a model to predict the age of a subject from MRI scans (“brain age”). | [![Google Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/MMIV-ML/fastMONAI/blob/master/nbs/10b_tutorial_regression.ipynb) |
- | [10c_tutorial_binary_segmentation.ipynb](https://nbviewer.org/github/MMIV-ML/fastMONAI/blob/master/nbs/10c_tutorial_binary_segmentation.ipynb) <br>shows how to do binary segmentation (extract the left atrium from monomodal cardiac MRI). | [![Google Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/MMIV-ML/fastMONAI/blob/master/nbs/10c_tutorial_binary_segmentation.ipynb) |
- | [10d_tutorial_multiclass_segmentation.ipynb](https://nbviewer.org/github/MMIV-ML/fastMONAI/blob/master/nbs/10d_tutorial_multiclass_segmentation.ipynb) <br>shows how to perform segmentation from multimodal MRI (brain tumor segmentation). | [![Google Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/MMIV-ML/fastMONAI/blob/master/nbs/10d_tutorial_multiclass_segmentation.ipynb) |
-
- # How to contribute
-
- See
- [CONTRIBUTING.md](https://github.com/MMIV-ML/fastMONAI/blob/master/CONTRIBUTING.md)
fastMONAI-0.4.0.1.dist-info/RECORD DELETED
@@ -1,20 +0,0 @@
- fastMONAI/__init__.py,sha256=vc83kPY-LayyO_9Nln3UAcxXU7TG1ePu1eOVL5rw9Uc,24
- fastMONAI/_modidx.py,sha256=qlr-dyFcDjvXOZgM6-r-pYRHtodH07QRsioHD8n72cc,29553
- fastMONAI/dataset_info.py,sha256=w5LKGmEzFtlqhxqzhFLWUfyVkUL0EYypZQCH9Ay6jgg,4948
- fastMONAI/external_data.py,sha256=IVj9GbIRFh9bTFkIa2wySUObSnNfZiaVtuzFxOFAi0Q,12219
- fastMONAI/research_utils.py,sha256=LZu62g8BQAVYS4dD7qDsKHJXZnDd1uLkJ6LoaMDhUhk,590
- fastMONAI/utils.py,sha256=9I5nl6Sb0NbTxhr6FDnW4dapDhzPtmxGcJeXkkY4v3E,1406
- fastMONAI/vision_all.py,sha256=_l6F8ZlUaPYcplNG6mg1-1xssYforByEe4zECbPzTck,359
- fastMONAI/vision_augmentation.py,sha256=lAlrLm8jbXRmk9a6e8_o_CNTS6Pyp-KKNXwjpelUUJc,9070
- fastMONAI/vision_core.py,sha256=PBovhzMzzE16t5V-DYLdpWDqYCKPZdmTUQErsev8kmg,7603
- fastMONAI/vision_data.py,sha256=I7i1rzCaubfZVmU3CyJ0aITcin-fMO9jrb50Edpiro0,11394
- fastMONAI/vision_inference.py,sha256=7sX_ZCE_6myW_hTpJbnPG6s78WN8V7flVRRke1g__jc,5995
- fastMONAI/vision_loss.py,sha256=NrHnk1yD4EBKsp6aippppXU4l-mwmsZOqE_bsZP3ZNI,3591
- fastMONAI/vision_metrics.py,sha256=CVxdOBPaMJT6Mo5jF3WoQj6a3C-_FsnBicMAU_ZrFS8,3549
- fastMONAI/vision_plot.py,sha256=-X_nNBXx7lYCZSFBIN1587ZTA3T_-2ASBM4K31wU660,3792
- fastMONAI-0.4.0.1.dist-info/LICENSE,sha256=xV8xoN4VOL0uw9X8RSs2IMuD_Ss_a9yAbtGNeBWZwnw,11337
- fastMONAI-0.4.0.1.dist-info/METADATA,sha256=K_FMTVoc9wrYag5k7ka-zg64HC8NfcdiPvp1O0jF-CE,5735
- fastMONAI-0.4.0.1.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
- fastMONAI-0.4.0.1.dist-info/entry_points.txt,sha256=mVBsykSXMairzzk3hJaQ8c-UiwUZqGnn4aFZ24CpsBM,40
- fastMONAI-0.4.0.1.dist-info/top_level.txt,sha256=o8y7SWF9odtnIT3jvYtUn9okbJRlaAMCy7oPFCeQvQ8,10
- fastMONAI-0.4.0.1.dist-info/RECORD,,