fastMONAI 0.5.0.0.tar.gz → 0.5.2.tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {fastmonai-0.5.0.0/fastMONAI.egg-info → fastmonai-0.5.2}/PKG-INFO +7 -7
- {fastmonai-0.5.0.0 → fastmonai-0.5.2}/README.md +6 -6
- fastmonai-0.5.2/fastMONAI/__init__.py +1 -0
- {fastmonai-0.5.0.0 → fastmonai-0.5.2}/fastMONAI/_modidx.py +27 -2
- {fastmonai-0.5.0.0 → fastmonai-0.5.2}/fastMONAI/utils.py +177 -3
- {fastmonai-0.5.0.0 → fastmonai-0.5.2}/fastMONAI/vision_augmentation.py +102 -18
- {fastmonai-0.5.0.0 → fastmonai-0.5.2/fastMONAI.egg-info}/PKG-INFO +7 -7
- {fastmonai-0.5.0.0 → fastmonai-0.5.2}/pyproject.toml +1 -1
- {fastmonai-0.5.0.0 → fastmonai-0.5.2}/settings.ini +2 -2
- fastmonai-0.5.0.0/fastMONAI/__init__.py +0 -1
- {fastmonai-0.5.0.0 → fastmonai-0.5.2}/CONTRIBUTING.md +0 -0
- {fastmonai-0.5.0.0 → fastmonai-0.5.2}/LICENSE +0 -0
- {fastmonai-0.5.0.0 → fastmonai-0.5.2}/MANIFEST.in +0 -0
- {fastmonai-0.5.0.0 → fastmonai-0.5.2}/fastMONAI/dataset_info.py +0 -0
- {fastmonai-0.5.0.0 → fastmonai-0.5.2}/fastMONAI/external_data.py +0 -0
- {fastmonai-0.5.0.0 → fastmonai-0.5.2}/fastMONAI/research_utils.py +0 -0
- {fastmonai-0.5.0.0 → fastmonai-0.5.2}/fastMONAI/vision_all.py +0 -0
- {fastmonai-0.5.0.0 → fastmonai-0.5.2}/fastMONAI/vision_core.py +0 -0
- {fastmonai-0.5.0.0 → fastmonai-0.5.2}/fastMONAI/vision_data.py +0 -0
- {fastmonai-0.5.0.0 → fastmonai-0.5.2}/fastMONAI/vision_inference.py +0 -0
- {fastmonai-0.5.0.0 → fastmonai-0.5.2}/fastMONAI/vision_loss.py +0 -0
- {fastmonai-0.5.0.0 → fastmonai-0.5.2}/fastMONAI/vision_metrics.py +0 -0
- {fastmonai-0.5.0.0 → fastmonai-0.5.2}/fastMONAI/vision_plot.py +0 -0
- {fastmonai-0.5.0.0 → fastmonai-0.5.2}/fastMONAI.egg-info/SOURCES.txt +0 -0
- {fastmonai-0.5.0.0 → fastmonai-0.5.2}/fastMONAI.egg-info/dependency_links.txt +0 -0
- {fastmonai-0.5.0.0 → fastmonai-0.5.2}/fastMONAI.egg-info/entry_points.txt +0 -0
- {fastmonai-0.5.0.0 → fastmonai-0.5.2}/fastMONAI.egg-info/not-zip-safe +0 -0
- {fastmonai-0.5.0.0 → fastmonai-0.5.2}/fastMONAI.egg-info/requires.txt +0 -0
- {fastmonai-0.5.0.0 → fastmonai-0.5.2}/fastMONAI.egg-info/top_level.txt +0 -0
- {fastmonai-0.5.0.0 → fastmonai-0.5.2}/setup.cfg +0 -0
- {fastmonai-0.5.0.0 → fastmonai-0.5.2}/setup.py +0 -0
--- fastmonai-0.5.0.0/fastMONAI.egg-info/PKG-INFO
+++ fastmonai-0.5.2/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: fastMONAI
-Version: 0.5.0.0
+Version: 0.5.2
 Summary: fastMONAI library
 Home-page: https://github.com/MMIV-ML/fastMONAI
 Author: Satheshkumar Kaliyugarasan
@@ -53,7 +53,7 @@ Dynamic: summary
 
 <!-- WARNING: THIS FILE WAS AUTOGENERATED! DO NOT EDIT! -->
 
-![CI badge](…)
+![CI badge](…)
 
 
 [](https://fastmonai.no)
@@ -120,15 +120,15 @@ https://fastmonai.no for more information.
 
 | Notebook | 1-Click Notebook |
 |:---|----|
-| [10a_tutorial_classification.ipynb](https://nbviewer.org/github/MMIV-ML/fastMONAI/blob/master/nbs/10a_tutorial_classification.ipynb) <br>shows how to construct a binary classification model based on MRI data. | [](https://colab.research.google.com/github/MMIV-ML/fastMONAI/blob/master/nbs/10a_tutorial_classification.ipynb) |
-| [10b_tutorial_regression.ipynb](https://nbviewer.org/github/MMIV-ML/fastMONAI/blob/master/nbs/10b_tutorial_regression.ipynb) <br>shows how to construct a model to predict the age of a subject from MRI scans (“brain age”). | [](https://colab.research.google.com/github/MMIV-ML/fastMONAI/blob/master/nbs/10b_tutorial_regression.ipynb) |
-| [10c_tutorial_binary_segmentation.ipynb](https://nbviewer.org/github/MMIV-ML/fastMONAI/blob/master/nbs/10c_tutorial_binary_segmentation.ipynb) <br>shows how to do binary segmentation (extract the left atrium from monomodal cardiac MRI). | [](https://colab.research.google.com/github/MMIV-ML/fastMONAI/blob/master/nbs/10c_tutorial_binary_segmentation.ipynb) |
-| [10d_tutorial_multiclass_segmentation.ipynb](https://nbviewer.org/github/MMIV-ML/fastMONAI/blob/master/nbs/10d_tutorial_multiclass_segmentation.ipynb) <br>shows how to perform segmentation from multimodal MRI (brain tumor segmentation). | [](https://colab.research.google.com/github/MMIV-ML/fastMONAI/blob/master/nbs/10d_tutorial_multiclass_segmentation.ipynb) |
+| [10a_tutorial_classification.ipynb](https://nbviewer.org/github/MMIV-ML/fastMONAI/blob/main/nbs/10a_tutorial_classification.ipynb) <br>shows how to construct a binary classification model based on MRI data. | [](https://colab.research.google.com/github/MMIV-ML/fastMONAI/blob/main/nbs/10a_tutorial_classification.ipynb) |
+| [10b_tutorial_regression.ipynb](https://nbviewer.org/github/MMIV-ML/fastMONAI/blob/main/nbs/10b_tutorial_regression.ipynb) <br>shows how to construct a model to predict the age of a subject from MRI scans (“brain age”). | [](https://colab.research.google.com/github/MMIV-ML/fastMONAI/blob/main/nbs/10b_tutorial_regression.ipynb) |
+| [10c_tutorial_binary_segmentation.ipynb](https://nbviewer.org/github/MMIV-ML/fastMONAI/blob/main/nbs/10c_tutorial_binary_segmentation.ipynb) <br>shows how to do binary segmentation (extract the left atrium from monomodal cardiac MRI). | [](https://colab.research.google.com/github/MMIV-ML/fastMONAI/blob/main/nbs/10c_tutorial_binary_segmentation.ipynb) |
+| [10d_tutorial_multiclass_segmentation.ipynb](https://nbviewer.org/github/MMIV-ML/fastMONAI/blob/main/nbs/10d_tutorial_multiclass_segmentation.ipynb) <br>shows how to perform segmentation from multimodal MRI (brain tumor segmentation). | [](https://colab.research.google.com/github/MMIV-ML/fastMONAI/blob/main/nbs/10d_tutorial_multiclass_segmentation.ipynb) |
 
 # How to contribute
 
 We welcome contributions! See
-[CONTRIBUTING.md](https://github.com/MMIV-ML/fastMONAI/blob/master/CONTRIBUTING.md)
+[CONTRIBUTING.md](https://github.com/MMIV-ML/fastMONAI/blob/main/CONTRIBUTING.md)
 
 # Citing fastMONAI
 
--- fastmonai-0.5.0.0/README.md
+++ fastmonai-0.5.2/README.md
@@ -3,7 +3,7 @@
 
 <!-- WARNING: THIS FILE WAS AUTOGENERATED! DO NOT EDIT! -->
 
-![CI badge](…)
+![CI badge](…)
 
 
 [](https://fastmonai.no)
@@ -70,15 +70,15 @@ https://fastmonai.no for more information.
 
 | Notebook | 1-Click Notebook |
 |:---|----|
-| [10a_tutorial_classification.ipynb](https://nbviewer.org/github/MMIV-ML/fastMONAI/blob/master/nbs/10a_tutorial_classification.ipynb) <br>shows how to construct a binary classification model based on MRI data. | [](https://colab.research.google.com/github/MMIV-ML/fastMONAI/blob/master/nbs/10a_tutorial_classification.ipynb) |
-| [10b_tutorial_regression.ipynb](https://nbviewer.org/github/MMIV-ML/fastMONAI/blob/master/nbs/10b_tutorial_regression.ipynb) <br>shows how to construct a model to predict the age of a subject from MRI scans (“brain age”). | [](https://colab.research.google.com/github/MMIV-ML/fastMONAI/blob/master/nbs/10b_tutorial_regression.ipynb) |
-| [10c_tutorial_binary_segmentation.ipynb](https://nbviewer.org/github/MMIV-ML/fastMONAI/blob/master/nbs/10c_tutorial_binary_segmentation.ipynb) <br>shows how to do binary segmentation (extract the left atrium from monomodal cardiac MRI). | [](https://colab.research.google.com/github/MMIV-ML/fastMONAI/blob/master/nbs/10c_tutorial_binary_segmentation.ipynb) |
-| [10d_tutorial_multiclass_segmentation.ipynb](https://nbviewer.org/github/MMIV-ML/fastMONAI/blob/master/nbs/10d_tutorial_multiclass_segmentation.ipynb) <br>shows how to perform segmentation from multimodal MRI (brain tumor segmentation). | [](https://colab.research.google.com/github/MMIV-ML/fastMONAI/blob/master/nbs/10d_tutorial_multiclass_segmentation.ipynb) |
+| [10a_tutorial_classification.ipynb](https://nbviewer.org/github/MMIV-ML/fastMONAI/blob/main/nbs/10a_tutorial_classification.ipynb) <br>shows how to construct a binary classification model based on MRI data. | [](https://colab.research.google.com/github/MMIV-ML/fastMONAI/blob/main/nbs/10a_tutorial_classification.ipynb) |
+| [10b_tutorial_regression.ipynb](https://nbviewer.org/github/MMIV-ML/fastMONAI/blob/main/nbs/10b_tutorial_regression.ipynb) <br>shows how to construct a model to predict the age of a subject from MRI scans (“brain age”). | [](https://colab.research.google.com/github/MMIV-ML/fastMONAI/blob/main/nbs/10b_tutorial_regression.ipynb) |
+| [10c_tutorial_binary_segmentation.ipynb](https://nbviewer.org/github/MMIV-ML/fastMONAI/blob/main/nbs/10c_tutorial_binary_segmentation.ipynb) <br>shows how to do binary segmentation (extract the left atrium from monomodal cardiac MRI). | [](https://colab.research.google.com/github/MMIV-ML/fastMONAI/blob/main/nbs/10c_tutorial_binary_segmentation.ipynb) |
+| [10d_tutorial_multiclass_segmentation.ipynb](https://nbviewer.org/github/MMIV-ML/fastMONAI/blob/main/nbs/10d_tutorial_multiclass_segmentation.ipynb) <br>shows how to perform segmentation from multimodal MRI (brain tumor segmentation). | [](https://colab.research.google.com/github/MMIV-ML/fastMONAI/blob/main/nbs/10d_tutorial_multiclass_segmentation.ipynb) |
 
 # How to contribute
 
 We welcome contributions! See
-[CONTRIBUTING.md](https://github.com/MMIV-ML/fastMONAI/blob/master/CONTRIBUTING.md)
+[CONTRIBUTING.md](https://github.com/MMIV-ML/fastMONAI/blob/main/CONTRIBUTING.md)
 
 # Citing fastMONAI
 
--- /dev/null
+++ fastmonai-0.5.2/fastMONAI/__init__.py
@@ -0,0 +1 @@
+__version__ = "0.5.2"
--- fastmonai-0.5.0.0/fastMONAI/_modidx.py
+++ fastmonai-0.5.2/fastMONAI/_modidx.py
@@ -1,6 +1,6 @@
 # Autogenerated by nbdev
 
-d = { 'settings': { 'branch': 'master',
+d = { 'settings': { 'branch': 'main',
                     'doc_baseurl': '/',
                     'doc_host': 'https://fastmonai.no',
                     'git_url': 'https://github.com/MMIV-ML/fastMONAI',
@@ -47,7 +47,20 @@ d = { 'settings': { 'branch': 'master',
                                             'fastMONAI/external_data.py')},
   'fastMONAI.research_utils': { 'fastMONAI.research_utils.pred_postprocess': ( 'research_utils.html#pred_postprocess',
                                                                                'fastMONAI/research_utils.py')},
-  'fastMONAI.utils': { 'fastMONAI.utils.ModelTrackingCallback': ('utils.html#modeltrackingcallback', 'fastMONAI/utils.py'),
+  'fastMONAI.utils': { 'fastMONAI.utils.MLflowUIManager': ('utils.html#mlflowuimanager', 'fastMONAI/utils.py'),
+                       'fastMONAI.utils.MLflowUIManager.__init__': ('utils.html#mlflowuimanager.__init__', 'fastMONAI/utils.py'),
+                       'fastMONAI.utils.MLflowUIManager.check_mlflow_installed': ( 'utils.html#mlflowuimanager.check_mlflow_installed',
+                                                                                   'fastMONAI/utils.py'),
+                       'fastMONAI.utils.MLflowUIManager.find_available_port': ( 'utils.html#mlflowuimanager.find_available_port',
+                                                                                'fastMONAI/utils.py'),
+                       'fastMONAI.utils.MLflowUIManager.is_mlflow_running': ( 'utils.html#mlflowuimanager.is_mlflow_running',
+                                                                              'fastMONAI/utils.py'),
+                       'fastMONAI.utils.MLflowUIManager.is_port_available': ( 'utils.html#mlflowuimanager.is_port_available',
+                                                                              'fastMONAI/utils.py'),
+                       'fastMONAI.utils.MLflowUIManager.start_ui': ('utils.html#mlflowuimanager.start_ui', 'fastMONAI/utils.py'),
+                       'fastMONAI.utils.MLflowUIManager.status': ('utils.html#mlflowuimanager.status', 'fastMONAI/utils.py'),
+                       'fastMONAI.utils.MLflowUIManager.stop': ('utils.html#mlflowuimanager.stop', 'fastMONAI/utils.py'),
+                       'fastMONAI.utils.ModelTrackingCallback': ('utils.html#modeltrackingcallback', 'fastMONAI/utils.py'),
                        'fastMONAI.utils.ModelTrackingCallback.__init__': ( 'utils.html#modeltrackingcallback.__init__',
                                                                            'fastMONAI/utils.py'),
                        'fastMONAI.utils.ModelTrackingCallback._build_config': ( 'utils.html#modeltrackingcallback._build_config',
@@ -86,6 +99,12 @@ d = { 'settings': { 'branch': 'master',
                                                                  'fastMONAI/vision_augmentation.py'),
   'fastMONAI.vision_augmentation.CustomDictTransform.encodes': ( 'vision_augment.html#customdicttransform.encodes',
                                                                  'fastMONAI/vision_augmentation.py'),
+  'fastMONAI.vision_augmentation.NormalizeIntensity': ( 'vision_augment.html#normalizeintensity',
+                                                        'fastMONAI/vision_augmentation.py'),
+  'fastMONAI.vision_augmentation.NormalizeIntensity.__init__': ( 'vision_augment.html#normalizeintensity.__init__',
+                                                                 'fastMONAI/vision_augmentation.py'),
+  'fastMONAI.vision_augmentation.NormalizeIntensity.encodes': ( 'vision_augment.html#normalizeintensity.encodes',
+                                                                'fastMONAI/vision_augmentation.py'),
   'fastMONAI.vision_augmentation.OneOf': ( 'vision_augment.html#oneof',
                                            'fastMONAI/vision_augmentation.py'),
   'fastMONAI.vision_augmentation.OneOf.__init__': ( 'vision_augment.html#oneof.__init__',
@@ -150,6 +169,12 @@ d = { 'settings': { 'branch': 'master',
                                                         'fastMONAI/vision_augmentation.py'),
   'fastMONAI.vision_augmentation.RandomSpike.encodes': ( 'vision_augment.html#randomspike.encodes',
                                                          'fastMONAI/vision_augmentation.py'),
+  'fastMONAI.vision_augmentation.RescaleIntensity': ( 'vision_augment.html#rescaleintensity',
+                                                      'fastMONAI/vision_augmentation.py'),
+  'fastMONAI.vision_augmentation.RescaleIntensity.__init__': ( 'vision_augment.html#rescaleintensity.__init__',
+                                                               'fastMONAI/vision_augmentation.py'),
+  'fastMONAI.vision_augmentation.RescaleIntensity.encodes': ( 'vision_augment.html#rescaleintensity.encodes',
+                                                              'fastMONAI/vision_augmentation.py'),
   'fastMONAI.vision_augmentation.ZNormalization': ( 'vision_augment.html#znormalization',
                                                     'fastMONAI/vision_augmentation.py'),
   'fastMONAI.vision_augmentation.ZNormalization.__init__': ( 'vision_augment.html#znormalization.__init__',
--- fastmonai-0.5.0.0/fastMONAI/utils.py
+++ fastmonai-0.5.2/fastMONAI/utils.py
@@ -1,7 +1,7 @@
 # AUTOGENERATED! DO NOT EDIT! File to edit: ../nbs/07_utils.ipynb.
 
 # %% auto 0
-__all__ = ['store_variables', 'load_variables', 'print_colab_gpu_info', 'ModelTrackingCallback']
+__all__ = ['store_variables', 'load_variables', 'print_colab_gpu_info', 'ModelTrackingCallback', 'MLflowUIManager']
 
 # %% ../nbs/07_utils.ipynb 1
 import pickle
@@ -13,6 +13,7 @@ import os
 import tempfile
 import json
 from fastai.callback.core import Callback
+from fastcore.foundation import L
 from typing import Any
 
 # %% ../nbs/07_utils.ipynb 3
@@ -165,6 +166,8 @@ class ModelTrackingCallback(Callback):
 
         # Process each metric, handling both scalars and tensors
         for name, val in zip(metric_names, raw_metric_values):
+            if val is None:
+                continue  # Skip None values during inference
             if isinstance(val, torch.Tensor):
                 if val.numel() == 1:
                     # Single value tensor (like binary dice score)
@@ -182,8 +185,9 @@ class ModelTrackingCallback(Callback):
 
         # Handle loss values
        if len(recorder.log) >= 2:
-            metrics['train_loss'] = float(recorder.log[1])
-            if len(recorder.log) >= 3:
+            if recorder.log[1] is not None:
+                metrics['train_loss'] = float(recorder.log[1])
+            if len(recorder.log) >= 3 and recorder.log[2] is not None:
                 metrics['valid_loss'] = float(recorder.log[2])
 
         return metrics
@@ -197,10 +201,22 @@ class ModelTrackingCallback(Callback):
         if os.path.exists(weights_file):
             mlflow.log_artifact(weights_file, "model")
 
+        # Remove MLflow callbacks before exporting learner for inference
+        # This prevents the callback from being triggered during inference
+        original_cbs = self.learn.cbs.copy()  # Save original callbacks
+
+        # Remove ModelTrackingCallback instances from learner using proper collection type
+        filtered_cbs = L([cb for cb in self.learn.cbs if not isinstance(cb, ModelTrackingCallback)])
+        self.learn.cbs = filtered_cbs
+
+        # Export clean learner without MLflow callbacks
         learner_path = temp_dir / "learner.pkl"
         self.learn.export(str(learner_path))
         mlflow.log_artifact(str(learner_path), "model")
 
+        # Restore original callbacks for current session
+        self.learn.cbs = original_cbs
+
         config_path = temp_dir / "inference_settings.pkl"
         store_variables(config_path, self.size, self.reorder, self.resample)
         mlflow.log_artifact(str(config_path), "config")
@@ -235,3 +251,161 @@ class ModelTrackingCallback(Callback):
         self._register_pytorch_model()
 
         print(f"MLflow run completed. Run ID: {mlflow.active_run().info.run_id}")
+
+# %% ../nbs/07_utils.ipynb 7
+import subprocess
+import threading
+import time
+import socket
+import os
+from IPython.display import display, HTML, clear_output
+from IPython.core.magic import register_line_magic
+from IPython import get_ipython
+import requests
+import shutil
+
+class MLflowUIManager:
+    def __init__(self):
+        self.process = None
+        self.thread = None
+        self.port = 5001
+        self.host = '0.0.0.0'
+        self.backend_store_uri = './mlruns'
+
+    def is_port_available(self, port):
+        """Check if a port is available."""
+        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
+            try:
+                s.bind(('localhost', port))
+                return True
+            except OSError:
+                return False
+
+    def is_mlflow_running(self):
+        """Check if MLflow UI is actually responding."""
+        try:
+            response = requests.get(f'http://localhost:{self.port}', timeout=2)
+            return response.status_code == 200
+        except:
+            return False
+
+    def find_available_port(self, start_port=5001):
+        """Find an available port starting from start_port."""
+        for port in range(start_port, start_port + 10):
+            if self.is_port_available(port):
+                return port
+        return None
+
+    def check_mlflow_installed(self):
+        """Check if MLflow is installed."""
+        return shutil.which('mlflow') is not None
+
+    def start_ui(self, auto_open=True, quiet=False):
+        """Start MLflow UI with better error handling and user feedback."""
+
+        # Check if MLflow is installed
+        if not self.check_mlflow_installed():
+            if not quiet:
+                display(HTML('<div style="color: #d32f2f; font-weight: bold; font-size: 14px;">❌ MLflow not installed. Run: pip install mlflow</div>'))
+            return False
+
+        # Find available port
+        available_port = self.find_available_port(self.port)
+        if available_port is None:
+            if not quiet:
+                display(HTML('<div style="color: #d32f2f; font-weight: bold; font-size: 14px;">❌ No available ports found (5001-5010)</div>'))
+            return False
+
+        self.port = available_port
+
+        # Start MLflow UI in a separate thread
+        def run_mlflow():
+            try:
+                self.process = subprocess.Popen([
+                    'mlflow', 'ui',
+                    '--host', self.host,
+                    '--port', str(self.port),
+                    '--backend-store-uri', self.backend_store_uri
+                ], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
+                self.process.wait()
+            except Exception as e:
+                if not quiet:
+                    display(HTML(f'<div style="color: #d32f2f; font-weight: bold; font-size: 14px;">❌ Error: {str(e)}</div>'))
+
+        self.thread = threading.Thread(target=run_mlflow, daemon=True)
+        self.thread.start()
+
+        # Wait and check if server started successfully
+        max_wait = 10
+        for i in range(max_wait):
+            time.sleep(1)
+            if self.is_mlflow_running():
+                if quiet:
+                    # Bright, visible link for quiet mode
+                    display(HTML(f'''
+                        <a href="http://localhost:{self.port}" target="_blank"
+                           style="color: #1976d2; font-weight: bold; font-size: 16px; text-decoration: underline;">
+                            🔗 MLflow UI (Port {self.port})
+                        </a>
+                    '''))
+                else:
+                    # Success message with high contrast colors
+                    display(HTML(f'''
+                        <div style="background-color: #c8e6c9; border: 2px solid #388e3c; padding: 15px; border-radius: 8px; margin: 10px 0;">
+                            <div style="color: #1b5e20; font-weight: bold; font-size: 16px; margin-bottom: 10px;">
+                                ✅ MLflow UI is running successfully!
+                            </div>
+                            <a href="http://localhost:{self.port}" target="_blank"
+                               style="background-color: #1976d2; color: white; padding: 12px 24px; text-decoration: none; border-radius: 6px; font-weight: bold; font-size: 14px; display: inline-block; margin: 5px 0;">
+                                🔗 Open MLflow UI
+                            </a>
+                            <div style="margin-top: 10px;">
+                                <div style="color: #424242; font-size: 13px;">URL: http://localhost:{self.port}</div>
+                            </div>
+                        </div>
+                    '''))
+                return True
+
+        # If we get here, server didn't start properly
+        if not quiet:
+            display(HTML('<div style="color: #d32f2f; font-weight: bold; font-size: 14px;">❌ Failed to start MLflow UI</div>'))
+        return False
+
+    def stop(self):
+        """Stop the MLflow UI server."""
+        if self.process:
+            self.process.terminate()
+            self.process = None
+            display(HTML('''
+                <div style="background-color: #ffecb3; border: 2px solid #f57c00; padding: 10px; border-radius: 6px;">
+                    <span style="color: #e65100; font-weight: bold; font-size: 14px;">🛑 MLflow UI stopped</span>
+                </div>
+            '''))
+        else:
+            display(HTML('''
+                <div style="background-color: #f0f0f0; border: 2px solid #757575; padding: 10px; border-radius: 6px;">
+                    <span style="color: #424242; font-weight: bold; font-size: 14px;">ℹ️ MLflow UI is not currently running</span>
+                </div>
+            '''))
+
+    def status(self):
+        """Check MLflow UI status."""
+        if self.is_mlflow_running():
+            display(HTML(f'''
+                <div style="background-color: #c8e6c9; border: 2px solid #388e3c; padding: 10px; border-radius: 6px;">
+                    <div style="color: #1b5e20; font-weight: bold; font-size: 14px;">✅ MLflow UI is running</div>
+                    <a href="http://localhost:{self.port}" target="_blank"
+                       style="color: #1976d2; font-weight: bold; text-decoration: underline;">
+                        http://localhost:{self.port}
+                    </a>
+                </div>
+            '''))
+        else:
+            display(HTML('''
+                <div style="background-color: #ffcdd2; border: 2px solid #d32f2f; padding: 10px; border-radius: 6px;">
+                    <div style="color: #b71c1c; font-weight: bold; font-size: 14px;">❌ MLflow UI is not running</div>
+                    <div style="color: #424242; font-size: 13px; margin-top: 5px;">
+                        Run <code style="background-color: #f5f5f5; padding: 2px 4px; border-radius: 3px;">mlflow_ui.start_ui()</code> to start it.
+                    </div>
+                </div>
+            '''))
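
The bulk of this `utils.py` change is the new `MLflowUIManager` above. As a quick orientation, here is a minimal sketch of how it might be driven from a notebook, using only the methods visible in the diff; the `mlflow_ui` variable name echoes the class's own status message, while the import path and the surrounding workflow are assumptions rather than documented fastMONAI usage:

```python
# Hypothetical usage of the MLflowUIManager added in 0.5.2; the method names
# (start_ui, status, stop) come from the diff above, the workflow around them
# is an assumption.
from fastMONAI.utils import MLflowUIManager

mlflow_ui = MLflowUIManager()        # defaults: port 5001, store ./mlruns
if mlflow_ui.start_ui(quiet=True):   # spawns `mlflow ui` on a daemon thread
    mlflow_ui.status()               # re-display the clickable URL later
    # ... training runs logged by ModelTrackingCallback go here ...
    mlflow_ui.stop()                 # terminate the background server
```

Note also the export fix in the same file: the callback now strips `ModelTrackingCallback` instances (via fastcore's `L`) before `Learner.export` and restores them afterwards, so the exported learner no longer triggers MLflow logging at inference time.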
--- fastmonai-0.5.0.0/fastMONAI/vision_augmentation.py
+++ fastmonai-0.5.2/fastMONAI/vision_augmentation.py
@@ -1,14 +1,16 @@
 # AUTOGENERATED! DO NOT EDIT! File to edit: ../nbs/03_vision_augment.ipynb.
 
 # %% auto 0
-__all__ = ['CustomDictTransform', 'do_pad_or_crop', 'PadOrCrop', 'ZNormalization', 'BraTSMaskConverter', 'BinaryConverter',
-           'RandomGhosting', 'RandomSpike', 'RandomNoise', 'RandomBiasField', 'RandomBlur', 'RandomGamma',
-           'RandomMotion', 'RandomElasticDeformation', 'RandomAffine', 'RandomFlip', 'OneOf']
+__all__ = ['CustomDictTransform', 'do_pad_or_crop', 'PadOrCrop', 'ZNormalization', 'RescaleIntensity', 'NormalizeIntensity',
+           'BraTSMaskConverter', 'BinaryConverter', 'RandomGhosting', 'RandomSpike', 'RandomNoise', 'RandomBiasField',
+           'RandomBlur', 'RandomGamma', 'RandomMotion', 'RandomElasticDeformation', 'RandomAffine', 'RandomFlip',
+           'OneOf']
 
 # %% ../nbs/03_vision_augment.ipynb 2
 from fastai.data.all import *
 from .vision_core import *
 import torchio as tio
+from monai.transforms import NormalizeIntensity as MonaiNormalizeIntensity
 
 # %% ../nbs/03_vision_augment.ipynb 5
 class CustomDictTransform(ItemTransform):
@@ -84,9 +86,29 @@ class ZNormalization(DisplayedTransform):
         self.channel_wise = channel_wise
 
     def encodes(self, o: MedImage):
-        if self.channel_wise:
-            o = torch.stack([self.z_normalization(c[None])[0] for c in o])
-        else: o = self.z_normalization(o)
+        try:
+            if self.channel_wise:
+                o = torch.stack([self.z_normalization(c[None])[0] for c in o])
+            else:
+                o = self.z_normalization(o)
+        except RuntimeError as e:
+            if "Standard deviation is 0" in str(e):
+                # Calculate mean for debugging information
+                mean = float(o.mean())
+
+                error_msg = (
+                    f"Standard deviation is 0 for image (mean={mean:.3f}).\n"
+                    f"This indicates uniform pixel values.\n\n"
+                    f"Possible causes:\n"
+                    f"• Corrupted or blank image\n"
+                    f"• Oversaturated regions\n"
+                    f"• Background-only regions\n"
+                    f"• All-zero mask being processed as image\n\n"
+                    f"Suggested solutions:\n"
+                    f"• Check image quality and acquisition\n"
+                    f"• Verify image vs mask data loading"
+                )
+                raise RuntimeError(error_msg) from e
 
         return MedImage.create(o)
 
@@ -94,6 +116,68 @@ class ZNormalization(DisplayedTransform):
         return o
 
 # %% ../nbs/03_vision_augment.ipynb 10
+class RescaleIntensity(DisplayedTransform):
+    """Apply TorchIO RescaleIntensity for robust intensity scaling.
+
+    Args:
+        out_min_max (tuple[float, float]): Output intensity range (min, max)
+        in_min_max (tuple[float, float]): Input intensity range (min, max)
+
+    Example for CT images:
+        # Normalize CT from air (-1000 HU) to bone (1000 HU) into range (-1, 1)
+        transform = RescaleIntensity(out_min_max=(-1, 1), in_min_max=(-1000, 1000))
+    """
+
+    order = 0
+
+    def __init__(self, out_min_max: tuple[float, float], in_min_max: tuple[float, float]):
+        self.rescale = tio.RescaleIntensity(out_min_max=out_min_max, in_min_max=in_min_max)
+
+    def encodes(self, o: MedImage):
+        return MedImage.create(self.rescale(o))
+
+    def encodes(self, o: MedMask):
+        return o
+
+# %% ../nbs/03_vision_augment.ipynb 11
+class NormalizeIntensity(DisplayedTransform):
+    """Apply MONAI NormalizeIntensity.
+
+    Args:
+        nonzero (bool): Only normalize non-zero values (default: True)
+        channel_wise (bool): Apply normalization per channel (default: True)
+        subtrahend (float, optional): Value to subtract
+        divisor (float, optional): Value to divide by
+    """
+
+    order = 0
+
+    def __init__(self, nonzero: bool = True, channel_wise: bool = True,
+                 subtrahend: float = None, divisor: float = None):
+        self.nonzero = nonzero
+        self.channel_wise = channel_wise
+        self.subtrahend = subtrahend
+        self.divisor = divisor
+
+        self.transform = MonaiNormalizeIntensity(
+            nonzero=nonzero,
+            channel_wise=False,  # Always 'False', we handle channel-wise manually
+            subtrahend=subtrahend,
+            divisor=divisor
+        )
+
+    def encodes(self, o: MedImage):
+        if self.channel_wise:
+            result = torch.stack([self.transform(c[None])[0] for c in o])
+        else:
+            result = torch.Tensor(self.transform(o))
+
+        return MedImage.create(result)
+
+    def encodes(self, o: MedMask):
+        return o
+
+# %% ../nbs/03_vision_augment.ipynb 12
 class BraTSMaskConverter(DisplayedTransform):
     '''Convert BraTS masks.'''
 
@@ -105,7 +189,7 @@ class BraTSMaskConverter(DisplayedTransform):
         o = torch.where(o==4, 3., o)
         return MedMask.create(o)
 
-# %% ../nbs/03_vision_augment.ipynb 11
+# %% ../nbs/03_vision_augment.ipynb 13
 class BinaryConverter(DisplayedTransform):
     '''Convert to binary mask.'''
 
@@ -118,7 +202,7 @@ class BinaryConverter(DisplayedTransform):
         o = torch.where(o>0, 1., 0)
         return MedMask.create(o)
 
-# %% ../nbs/03_vision_augment.ipynb 12
+# %% ../nbs/03_vision_augment.ipynb 14
 class RandomGhosting(DisplayedTransform):
     """Apply TorchIO `RandomGhosting`."""
 
@@ -133,7 +217,7 @@ class RandomGhosting(DisplayedTransform):
     def encodes(self, o: MedMask):
         return o
 
-# %% ../nbs/03_vision_augment.ipynb 13
+# %% ../nbs/03_vision_augment.ipynb 15
 class RandomSpike(DisplayedTransform):
     '''Apply TorchIO `RandomSpike`.'''
 
@@ -148,7 +232,7 @@ class RandomSpike(DisplayedTransform):
     def encodes(self, o:MedMask):
         return o
 
-# %% ../nbs/03_vision_augment.ipynb 14
+# %% ../nbs/03_vision_augment.ipynb 16
 class RandomNoise(DisplayedTransform):
     '''Apply TorchIO `RandomNoise`.'''
 
@@ -163,7 +247,7 @@ class RandomNoise(DisplayedTransform):
     def encodes(self, o: MedMask):
         return o
 
-# %% ../nbs/03_vision_augment.ipynb 15
+# %% ../nbs/03_vision_augment.ipynb 17
 class RandomBiasField(DisplayedTransform):
     '''Apply TorchIO `RandomBiasField`.'''
 
@@ -178,7 +262,7 @@ class RandomBiasField(DisplayedTransform):
     def encodes(self, o: MedMask):
         return o
 
-# %% ../nbs/03_vision_augment.ipynb 16
+# %% ../nbs/03_vision_augment.ipynb 18
 class RandomBlur(DisplayedTransform):
     '''Apply TorchIO `RandomBiasField`.'''
 
@@ -193,7 +277,7 @@ class RandomBlur(DisplayedTransform):
     def encodes(self, o: MedMask):
         return o
 
-# %% ../nbs/03_vision_augment.ipynb 17
+# %% ../nbs/03_vision_augment.ipynb 19
 class RandomGamma(DisplayedTransform):
     '''Apply TorchIO `RandomGamma`.'''
 
@@ -209,7 +293,7 @@ class RandomGamma(DisplayedTransform):
     def encodes(self, o: MedMask):
         return o
 
-# %% ../nbs/03_vision_augment.ipynb 18
+# %% ../nbs/03_vision_augment.ipynb 20
 class RandomMotion(DisplayedTransform):
     """Apply TorchIO `RandomMotion`."""
 
@@ -237,7 +321,7 @@ class RandomMotion(DisplayedTransform):
     def encodes(self, o: MedMask):
         return o
 
-# %% ../nbs/03_vision_augment.ipynb 20
+# %% ../nbs/03_vision_augment.ipynb 22
 class RandomElasticDeformation(CustomDictTransform):
     """Apply TorchIO `RandomElasticDeformation`."""
 
@@ -250,7 +334,7 @@ class RandomElasticDeformation(CustomDictTransform):
                                                     image_interpolation=image_interpolation,
                                                     p=p))
 
-# %% ../nbs/03_vision_augment.ipynb 21
+# %% ../nbs/03_vision_augment.ipynb 23
 class RandomAffine(CustomDictTransform):
     """Apply TorchIO `RandomAffine`."""
 
@@ -266,14 +350,14 @@ class RandomAffine(CustomDictTransform):
                                          default_pad_value=default_pad_value,
                                          p=p))
 
-# %% ../nbs/03_vision_augment.ipynb 22
+# %% ../nbs/03_vision_augment.ipynb 24
 class RandomFlip(CustomDictTransform):
     """Apply TorchIO `RandomFlip`."""
 
     def __init__(self, axes='LR', p=0.5):
        super().__init__(tio.RandomFlip(axes=axes, flip_probability=p))
 
-# %% ../nbs/03_vision_augment.ipynb 23
+# %% ../nbs/03_vision_augment.ipynb 25
 class OneOf(CustomDictTransform):
     """Apply only one of the given transforms using TorchIO `OneOf`."""
 
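
The two new transforms are ordinary fastMONAI item transforms: they rescale or normalize `MedImage` inputs and pass `MedMask` inputs through untouched, so they are safe on image/mask pairs. A hedged sketch of where they might sit in a pipeline, using only the constructor signatures shown in the diff (the `PadOrCrop` size and the surrounding list follow fastMONAI's tutorial style and are assumptions, not part of this diff):

```python
# Hypothetical item-transform list using the transforms added in 0.5.2.
from fastMONAI.vision_all import *

item_tfms = [
    PadOrCrop([96, 96, 96]),  # pre-existing fastMONAI transform, assumed size
    # TorchIO-backed linear rescaling, e.g. CT from air (-1000 HU) to bone (1000 HU):
    RescaleIntensity(out_min_max=(-1, 1), in_min_max=(-1000, 1000)),
    # or MONAI-backed z-scoring of non-zero voxels, channel by channel:
    # NormalizeIntensity(nonzero=True, channel_wise=True),
]
```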
--- fastmonai-0.5.0.0/PKG-INFO
+++ fastmonai-0.5.2/fastMONAI.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: fastMONAI
-Version: 0.5.0.0
+Version: 0.5.2
 Summary: fastMONAI library
 Home-page: https://github.com/MMIV-ML/fastMONAI
 Author: Satheshkumar Kaliyugarasan
@@ -53,7 +53,7 @@ Dynamic: summary
 
 <!-- WARNING: THIS FILE WAS AUTOGENERATED! DO NOT EDIT! -->
 
-![CI badge](…)
+![CI badge](…)
 
 
 [](https://fastmonai.no)
@@ -120,15 +120,15 @@ https://fastmonai.no for more information.
 
 | Notebook | 1-Click Notebook |
 |:---|----|
-| [10a_tutorial_classification.ipynb](https://nbviewer.org/github/MMIV-ML/fastMONAI/blob/master/nbs/10a_tutorial_classification.ipynb) <br>shows how to construct a binary classification model based on MRI data. | [](https://colab.research.google.com/github/MMIV-ML/fastMONAI/blob/master/nbs/10a_tutorial_classification.ipynb) |
-| [10b_tutorial_regression.ipynb](https://nbviewer.org/github/MMIV-ML/fastMONAI/blob/master/nbs/10b_tutorial_regression.ipynb) <br>shows how to construct a model to predict the age of a subject from MRI scans (“brain age”). | [](https://colab.research.google.com/github/MMIV-ML/fastMONAI/blob/master/nbs/10b_tutorial_regression.ipynb) |
-| [10c_tutorial_binary_segmentation.ipynb](https://nbviewer.org/github/MMIV-ML/fastMONAI/blob/master/nbs/10c_tutorial_binary_segmentation.ipynb) <br>shows how to do binary segmentation (extract the left atrium from monomodal cardiac MRI). | [](https://colab.research.google.com/github/MMIV-ML/fastMONAI/blob/master/nbs/10c_tutorial_binary_segmentation.ipynb) |
-| [10d_tutorial_multiclass_segmentation.ipynb](https://nbviewer.org/github/MMIV-ML/fastMONAI/blob/master/nbs/10d_tutorial_multiclass_segmentation.ipynb) <br>shows how to perform segmentation from multimodal MRI (brain tumor segmentation). | [](https://colab.research.google.com/github/MMIV-ML/fastMONAI/blob/master/nbs/10d_tutorial_multiclass_segmentation.ipynb) |
+| [10a_tutorial_classification.ipynb](https://nbviewer.org/github/MMIV-ML/fastMONAI/blob/main/nbs/10a_tutorial_classification.ipynb) <br>shows how to construct a binary classification model based on MRI data. | [](https://colab.research.google.com/github/MMIV-ML/fastMONAI/blob/main/nbs/10a_tutorial_classification.ipynb) |
+| [10b_tutorial_regression.ipynb](https://nbviewer.org/github/MMIV-ML/fastMONAI/blob/main/nbs/10b_tutorial_regression.ipynb) <br>shows how to construct a model to predict the age of a subject from MRI scans (“brain age”). | [](https://colab.research.google.com/github/MMIV-ML/fastMONAI/blob/main/nbs/10b_tutorial_regression.ipynb) |
+| [10c_tutorial_binary_segmentation.ipynb](https://nbviewer.org/github/MMIV-ML/fastMONAI/blob/main/nbs/10c_tutorial_binary_segmentation.ipynb) <br>shows how to do binary segmentation (extract the left atrium from monomodal cardiac MRI). | [](https://colab.research.google.com/github/MMIV-ML/fastMONAI/blob/main/nbs/10c_tutorial_binary_segmentation.ipynb) |
+| [10d_tutorial_multiclass_segmentation.ipynb](https://nbviewer.org/github/MMIV-ML/fastMONAI/blob/main/nbs/10d_tutorial_multiclass_segmentation.ipynb) <br>shows how to perform segmentation from multimodal MRI (brain tumor segmentation). | [](https://colab.research.google.com/github/MMIV-ML/fastMONAI/blob/main/nbs/10d_tutorial_multiclass_segmentation.ipynb) |
 
 # How to contribute
 
 We welcome contributions! See
-[CONTRIBUTING.md](https://github.com/MMIV-ML/fastMONAI/blob/master/CONTRIBUTING.md)
+[CONTRIBUTING.md](https://github.com/MMIV-ML/fastMONAI/blob/main/CONTRIBUTING.md)
 
 # Citing fastMONAI
 
--- fastmonai-0.5.0.0/pyproject.toml
+++ fastmonai-0.5.2/pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
 
 [project]
 name="fastMONAI"
-requires-python=">=3.…
+requires-python=">=3.10"
 dynamic = [ "keywords", "description", "version", "dependencies", "optional-dependencies", "readme", "license", "authors", "classifiers", "entry-points", "scripts", "urls"]
 
 [tool.uv]
--- fastmonai-0.5.0.0/settings.ini
+++ fastmonai-0.5.2/settings.ini
@@ -5,7 +5,7 @@
 ### Python Library ###
 lib_name = fastMONAI
 min_python = 3.10
-version = 0.5.0.0
+version = 0.5.2
 ### OPTIONAL ###
 
 requirements = fastai==2.8.3 monai==1.5.0 torchio==0.20.19 xlrd>=1.2.0 scikit-image==0.25.2 imagedata==3.8.4 mlflow==3.3.1 huggingface-hub gdown gradio opencv-python plum-dispatch
@@ -20,7 +20,7 @@ tst_flags = notest
 ### Documentation ###
 host = github
 repo = fastMONAI
-branch = master
+branch = main
 custom_sidebar = True
 jupyter_hooks = True
 clean_ids = False
--- fastmonai-0.5.0.0/fastMONAI/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-__version__ = "0.5.0.0"