fastMONAI 0.5.0.0__py3-none-any.whl → 0.5.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
fastMONAI/__init__.py CHANGED
@@ -1 +1 @@
- __version__ = "0.5.0.0"
+ __version__ = "0.5.1"
fastMONAI/_modidx.py CHANGED
@@ -47,7 +47,20 @@ d = { 'settings': { 'branch': 'master',
  'fastMONAI/external_data.py')},
  'fastMONAI.research_utils': { 'fastMONAI.research_utils.pred_postprocess': ( 'research_utils.html#pred_postprocess',
  'fastMONAI/research_utils.py')},
- 'fastMONAI.utils': { 'fastMONAI.utils.ModelTrackingCallback': ('utils.html#modeltrackingcallback', 'fastMONAI/utils.py'),
+ 'fastMONAI.utils': { 'fastMONAI.utils.MLflowUIManager': ('utils.html#mlflowuimanager', 'fastMONAI/utils.py'),
+ 'fastMONAI.utils.MLflowUIManager.__init__': ('utils.html#mlflowuimanager.__init__', 'fastMONAI/utils.py'),
+ 'fastMONAI.utils.MLflowUIManager.check_mlflow_installed': ( 'utils.html#mlflowuimanager.check_mlflow_installed',
+ 'fastMONAI/utils.py'),
+ 'fastMONAI.utils.MLflowUIManager.find_available_port': ( 'utils.html#mlflowuimanager.find_available_port',
+ 'fastMONAI/utils.py'),
+ 'fastMONAI.utils.MLflowUIManager.is_mlflow_running': ( 'utils.html#mlflowuimanager.is_mlflow_running',
+ 'fastMONAI/utils.py'),
+ 'fastMONAI.utils.MLflowUIManager.is_port_available': ( 'utils.html#mlflowuimanager.is_port_available',
+ 'fastMONAI/utils.py'),
+ 'fastMONAI.utils.MLflowUIManager.start_ui': ('utils.html#mlflowuimanager.start_ui', 'fastMONAI/utils.py'),
+ 'fastMONAI.utils.MLflowUIManager.status': ('utils.html#mlflowuimanager.status', 'fastMONAI/utils.py'),
+ 'fastMONAI.utils.MLflowUIManager.stop': ('utils.html#mlflowuimanager.stop', 'fastMONAI/utils.py'),
+ 'fastMONAI.utils.ModelTrackingCallback': ('utils.html#modeltrackingcallback', 'fastMONAI/utils.py'),
  'fastMONAI.utils.ModelTrackingCallback.__init__': ( 'utils.html#modeltrackingcallback.__init__',
  'fastMONAI/utils.py'),
  'fastMONAI.utils.ModelTrackingCallback._build_config': ( 'utils.html#modeltrackingcallback._build_config',
fastMONAI/utils.py CHANGED
@@ -1,7 +1,7 @@
  # AUTOGENERATED! DO NOT EDIT! File to edit: ../nbs/07_utils.ipynb.

  # %% auto 0
- __all__ = ['store_variables', 'load_variables', 'print_colab_gpu_info', 'ModelTrackingCallback']
+ __all__ = ['store_variables', 'load_variables', 'print_colab_gpu_info', 'ModelTrackingCallback', 'MLflowUIManager']

  # %% ../nbs/07_utils.ipynb 1
  import pickle
@@ -13,6 +13,7 @@ import os
  import tempfile
  import json
  from fastai.callback.core import Callback
+ from fastcore.foundation import L
  from typing import Any

  # %% ../nbs/07_utils.ipynb 3
@@ -165,6 +166,8 @@ class ModelTrackingCallback(Callback):

          # Process each metric, handling both scalars and tensors
          for name, val in zip(metric_names, raw_metric_values):
+             if val is None:
+                 continue # Skip None values during inference
              if isinstance(val, torch.Tensor):
                  if val.numel() == 1:
                      # Single value tensor (like binary dice score)
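As an aside, here is a minimal standalone sketch of the None-guard added above, using hypothetical metric names and values (during inference the recorder can report None for metrics that were not computed):

metric_names = ['dice_score', 'hausdorff_distance']  # hypothetical metric names
raw_metric_values = [0.87, None]                      # None: metric not computed in this pass

metrics = {}
for name, val in zip(metric_names, raw_metric_values):
    if val is None:
        continue  # skip metrics that were not computed
    metrics[name] = float(val)

print(metrics)  # {'dice_score': 0.87}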
@@ -182,8 +185,9 @@ class ModelTrackingCallback(Callback):

          # Handle loss values
          if len(recorder.log) >= 2:
-             metrics['train_loss'] = float(recorder.log[1])
-         if len(recorder.log) >= 3:
+             if recorder.log[1] is not None:
+                 metrics['train_loss'] = float(recorder.log[1])
+         if len(recorder.log) >= 3 and recorder.log[2] is not None:
              metrics['valid_loss'] = float(recorder.log[2])

          return metrics
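Likewise, a small sketch of the guarded loss extraction, assuming a hypothetical recorder.log laid out as [epoch, train_loss, valid_loss, ...]:

log = [0, 0.42, None]  # hypothetical recorder.log; valid_loss missing during inference

metrics = {}
if len(log) >= 2 and log[1] is not None:
    metrics['train_loss'] = float(log[1])
if len(log) >= 3 and log[2] is not None:
    metrics['valid_loss'] = float(log[2])

print(metrics)  # {'train_loss': 0.42}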
@@ -197,10 +201,22 @@ class ModelTrackingCallback(Callback):
              if os.path.exists(weights_file):
                  mlflow.log_artifact(weights_file, "model")

+             # Remove MLflow callbacks before exporting learner for inference
+             # This prevents the callback from being triggered during inference
+             original_cbs = self.learn.cbs.copy() # Save original callbacks
+
+             # Remove ModelTrackingCallback instances from learner using proper collection type
+             filtered_cbs = L([cb for cb in self.learn.cbs if not isinstance(cb, ModelTrackingCallback)])
+             self.learn.cbs = filtered_cbs
+
+             # Export clean learner without MLflow callbacks
              learner_path = temp_dir / "learner.pkl"
              self.learn.export(str(learner_path))
              mlflow.log_artifact(str(learner_path), "model")

+             # Restore original callbacks for current session
+             self.learn.cbs = original_cbs
+
              config_path = temp_dir / "inference_settings.pkl"
              store_variables(config_path, self.size, self.reorder, self.resample)
              mlflow.log_artifact(str(config_path), "config")
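The filtering above keeps learn.cbs as a fastcore L rather than a plain list. A minimal standalone sketch of the same remove-and-restore pattern, using hypothetical stand-in callbacks instead of fastMONAI's own classes:

from fastcore.foundation import L
from fastai.callback.core import Callback

class TrackingCB(Callback): pass   # stand-in for ModelTrackingCallback
class PlainCB(Callback): pass      # unrelated callback that should survive export

cbs = L([TrackingCB(), PlainCB()])  # stands in for learn.cbs
original_cbs = cbs.copy()           # saved so the live session keeps tracking

filtered = L([cb for cb in cbs if not isinstance(cb, TrackingCB)])
print(len(filtered))                # 1 -> only PlainCB would go into the export

cbs = original_cbs                  # restore after export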
@@ -235,3 +251,161 @@ class ModelTrackingCallback(Callback):
          self._register_pytorch_model()

          print(f"MLflow run completed. Run ID: {mlflow.active_run().info.run_id}")
+
+ # %% ../nbs/07_utils.ipynb 7
+ import subprocess
+ import threading
+ import time
+ import socket
+ import os
+ from IPython.display import display, HTML, clear_output
+ from IPython.core.magic import register_line_magic
+ from IPython import get_ipython
+ import requests
+ import shutil
+
+ class MLflowUIManager:
+     def __init__(self):
+         self.process = None
+         self.thread = None
+         self.port = 5001
+         self.host = '0.0.0.0'
+         self.backend_store_uri = './mlruns'
+
+     def is_port_available(self, port):
+         """Check if a port is available."""
+         with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
+             try:
+                 s.bind(('localhost', port))
+                 return True
+             except OSError:
+                 return False
+
+     def is_mlflow_running(self):
+         """Check if MLflow UI is actually responding."""
+         try:
+             response = requests.get(f'http://localhost:{self.port}', timeout=2)
+             return response.status_code == 200
+         except:
+             return False
+
+     def find_available_port(self, start_port=5001):
+         """Find an available port starting from start_port."""
+         for port in range(start_port, start_port + 10):
+             if self.is_port_available(port):
+                 return port
+         return None
+
+     def check_mlflow_installed(self):
+         """Check if MLflow is installed."""
+         return shutil.which('mlflow') is not None
+
+     def start_ui(self, auto_open=True, quiet=False):
+         """Start MLflow UI with better error handling and user feedback."""
+
+         # Check if MLflow is installed
+         if not self.check_mlflow_installed():
+             if not quiet:
+                 display(HTML('<div style="color: #d32f2f; font-weight: bold; font-size: 14px;">❌ MLflow not installed. Run: pip install mlflow</div>'))
+             return False
+
+         # Find available port
+         available_port = self.find_available_port(self.port)
+         if available_port is None:
+             if not quiet:
+                 display(HTML('<div style="color: #d32f2f; font-weight: bold; font-size: 14px;">❌ No available ports found (5001-5010)</div>'))
+             return False
+
+         self.port = available_port
+
+         # Start MLflow UI in a separate thread
+         def run_mlflow():
+             try:
+                 self.process = subprocess.Popen([
+                     'mlflow', 'ui',
+                     '--host', self.host,
+                     '--port', str(self.port),
+                     '--backend-store-uri', self.backend_store_uri
+                 ], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
+                 self.process.wait()
+             except Exception as e:
+                 if not quiet:
+                     display(HTML(f'<div style="color: #d32f2f; font-weight: bold; font-size: 14px;">❌ Error: {str(e)}</div>'))
+
+         self.thread = threading.Thread(target=run_mlflow, daemon=True)
+         self.thread.start()
+
+         # Wait and check if server started successfully
+         max_wait = 10
+         for i in range(max_wait):
+             time.sleep(1)
+             if self.is_mlflow_running():
+                 if quiet:
+                     # Bright, visible link for quiet mode
+                     display(HTML(f'''
+                         <a href="http://localhost:{self.port}" target="_blank"
+                            style="color: #1976d2; font-weight: bold; font-size: 16px; text-decoration: underline;">
+                             🔗 MLflow UI (Port {self.port})
+                         </a>
+                     '''))
+                 else:
+                     # Success message with high contrast colors
+                     display(HTML(f'''
+                         <div style="background-color: #c8e6c9; border: 2px solid #388e3c; padding: 15px; border-radius: 8px; margin: 10px 0;">
+                             <div style="color: #1b5e20; font-weight: bold; font-size: 16px; margin-bottom: 10px;">
+                                 ✅ MLflow UI is running successfully!
+                             </div>
+                             <a href="http://localhost:{self.port}" target="_blank"
+                                style="background-color: #1976d2; color: white; padding: 12px 24px; text-decoration: none; border-radius: 6px; font-weight: bold; font-size: 14px; display: inline-block; margin: 5px 0;">
+                                 🔗 Open MLflow UI
+                             </a>
+                             <div style="margin-top: 10px;">
+                                 <div style="color: #424242; font-size: 13px;">URL: http://localhost:{self.port}</div>
+                             </div>
+                         </div>
+                     '''))
+                 return True
+
+         # If we get here, server didn't start properly
+         if not quiet:
+             display(HTML('<div style="color: #d32f2f; font-weight: bold; font-size: 14px;">❌ Failed to start MLflow UI</div>'))
+         return False
+
+     def stop(self):
+         """Stop the MLflow UI server."""
+         if self.process:
+             self.process.terminate()
+             self.process = None
+             display(HTML('''
+                 <div style="background-color: #ffecb3; border: 2px solid #f57c00; padding: 10px; border-radius: 6px;">
+                     <span style="color: #e65100; font-weight: bold; font-size: 14px;">🛑 MLflow UI stopped</span>
+                 </div>
+             '''))
+         else:
+             display(HTML('''
+                 <div style="background-color: #f0f0f0; border: 2px solid #757575; padding: 10px; border-radius: 6px;">
+                     <span style="color: #424242; font-weight: bold; font-size: 14px;">ℹ️ MLflow UI is not currently running</span>
+                 </div>
+             '''))
+
+     def status(self):
+         """Check MLflow UI status."""
+         if self.is_mlflow_running():
+             display(HTML(f'''
+                 <div style="background-color: #c8e6c9; border: 2px solid #388e3c; padding: 10px; border-radius: 6px;">
+                     <div style="color: #1b5e20; font-weight: bold; font-size: 14px;">✅ MLflow UI is running</div>
+                     <a href="http://localhost:{self.port}" target="_blank"
+                        style="color: #1976d2; font-weight: bold; text-decoration: underline;">
+                         http://localhost:{self.port}
+                     </a>
+                 </div>
+             '''))
+         else:
+             display(HTML('''
+                 <div style="background-color: #ffcdd2; border: 2px solid #d32f2f; padding: 10px; border-radius: 6px;">
+                     <div style="color: #b71c1c; font-weight: bold; font-size: 14px;">❌ MLflow UI is not running</div>
+                     <div style="color: #424242; font-size: 13px; margin-top: 5px;">
+                         Run <code style="background-color: #f5f5f5; padding: 2px 4px; border-radius: 3px;">mlflow_ui.start_ui()</code> to start it.
+                     </div>
+                 </div>
+             '''))
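For reference, a minimal usage sketch of the new MLflowUIManager (assuming mlflow is installed and the code runs in a Jupyter/IPython session, since the methods render HTML status messages; the variable name mlflow_ui is illustrative):

from fastMONAI.utils import MLflowUIManager

mlflow_ui = MLflowUIManager()   # defaults: port 5001, backend store './mlruns'
mlflow_ui.start_ui(quiet=True)  # launches 'mlflow ui' in a background thread and shows a link
mlflow_ui.status()              # reports whether http://localhost:<port> is responding
mlflow_ui.stop()                # terminates the mlflow subprocess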
fastmonai-0.5.0.0.dist-info/METADATA → fastmonai-0.5.1.dist-info/METADATA
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: fastMONAI
- Version: 0.5.0.0
+ Version: 0.5.1
  Summary: fastMONAI library
  Home-page: https://github.com/MMIV-ML/fastMONAI
  Author: Satheshkumar Kaliyugarasan
fastmonai-0.5.0.0.dist-info/RECORD → fastmonai-0.5.1.dist-info/RECORD
@@ -1,9 +1,9 @@
- fastMONAI/__init__.py,sha256=T19ge-mjTQc59d5RqcDcri4stPYuQEaLR3cFKwPhDfI,24
- fastMONAI/_modidx.py,sha256=sVdaZs4ZogSisNkxAN1n96jaayoJqsrODsf9xodrL14,36080
+ fastMONAI/__init__.py,sha256=eZ1bOun1DDVV0YLOBW4wj2FP1ajReLjbIrGmzN7ASBw,22
+ fastMONAI/_modidx.py,sha256=iuYWVoaM3bmpb_Dv6lMQU8zOVPA76hYfnyFxVgKyuBg,37759
  fastMONAI/dataset_info.py,sha256=aJ-utYZ1OrA32RIQbF7jHxcDE8SgOZE3Vt1AojxnvZc,5026
  fastMONAI/external_data.py,sha256=IVj9GbIRFh9bTFkIa2wySUObSnNfZiaVtuzFxOFAi0Q,12219
  fastMONAI/research_utils.py,sha256=LZu62g8BQAVYS4dD7qDsKHJXZnDd1uLkJ6LoaMDhUhk,590
- fastMONAI/utils.py,sha256=Gr5IGb3v-tfpdFotoIaSAECPXDZS3ECOBSdvQx5vb-A,8647
+ fastMONAI/utils.py,sha256=jG8SiYebcrPJsmnmMZh4SokWRj7McdJ_gftINnfcE1A,16590
  fastMONAI/vision_all.py,sha256=_l6F8ZlUaPYcplNG6mg1-1xssYforByEe4zECbPzTck,359
  fastMONAI/vision_augmentation.py,sha256=lAlrLm8jbXRmk9a6e8_o_CNTS6Pyp-KKNXwjpelUUJc,9070
  fastMONAI/vision_core.py,sha256=k4RUBzZuh9W8J4zbcVzXCKfJxkKCsBDG0oSRMwiCNp0,13848
@@ -12,9 +12,9 @@ fastMONAI/vision_inference.py,sha256=3SaJbKGbgaf9ON9PH5DtvfNlhAurov_Idnrlp4jyU9w
  fastMONAI/vision_loss.py,sha256=NrHnk1yD4EBKsp6aippppXU4l-mwmsZOqE_bsZP3ZNI,3591
  fastMONAI/vision_metrics.py,sha256=CVxdOBPaMJT6Mo5jF3WoQj6a3C-_FsnBicMAU_ZrFS8,3549
  fastMONAI/vision_plot.py,sha256=-X_nNBXx7lYCZSFBIN1587ZTA3T_-2ASBM4K31wU660,3792
- fastmonai-0.5.0.0.dist-info/licenses/LICENSE,sha256=xV8xoN4VOL0uw9X8RSs2IMuD_Ss_a9yAbtGNeBWZwnw,11337
- fastmonai-0.5.0.0.dist-info/METADATA,sha256=eO5N4RAweItcte4WhnzMkrfn4d-Vbbs19hZjqxU_TZI,7096
- fastmonai-0.5.0.0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
- fastmonai-0.5.0.0.dist-info/entry_points.txt,sha256=mVBsykSXMairzzk3hJaQ8c-UiwUZqGnn4aFZ24CpsBM,40
- fastmonai-0.5.0.0.dist-info/top_level.txt,sha256=o8y7SWF9odtnIT3jvYtUn9okbJRlaAMCy7oPFCeQvQ8,10
- fastmonai-0.5.0.0.dist-info/RECORD,,
+ fastmonai-0.5.1.dist-info/licenses/LICENSE,sha256=xV8xoN4VOL0uw9X8RSs2IMuD_Ss_a9yAbtGNeBWZwnw,11337
+ fastmonai-0.5.1.dist-info/METADATA,sha256=VZcJOlNQR7g3mJZkMnnoukPPJZ0yt_B5T9r5yPdCLBk,7094
+ fastmonai-0.5.1.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ fastmonai-0.5.1.dist-info/entry_points.txt,sha256=mVBsykSXMairzzk3hJaQ8c-UiwUZqGnn4aFZ24CpsBM,40
+ fastmonai-0.5.1.dist-info/top_level.txt,sha256=o8y7SWF9odtnIT3jvYtUn9okbJRlaAMCy7oPFCeQvQ8,10
+ fastmonai-0.5.1.dist-info/RECORD,,