fastMONAI 0.5.3__py3-none-any.whl → 0.6.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
fastMONAI/vision_plot.py CHANGED
@@ -1,9 +1,10 @@
1
1
  # AUTOGENERATED! DO NOT EDIT! File to edit: ../nbs/00_vision_plot.ipynb.
2
2
 
3
3
  # %% auto 0
4
- __all__ = ['validate_anatomical_plane', 'show_med_img', 'find_max_slice']
4
+ __all__ = ['validate_anatomical_plane', 'show_med_img', 'find_max_slice', 'show_segmentation_comparison']
5
5
 
6
6
  # %% ../nbs/00_vision_plot.ipynb 1
7
+ import warnings
7
8
  from fastai.data.all import *
8
9
  from torchio.visualization import rotate
9
10
 
@@ -114,3 +115,91 @@ def find_max_slice(mask_data, anatomical_plane):
114
115
  idx = np.argmax(sums)
115
116
 
116
117
  return idx
118
+
119
# %% ../nbs/00_vision_plot.ipynb 7
def show_segmentation_comparison(
    image, ground_truth, prediction,
    slice_index: "int | None" = None,
    anatomical_plane: int = 2,
    metric_value: "float | None" = None,
    metric_name: str = 'DSC',
    channel: int = 0,
    figsize: tuple = (15, 5),
    cmap_img: str = 'gray',
    cmap_mask: str = 'gray',
    voxel_size=None
):
    """Display 3-panel comparison: Image | Ground Truth | Prediction.

    Useful for validating segmentation results, especially after patch-based
    inference where results are not in standard fastai batch format.

    Args:
        image: Input image (MedImage, MedMask, or tensor [C, H, W, D])
        ground_truth: Ground truth mask (MedMask or tensor [C, H, W, D])
        prediction: Predicted mask (tensor [C, H, W, D])
        slice_index: Slice to display. If None, uses find_max_slice on ground_truth.
        anatomical_plane: 0=sagittal, 1=coronal, 2=axial (default)
        metric_value: Optional metric value to display in prediction title
        metric_name: Name of metric for title (default 'DSC')
        channel: Channel to display for multi-channel data (default 0)
        figsize: Figure size (default (15, 5))
        cmap_img: Colormap for image (default 'gray')
        cmap_mask: Colormap for masks (default 'gray')
        voxel_size: Voxel spacing for aspect ratio. If None, aspect=1.

    Example::

        # After patch_inference()
        show_segmentation_comparison(
            image=val_img,
            ground_truth=val_gt,
            prediction=predictions[0],
            metric_value=results_df.iloc[0]['DSC'],
            anatomical_plane=2
        )
    """
    validate_anatomical_plane(anatomical_plane)

    # Unwrap MedImage/MedMask-style objects to their underlying tensors;
    # plain tensors pass through untouched.
    img_data = image.data if hasattr(image, 'data') else image
    gt_data = ground_truth.data if hasattr(ground_truth, 'data') else ground_truth
    pred_data = prediction.data if hasattr(prediction, 'data') else prediction

    # Move to CPU so .numpy()/matplotlib work on GPU-resident tensors.
    if hasattr(img_data, 'cpu'): img_data = img_data.cpu()
    if hasattr(gt_data, 'cpu'): gt_data = gt_data.cpu()
    if hasattr(pred_data, 'cpu'): pred_data = pred_data.cpu()

    # Default to the slice with the largest ground-truth mask area.
    if slice_index is None:
        gt_np = gt_data[channel].numpy()
        slice_index = find_max_slice(gt_np, anatomical_plane)

    # Create figure
    fig, axes = plt.subplots(1, 3, figsize=figsize)

    # Get slices with proper aspect ratio using existing _get_slice helper
    img_slice, img_aspect = _get_slice(img_data, channel, slice_index, anatomical_plane, voxel_size)
    gt_slice, gt_aspect = _get_slice(gt_data, channel, slice_index, anatomical_plane, voxel_size)
    pred_slice, pred_aspect = _get_slice(pred_data, channel, slice_index, anatomical_plane, voxel_size)

    # Plot panels
    axes[0].imshow(img_slice, cmap=cmap_img, aspect=img_aspect)
    axes[0].set_title('Input Image')
    axes[0].axis('off')

    axes[1].imshow(gt_slice, cmap=cmap_mask, aspect=gt_aspect)
    axes[1].set_title('Ground Truth')
    axes[1].axis('off')

    # Append the metric to the prediction panel's title when provided.
    pred_title = 'Prediction'
    if metric_value is not None:
        pred_title = f'Prediction ({metric_name}: {metric_value:.4f})'

    axes[2].imshow(pred_slice, cmap=cmap_mask, aspect=pred_aspect)
    axes[2].set_title(pred_title)
    axes[2].axis('off')

    plt.tight_layout()
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: fastMONAI
3
- Version: 0.5.3
3
+ Version: 0.6.0
4
4
  Summary: fastMONAI library
5
5
  Home-page: https://github.com/MMIV-ML/fastMONAI
6
6
  Author: Satheshkumar Kaliyugarasan
@@ -120,10 +120,10 @@ https://fastmonai.no for more information.
120
120
 
121
121
  | Notebook | 1-Click Notebook |
122
122
  |:---|----|
123
- | [10a_tutorial_classification.ipynb](https://nbviewer.org/github/MMIV-ML/fastMONAI/blob/main/nbs/10a_tutorial_classification.ipynb) <br>shows how to construct a binary classification model based on MRI data. | [![Google Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/MMIV-ML/fastMONAI/blob/main/nbs/10a_tutorial_classification.ipynb) |
124
- | [10b_tutorial_regression.ipynb](https://nbviewer.org/github/MMIV-ML/fastMONAI/blob/main/nbs/10b_tutorial_regression.ipynb) <br>shows how to construct a model to predict the age of a subject from MRI scans (“brain age”). | [![Google Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/MMIV-ML/fastMONAI/blob/main/nbs/10b_tutorial_regression.ipynb) |
125
- | [10c_tutorial_binary_segmentation.ipynb](https://nbviewer.org/github/MMIV-ML/fastMONAI/blob/main/nbs/10c_tutorial_binary_segmentation.ipynb) <br>shows how to do binary segmentation (extract the left atrium from monomodal cardiac MRI). | [![Google Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/MMIV-ML/fastMONAI/blob/main/nbs/10c_tutorial_binary_segmentation.ipynb) |
126
- | [10d_tutorial_multiclass_segmentation.ipynb](https://nbviewer.org/github/MMIV-ML/fastMONAI/blob/main/nbs/10d_tutorial_multiclass_segmentation.ipynb) <br>shows how to perform segmentation from multimodal MRI (brain tumor segmentation). | [![Google Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/MMIV-ML/fastMONAI/blob/main/nbs/10d_tutorial_multiclass_segmentation.ipynb) |
123
+ | [11b_tutorial_classification.ipynb](https://nbviewer.org/github/MMIV-ML/fastMONAI/blob/main/nbs/11b_tutorial_classification.ipynb) <br>shows how to construct a binary classification model based on MRI data. | [![Google Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/MMIV-ML/fastMONAI/blob/main/nbs/11b_tutorial_classification.ipynb) |
124
+ | [11c_tutorial_regression.ipynb](https://nbviewer.org/github/MMIV-ML/fastMONAI/blob/main/nbs/11c_tutorial_regression.ipynb) <br>shows how to construct a model to predict the age of a subject from MRI scans (“brain age”). | [![Google Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/MMIV-ML/fastMONAI/blob/main/nbs/11c_tutorial_regression.ipynb) |
125
+ | [11d_tutorial_binary_segmentation.ipynb](https://nbviewer.org/github/MMIV-ML/fastMONAI/blob/main/nbs/11d_tutorial_binary_segmentation.ipynb) <br>shows how to do binary segmentation (extract the left atrium from monomodal cardiac MRI). | [![Google Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/MMIV-ML/fastMONAI/blob/main/nbs/11d_tutorial_binary_segmentation.ipynb) |
126
+ | [11e_tutorial_multiclass_segmentation.ipynb](https://nbviewer.org/github/MMIV-ML/fastMONAI/blob/main/nbs/11e_tutorial_multiclass_segmentation.ipynb) <br>shows how to perform segmentation from multimodal MRI (brain tumor segmentation). | [![Google Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/MMIV-ML/fastMONAI/blob/main/nbs/11e_tutorial_multiclass_segmentation.ipynb) |
127
127
 
128
128
  # How to contribute
129
129
 
@@ -0,0 +1,21 @@
1
+ fastMONAI/__init__.py,sha256=cID1jLnC_vj48GgMN6Yb1FA3JsQ95zNmCHmRYE8TFhY,22
2
+ fastMONAI/_modidx.py,sha256=9e2FCrV3l4HYPJ-qcHUVqI_Q6ps6UVxEEdD3OBCSwhw,66249
3
+ fastMONAI/dataset_info.py,sha256=hLNX0Jps8dPCbF6HYPO2PScZtn2qqAQBH07LtSubeFM,16306
4
+ fastMONAI/external_data.py,sha256=Ofa6RmSKYj8LKlzWqPGfg9lu9lTjBmolJTo1zVgTe6g,12263
5
+ fastMONAI/research_utils.py,sha256=LZu62g8BQAVYS4dD7qDsKHJXZnDd1uLkJ6LoaMDhUhk,590
6
+ fastMONAI/utils.py,sha256=8avga5wIGK4IlJ04SrN_Ba5STkBSuEA5G06EvdDHNBs,30493
7
+ fastMONAI/vision_all.py,sha256=L2JVYQq77X7Ko2SmzKUGAXwVjw898V_skGuQuYxjVd8,385
8
+ fastMONAI/vision_augmentation.py,sha256=nhFA-usts59AIYdDtzX8QJ1HrGY3bcEKu2koP3HZews,20604
9
+ fastMONAI/vision_core.py,sha256=xXGnutYD3sdWzpoMZHSfClUFmphsB7yFsDcY91Fa844,9539
10
+ fastMONAI/vision_data.py,sha256=_SgwSlNm4ZOooFnrp5vYnA7ZAweV60a3XaOnozDKm6w,11569
11
+ fastMONAI/vision_inference.py,sha256=fRPgIO-3XifBlCZY7qaucgymSLQQajcKvmZKWg0XfS4,7688
12
+ fastMONAI/vision_loss.py,sha256=NrHnk1yD4EBKsp6aippppXU4l-mwmsZOqE_bsZP3ZNI,3591
13
+ fastMONAI/vision_metrics.py,sha256=OED2ewWaZUtmkqc5CjmBWDboTVUlpsnsuFU-ZrIw4tI,18572
14
+ fastMONAI/vision_patch.py,sha256=SkEdOc_U2nrpjIVZ3wW4qwgciKnOCuzQXixRGyK3KaA,50118
15
+ fastMONAI/vision_plot.py,sha256=7HLkKQAwmwPvwrDxWlKP5IIHNNwWIrLkdsejqlOc27k,7282
16
+ fastmonai-0.6.0.dist-info/licenses/LICENSE,sha256=xV8xoN4VOL0uw9X8RSs2IMuD_Ss_a9yAbtGNeBWZwnw,11337
17
+ fastmonai-0.6.0.dist-info/METADATA,sha256=vdTSIppLb0xQtNgaAtKQfyTDKH7qqVd2sZFsv1cKgoI,7075
18
+ fastmonai-0.6.0.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
19
+ fastmonai-0.6.0.dist-info/entry_points.txt,sha256=mVBsykSXMairzzk3hJaQ8c-UiwUZqGnn4aFZ24CpsBM,40
20
+ fastmonai-0.6.0.dist-info/top_level.txt,sha256=o8y7SWF9odtnIT3jvYtUn9okbJRlaAMCy7oPFCeQvQ8,10
21
+ fastmonai-0.6.0.dist-info/RECORD,,
@@ -1,5 +1,5 @@
1
1
  Wheel-Version: 1.0
2
- Generator: setuptools (80.9.0)
2
+ Generator: setuptools (80.10.2)
3
3
  Root-Is-Purelib: true
4
4
  Tag: py3-none-any
5
5
 
@@ -1,20 +0,0 @@
1
- fastMONAI/__init__.py,sha256=tgzuqHKcEdKBaP57F5oXxq4XlW2n9J4Fj8ZGu7nGOZg,22
2
- fastMONAI/_modidx.py,sha256=pV36J9-oryTjfV9gL-lXJqoCH9SCQFGsa_NIRdkUlYI,39527
3
- fastMONAI/dataset_info.py,sha256=aJ-utYZ1OrA32RIQbF7jHxcDE8SgOZE3Vt1AojxnvZc,5026
4
- fastMONAI/external_data.py,sha256=IVj9GbIRFh9bTFkIa2wySUObSnNfZiaVtuzFxOFAi0Q,12219
5
- fastMONAI/research_utils.py,sha256=LZu62g8BQAVYS4dD7qDsKHJXZnDd1uLkJ6LoaMDhUhk,590
6
- fastMONAI/utils.py,sha256=jG8SiYebcrPJsmnmMZh4SokWRj7McdJ_gftINnfcE1A,16590
7
- fastMONAI/vision_all.py,sha256=_l6F8ZlUaPYcplNG6mg1-1xssYforByEe4zECbPzTck,359
8
- fastMONAI/vision_augmentation.py,sha256=-4LsLuPi55bh0KduB6EPTzudG63japebszdtN5FtFC0,12281
9
- fastMONAI/vision_core.py,sha256=k4RUBzZuh9W8J4zbcVzXCKfJxkKCsBDG0oSRMwiCNp0,13848
10
- fastMONAI/vision_data.py,sha256=VCB3hyBN7dYuLiYGSGeuWlBTMvb2cLVo_sbENrRWe5Q,11510
11
- fastMONAI/vision_inference.py,sha256=3SaJbKGbgaf9ON9PH5DtvfNlhAurov_Idnrlp4jyU9w,6625
12
- fastMONAI/vision_loss.py,sha256=NrHnk1yD4EBKsp6aippppXU4l-mwmsZOqE_bsZP3ZNI,3591
13
- fastMONAI/vision_metrics.py,sha256=CVxdOBPaMJT6Mo5jF3WoQj6a3C-_FsnBicMAU_ZrFS8,3549
14
- fastMONAI/vision_plot.py,sha256=-X_nNBXx7lYCZSFBIN1587ZTA3T_-2ASBM4K31wU660,3792
15
- fastmonai-0.5.3.dist-info/licenses/LICENSE,sha256=xV8xoN4VOL0uw9X8RSs2IMuD_Ss_a9yAbtGNeBWZwnw,11337
16
- fastmonai-0.5.3.dist-info/METADATA,sha256=S_ocpvWghuz7Jau4C-2YKCtDaC7pI8qG3hvCtQvkybk,7075
17
- fastmonai-0.5.3.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
18
- fastmonai-0.5.3.dist-info/entry_points.txt,sha256=mVBsykSXMairzzk3hJaQ8c-UiwUZqGnn4aFZ24CpsBM,40
19
- fastmonai-0.5.3.dist-info/top_level.txt,sha256=o8y7SWF9odtnIT3jvYtUn9okbJRlaAMCy7oPFCeQvQ8,10
20
- fastmonai-0.5.3.dist-info/RECORD,,