biomedisa 24.8.5__tar.gz → 24.8.7__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (48)
  1. {biomedisa-24.8.5 → biomedisa-24.8.7}/PKG-INFO +12 -10
  2. {biomedisa-24.8.5 → biomedisa-24.8.7}/README.md +9 -7
  3. {biomedisa-24.8.5 → biomedisa-24.8.7}/pyproject.toml +2 -2
  4. {biomedisa-24.8.5 → biomedisa-24.8.7}/src/biomedisa/deeplearning.py +12 -4
  5. {biomedisa-24.8.5 → biomedisa-24.8.7}/src/biomedisa/features/DataGenerator.py +20 -23
  6. {biomedisa-24.8.5 → biomedisa-24.8.7}/src/biomedisa/features/DataGeneratorCrop.py +36 -8
  7. {biomedisa-24.8.5 → biomedisa-24.8.7}/src/biomedisa/features/crop_helper.py +32 -26
  8. {biomedisa-24.8.5 → biomedisa-24.8.7}/src/biomedisa/features/keras_helper.py +334 -205
  9. {biomedisa-24.8.5 → biomedisa-24.8.7}/src/biomedisa.egg-info/PKG-INFO +12 -10
  10. {biomedisa-24.8.5 → biomedisa-24.8.7}/LICENSE +0 -0
  11. {biomedisa-24.8.5 → biomedisa-24.8.7}/setup.cfg +0 -0
  12. {biomedisa-24.8.5 → biomedisa-24.8.7}/src/biomedisa/__init__.py +0 -0
  13. {biomedisa-24.8.5 → biomedisa-24.8.7}/src/biomedisa/__main__.py +0 -0
  14. {biomedisa-24.8.5 → biomedisa-24.8.7}/src/biomedisa/features/PredictDataGenerator.py +0 -0
  15. {biomedisa-24.8.5 → biomedisa-24.8.7}/src/biomedisa/features/PredictDataGeneratorCrop.py +0 -0
  16. {biomedisa-24.8.5 → biomedisa-24.8.7}/src/biomedisa/features/__init__.py +0 -0
  17. {biomedisa-24.8.5 → biomedisa-24.8.7}/src/biomedisa/features/active_contour.py +0 -0
  18. {biomedisa-24.8.5 → biomedisa-24.8.7}/src/biomedisa/features/amira_to_np/__init__.py +0 -0
  19. {biomedisa-24.8.5 → biomedisa-24.8.7}/src/biomedisa/features/amira_to_np/amira_data_stream.py +0 -0
  20. {biomedisa-24.8.5 → biomedisa-24.8.7}/src/biomedisa/features/amira_to_np/amira_grammar.py +0 -0
  21. {biomedisa-24.8.5 → biomedisa-24.8.7}/src/biomedisa/features/amira_to_np/amira_header.py +0 -0
  22. {biomedisa-24.8.5 → biomedisa-24.8.7}/src/biomedisa/features/amira_to_np/amira_helper.py +0 -0
  23. {biomedisa-24.8.5 → biomedisa-24.8.7}/src/biomedisa/features/assd.py +0 -0
  24. {biomedisa-24.8.5 → biomedisa-24.8.7}/src/biomedisa/features/biomedisa_helper.py +0 -0
  25. {biomedisa-24.8.5 → biomedisa-24.8.7}/src/biomedisa/features/create_slices.py +0 -0
  26. {biomedisa-24.8.5 → biomedisa-24.8.7}/src/biomedisa/features/curvop_numba.py +0 -0
  27. {biomedisa-24.8.5 → biomedisa-24.8.7}/src/biomedisa/features/django_env.py +0 -0
  28. {biomedisa-24.8.5 → biomedisa-24.8.7}/src/biomedisa/features/nc_reader.py +0 -0
  29. {biomedisa-24.8.5 → biomedisa-24.8.7}/src/biomedisa/features/pid.py +0 -0
  30. {biomedisa-24.8.5 → biomedisa-24.8.7}/src/biomedisa/features/process_image.py +0 -0
  31. {biomedisa-24.8.5 → biomedisa-24.8.7}/src/biomedisa/features/pycuda_test.py +0 -0
  32. {biomedisa-24.8.5 → biomedisa-24.8.7}/src/biomedisa/features/random_walk/__init__.py +0 -0
  33. {biomedisa-24.8.5 → biomedisa-24.8.7}/src/biomedisa/features/random_walk/gpu_kernels.py +0 -0
  34. {biomedisa-24.8.5 → biomedisa-24.8.7}/src/biomedisa/features/random_walk/pycuda_large.py +0 -0
  35. {biomedisa-24.8.5 → biomedisa-24.8.7}/src/biomedisa/features/random_walk/pycuda_large_allx.py +0 -0
  36. {biomedisa-24.8.5 → biomedisa-24.8.7}/src/biomedisa/features/random_walk/pycuda_small.py +0 -0
  37. {biomedisa-24.8.5 → biomedisa-24.8.7}/src/biomedisa/features/random_walk/pycuda_small_allx.py +0 -0
  38. {biomedisa-24.8.5 → biomedisa-24.8.7}/src/biomedisa/features/random_walk/pyopencl_large.py +0 -0
  39. {biomedisa-24.8.5 → biomedisa-24.8.7}/src/biomedisa/features/random_walk/pyopencl_small.py +0 -0
  40. {biomedisa-24.8.5 → biomedisa-24.8.7}/src/biomedisa/features/random_walk/rw_large.py +0 -0
  41. {biomedisa-24.8.5 → biomedisa-24.8.7}/src/biomedisa/features/random_walk/rw_small.py +0 -0
  42. {biomedisa-24.8.5 → biomedisa-24.8.7}/src/biomedisa/features/remove_outlier.py +0 -0
  43. {biomedisa-24.8.5 → biomedisa-24.8.7}/src/biomedisa/features/split_volume.py +0 -0
  44. {biomedisa-24.8.5 → biomedisa-24.8.7}/src/biomedisa/interpolation.py +0 -0
  45. {biomedisa-24.8.5 → biomedisa-24.8.7}/src/biomedisa/mesh.py +0 -0
  46. {biomedisa-24.8.5 → biomedisa-24.8.7}/src/biomedisa.egg-info/SOURCES.txt +0 -0
  47. {biomedisa-24.8.5 → biomedisa-24.8.7}/src/biomedisa.egg-info/dependency_links.txt +0 -0
  48. {biomedisa-24.8.5 → biomedisa-24.8.7}/src/biomedisa.egg-info/top_level.txt +0 -0
@@ -1,6 +1,6 @@
- Metadata-Version: 2.1
+ Metadata-Version: 2.2
  Name: biomedisa
- Version: 24.8.5
+ Version: 24.8.7
  Summary: Segmentation of 3D volumetric image data
  Author: Philipp Lösel
  Author-email: philipp.loesel@anu.edu.au
@@ -10,7 +10,7 @@ Project-URL: GitHub, https://github.com/biomedisa/biomedisa
  Classifier: Programming Language :: Python :: 3
  Classifier: License :: OSI Approved :: European Union Public Licence 1.2 (EUPL 1.2)
  Classifier: Operating System :: OS Independent
- Requires-Python: >=3.8
+ Requires-Python: >=3.10
  Description-Content-Type: text/markdown
  License-File: LICENSE

@@ -19,6 +19,7 @@ License-File: LICENSE
  - [Overview](#overview)
  - [Hardware Requirements](#hardware-requirements)
  - [Installation (command-line based)](#installation-command-line-based)
+ - [Installation (3D Slicer extension)](#installation-3d-slicer-extension)
  - [Installation (browser based)](#installation-browser-based)
  - [Download Data](#download-data)
  - [Revisions](#revisions)
@@ -33,20 +34,21 @@ License-File: LICENSE
  - [License](#license)

  ## Overview
- Biomedisa (https://biomedisa.info) is a free and easy-to-use open-source application for segmenting large 3D volumetric images such as CT and MRI scans, developed at [The Australian National University CTLab](https://ctlab.anu.edu.au/). Biomedisa's smart interpolation of sparsely pre-segmented slices enables accurate semi-automated segmentation by considering the complete underlying image data. Additionally, Biomedisa enables deep learning for fully automated segmentation across similar samples and structures. It is compatible with segmentation tools like Amira/Avizo, ImageJ/Fiji and 3D Slicer. If you are using Biomedisa or the data for your research please cite: Lösel, P.D. et al. [Introducing Biomedisa as an open-source online platform for biomedical image segmentation.](https://www.nature.com/articles/s41467-020-19303-w) *Nat. Commun.* **11**, 5577 (2020).
+ Biomedisa (https://biomedisa.info) is a free and easy-to-use open-source application for segmenting large 3D volumetric images such as CT and MRI scans, developed at [The Australian National University CTLab](https://ctlab.anu.edu.au/). Biomedisa's smart interpolation of sparsely pre-segmented slices enables accurate semi-automated segmentation by considering the complete underlying image data. Additionally, Biomedisa enables deep learning for fully automated segmentation across similar samples and structures. It is compatible with segmentation tools like Amira/Avizo, ImageJ/Fiji, and 3D Slicer. If you are using Biomedisa or the data for your research please cite: Lösel, P.D. et al. [Introducing Biomedisa as an open-source online platform for biomedical image segmentation.](https://www.nature.com/articles/s41467-020-19303-w) *Nat. Commun.* **11**, 5577 (2020).

  ## Hardware Requirements
- + One or more NVIDIA GPUs with compute capability 3.0 or higher.
+ + One or more NVIDIA GPUs.

  ## Installation (command-line based)
- + [Ubuntu 22.04 + Smart Interpolation](https://github.com/biomedisa/biomedisa/blob/master/README/ubuntu2204_interpolation_cli.md)
- + [Ubuntu 22.04 + Smart Interpolation + Deep Learning](https://github.com/biomedisa/biomedisa/blob/master/README/ubuntu2204_cuda11.8_gpu_cli.md)
- + [Windows 10 + Smart Interpolation + Deep Learning](https://github.com/biomedisa/biomedisa/blob/master/README/windows10_cuda_gpu_cli.md)
+ + [Ubuntu 22/24 + Smart Interpolation](https://github.com/biomedisa/biomedisa/blob/master/README/ubuntu_interpolation_cli.md)
+ + [Ubuntu 22/24 + Deep Learning](https://github.com/biomedisa/biomedisa/blob/master/README/ubuntu_deeplearning_cli.md)
+ + [Ubuntu 22/24 + Smart Interpolation + Deep Learning](https://github.com/biomedisa/biomedisa/blob/master/README/ubuntu_cli.md)
+ + [Windows 10/11 + Smart Interpolation + Deep Learning](https://github.com/biomedisa/biomedisa/blob/master/README/windows10_cuda_gpu_cli.md)
  + [Windows (WSL) + Smart Interpolation + Deep Learning](https://github.com/biomedisa/biomedisa/blob/master/README/windows_wsl.md)

  ## Installation (3D Slicer extension)
  + [Ubuntu 22.04 + Smart Interpolation + Deep Learning](https://github.com/biomedisa/biomedisa/blob/master/README/ubuntu2204_cuda11.8_gpu_slicer.md)
- + [Windows 10 + Smart Interpolation](https://github.com/biomedisa/biomedisa/blob/master/README/windows10_cuda_gpu_slicer.md)
+ + [Windows 10/11 + Smart Interpolation](https://github.com/biomedisa/biomedisa/blob/master/README/windows10_cuda_gpu_slicer.md)

  ## Installation (browser based)
  + [Ubuntu 22.04](https://github.com/biomedisa/biomedisa/blob/master/README/ubuntu2204_cuda11.8.md)
@@ -165,7 +167,7 @@ save_data('final.Head5.am', results['regular'], results['header'])

  #### Command-line based (prediction)
  ```
- python -m biomedisa.deeplearning C:\Users\%USERNAME%\Downloads\testing_axial_crop_pat13.nii.gz C:\Users\%USERNAME%\Downloads\heart.h5 -p
+ python -m biomedisa.deeplearning C:\Users\%USERNAME%\Downloads\testing_axial_crop_pat13.nii.gz C:\Users\%USERNAME%\Downloads\heart.h5
  ```

  ## Mesh Generator
@@ -3,6 +3,7 @@
  - [Overview](#overview)
  - [Hardware Requirements](#hardware-requirements)
  - [Installation (command-line based)](#installation-command-line-based)
+ - [Installation (3D Slicer extension)](#installation-3d-slicer-extension)
  - [Installation (browser based)](#installation-browser-based)
  - [Download Data](#download-data)
  - [Revisions](#revisions)
@@ -17,20 +18,21 @@
  - [License](#license)

  ## Overview
- Biomedisa (https://biomedisa.info) is a free and easy-to-use open-source application for segmenting large 3D volumetric images such as CT and MRI scans, developed at [The Australian National University CTLab](https://ctlab.anu.edu.au/). Biomedisa's smart interpolation of sparsely pre-segmented slices enables accurate semi-automated segmentation by considering the complete underlying image data. Additionally, Biomedisa enables deep learning for fully automated segmentation across similar samples and structures. It is compatible with segmentation tools like Amira/Avizo, ImageJ/Fiji and 3D Slicer. If you are using Biomedisa or the data for your research please cite: Lösel, P.D. et al. [Introducing Biomedisa as an open-source online platform for biomedical image segmentation.](https://www.nature.com/articles/s41467-020-19303-w) *Nat. Commun.* **11**, 5577 (2020).
+ Biomedisa (https://biomedisa.info) is a free and easy-to-use open-source application for segmenting large 3D volumetric images such as CT and MRI scans, developed at [The Australian National University CTLab](https://ctlab.anu.edu.au/). Biomedisa's smart interpolation of sparsely pre-segmented slices enables accurate semi-automated segmentation by considering the complete underlying image data. Additionally, Biomedisa enables deep learning for fully automated segmentation across similar samples and structures. It is compatible with segmentation tools like Amira/Avizo, ImageJ/Fiji, and 3D Slicer. If you are using Biomedisa or the data for your research please cite: Lösel, P.D. et al. [Introducing Biomedisa as an open-source online platform for biomedical image segmentation.](https://www.nature.com/articles/s41467-020-19303-w) *Nat. Commun.* **11**, 5577 (2020).

  ## Hardware Requirements
- + One or more NVIDIA GPUs with compute capability 3.0 or higher.
+ + One or more NVIDIA GPUs.

  ## Installation (command-line based)
- + [Ubuntu 22.04 + Smart Interpolation](https://github.com/biomedisa/biomedisa/blob/master/README/ubuntu2204_interpolation_cli.md)
- + [Ubuntu 22.04 + Smart Interpolation + Deep Learning](https://github.com/biomedisa/biomedisa/blob/master/README/ubuntu2204_cuda11.8_gpu_cli.md)
- + [Windows 10 + Smart Interpolation + Deep Learning](https://github.com/biomedisa/biomedisa/blob/master/README/windows10_cuda_gpu_cli.md)
+ + [Ubuntu 22/24 + Smart Interpolation](https://github.com/biomedisa/biomedisa/blob/master/README/ubuntu_interpolation_cli.md)
+ + [Ubuntu 22/24 + Deep Learning](https://github.com/biomedisa/biomedisa/blob/master/README/ubuntu_deeplearning_cli.md)
+ + [Ubuntu 22/24 + Smart Interpolation + Deep Learning](https://github.com/biomedisa/biomedisa/blob/master/README/ubuntu_cli.md)
+ + [Windows 10/11 + Smart Interpolation + Deep Learning](https://github.com/biomedisa/biomedisa/blob/master/README/windows10_cuda_gpu_cli.md)
  + [Windows (WSL) + Smart Interpolation + Deep Learning](https://github.com/biomedisa/biomedisa/blob/master/README/windows_wsl.md)

  ## Installation (3D Slicer extension)
  + [Ubuntu 22.04 + Smart Interpolation + Deep Learning](https://github.com/biomedisa/biomedisa/blob/master/README/ubuntu2204_cuda11.8_gpu_slicer.md)
- + [Windows 10 + Smart Interpolation](https://github.com/biomedisa/biomedisa/blob/master/README/windows10_cuda_gpu_slicer.md)
+ + [Windows 10/11 + Smart Interpolation](https://github.com/biomedisa/biomedisa/blob/master/README/windows10_cuda_gpu_slicer.md)

  ## Installation (browser based)
  + [Ubuntu 22.04](https://github.com/biomedisa/biomedisa/blob/master/README/ubuntu2204_cuda11.8.md)
@@ -149,7 +151,7 @@ save_data('final.Head5.am', results['regular'], results['header'])

  #### Command-line based (prediction)
  ```
- python -m biomedisa.deeplearning C:\Users\%USERNAME%\Downloads\testing_axial_crop_pat13.nii.gz C:\Users\%USERNAME%\Downloads\heart.h5 -p
+ python -m biomedisa.deeplearning C:\Users\%USERNAME%\Downloads\testing_axial_crop_pat13.nii.gz C:\Users\%USERNAME%\Downloads\heart.h5
  ```

  ## Mesh Generator
@@ -4,13 +4,13 @@ build-backend = "setuptools.build_meta"

  [project]
  name = "biomedisa"
- version = "24.8.5"
+ version = "24.8.7"
  authors = [
  { name="Philipp Lösel"}, {email="philipp.loesel@anu.edu.au" },
  ]
  description = "Segmentation of 3D volumetric image data"
  readme = "README.md"
- requires-python = ">=3.8"
+ requires-python = ">=3.10"
  classifiers = [
  "Programming Language :: Python :: 3",
  "License :: OSI Approved :: European Union Public Licence 1.2 (EUPL 1.2)",
@@ -71,7 +71,7 @@ def deep_learning(img_data, label_data=None, val_img_data=None, val_label_data=N
  learning_rate=0.01, stride_size=32, validation_stride_size=32, validation_freq=1,
  batch_size=None, x_scale=256, y_scale=256, z_scale=256, scaling=True, early_stopping=0,
  pretrained_model=None, fine_tune=False, workers=1, cropping_epochs=50,
- x_range=None, y_range=None, z_range=None, header=None, extension='.tif',
+ x_range=None, y_range=None, z_range=None, header=None, extension=None,
  img_header=None, img_extension='.tif', average_dice=False, django_env=False,
  path=None, success=True, return_probs=False, patch_normalization=False,
  z_patch=64, y_patch=64, x_patch=64, path_to_logfile=None, img_id=None, label_id=None,
@@ -228,9 +228,11 @@ def deep_learning(img_data, label_data=None, val_img_data=None, val_label_data=N
  bm.scaling = bool(meta['scaling'][()])

  # check if amira header is available in the network
- if bm.header is None and meta.get('header') is not None:
+ if bm.extension is None and bm.header is None and meta.get('header') is not None:
  bm.header = [np.array(meta.get('header'))]
  bm.extension = '.am'
+ if bm.extension is None:
+ bm.extension = '.tif'

  # crop data
  crop_data = True if 'cropping_weights' in hf else False
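The deeplearning.py hunk above changes how the output format is resolved at prediction time: an explicitly requested extension now takes precedence over an Amira header stored in the trained network, and '.tif' only applies when neither is given. A minimal sketch of that fallback order, where `resolve_extension` and the dict-style `meta` are illustrative stand-ins rather than Biomedisa API:

```
# Illustrative sketch of the extension fallback above (not Biomedisa API).
import numpy as np

def resolve_extension(extension, header, meta):
    # a user-supplied extension always wins over the stored Amira header
    if extension is None and header is None and meta.get('header') is not None:
        header = [np.array(meta.get('header'))]   # reuse the header saved in the network
        extension = '.am'
    if extension is None:
        extension = '.tif'                        # final default
    return extension, header

print(resolve_extension(None, None, {}))                         # ('.tif', None)
print(resolve_extension('.nrrd', None, {'header': 'Amira'})[0])  # '.nrrd'
```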
@@ -301,6 +303,12 @@ def deep_learning(img_data, label_data=None, val_img_data=None, val_label_data=N
  results, bm = predict_segmentation(bm, region_of_interest,
  channels, normalization_parameters)

+ from mpi4py import MPI
+ comm = MPI.COMM_WORLD
+ rank = comm.Get_rank()
+ if rank>0:
+ return 0
+
  # results
  if cropped_volume is not None:
  results['cropped_volume'] = cropped_volume
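The mpi4py lines added above make every rank except the root return early after prediction, so only rank 0 goes on to assemble and save the results. A standalone sketch of that pattern (assumes mpi4py and an MPI launcher; not Biomedisa code):

```
# Sketch of the rank-gating pattern above: all ranks join the collective work,
# then only rank 0 continues to the result handling.
# Run e.g. with: mpiexec -n 4 python rank_gate_sketch.py
from mpi4py import MPI

def run():
    comm = MPI.COMM_WORLD
    rank = comm.Get_rank()

    partial = rank * rank                  # stand-in for a per-rank partial result
    gathered = comm.gather(partial, root=0)

    if rank > 0:                           # mirrors `if rank>0: return 0` above
        return 0

    print('rank 0 assembles:', gathered)   # only the root reaches this point
    return gathered

if __name__ == '__main__':
    run()
```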
@@ -488,8 +496,8 @@ if __name__ == '__main__':
  help='Location of mask')
  parser.add_argument('-rf','--refinement', action='store_true', default=False,
  help='Refine segmentation on full size data')
- parser.add_argument('-ext','--extension', type=str, default='.tif',
- help='Save data for example as NRRD file using --extension=".nrrd"')
+ parser.add_argument('-ext','--extension', type=str, default=None,
+ help='Save data in formats like NRRD or TIFF using --extension=".nrrd"')
  bm = parser.parse_args()
  bm.success = True

@@ -129,39 +129,25 @@ class DataGenerator(tf.keras.utils.Sequence):
  def __len__(self):
  'Denotes the number of batches per epoch'
  if len(self.list_IDs_bg) > 0:
- len_IDs = 2 * max(len(self.list_IDs_fg), len(self.list_IDs_bg))
+ len_IDs = max(len(self.list_IDs_fg), len(self.list_IDs_bg))
+ n_batches = len_IDs // (self.batch_size // 2)
  else:
  len_IDs = len(self.list_IDs_fg)
- n_batches = int(np.floor(len_IDs / self.batch_size))
+ n_batches = len_IDs // self.batch_size
  return n_batches

  def __getitem__(self, index):
  'Generate one batch of data'
-
  if len(self.list_IDs_bg) > 0:
-
- # len IDs
- len_IDs = max(len(self.list_IDs_fg), len(self.list_IDs_bg))
-
- # upsample lists of indexes to the same size
- repetitions = int(np.floor(len_IDs / len(self.list_IDs_fg))) + 1
- upsampled_indexes_fg = np.tile(self.indexes_fg, repetitions)
- upsampled_indexes_fg = upsampled_indexes_fg[:len_IDs]
-
- repetitions = int(np.floor(len_IDs / len(self.list_IDs_bg))) + 1
- upsampled_indexes_bg = np.tile(self.indexes_bg, repetitions)
- upsampled_indexes_bg = upsampled_indexes_bg[:len_IDs]
-
  # Generate indexes of the batch
- tmp_batch_size = int(self.batch_size / 2)
- indexes_fg = upsampled_indexes_fg[index*tmp_batch_size:(index+1)*tmp_batch_size]
- indexes_bg = upsampled_indexes_bg[index*tmp_batch_size:(index+1)*tmp_batch_size]
+ half_batch_size = self.batch_size // 2
+ indexes_fg = self.indexes_fg[index*half_batch_size:(index+1)*half_batch_size]
+ indexes_bg = self.indexes_bg[index*half_batch_size:(index+1)*half_batch_size]

  # Find list of IDs
  list_IDs_temp = [self.list_IDs_fg[k] for k in indexes_fg] + [self.list_IDs_bg[k] for k in indexes_bg]

  else:
-
  # Generate indexes of the batch
  indexes_fg = self.indexes_fg[index*self.batch_size:(index+1)*self.batch_size]

@@ -175,11 +161,22 @@ class DataGenerator(tf.keras.utils.Sequence):

  def on_epoch_end(self):
  'Updates indexes after each epoch'
- self.indexes_fg = np.arange(len(self.list_IDs_fg))
- self.indexes_bg = np.arange(len(self.list_IDs_bg))
+ if len(self.list_IDs_bg) > 0:
+ # upsample lists of indexes
+ indexes_fg = np.arange(len(self.list_IDs_fg))
+ indexes_bg = np.arange(len(self.list_IDs_bg))
+ len_IDs = max(len(self.list_IDs_fg), len(self.list_IDs_bg))
+ repetitions = len_IDs // len(self.list_IDs_fg) + 1
+ self.indexes_fg = np.tile(indexes_fg, repetitions)
+ repetitions = len_IDs // len(self.list_IDs_bg) + 1
+ self.indexes_bg = np.tile(indexes_bg, repetitions)
+ else:
+ self.indexes_fg = np.arange(len(self.list_IDs_fg))
+ # shuffle indexes
  if self.shuffle == True:
  np.random.shuffle(self.indexes_fg)
- np.random.shuffle(self.indexes_bg)
+ if len(self.list_IDs_bg) > 0:
+ np.random.shuffle(self.indexes_bg)

  def __data_generation(self, list_IDs_temp):
  'Generates data containing batch_size samples' # X : (n_samples, *dim, n_channels)
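Taken together, the two DataGenerator.py hunks above move the foreground/background balancing out of `__getitem__`: `on_epoch_end` now tiles the shorter index list up to the length of the longer one once per epoch, and each batch then simply takes half of its samples from each list. A condensed, framework-free sketch of that scheme (class and variable names are illustrative):

```
# Condensed sketch of the balanced sampling above: tile the shorter index list
# once per epoch, then draw half of every batch from each list.
import numpy as np

class BalancedBatches:
    def __init__(self, ids_fg, ids_bg, batch_size, shuffle=True):
        self.ids_fg, self.ids_bg = ids_fg, ids_bg
        self.batch_size, self.shuffle = batch_size, shuffle
        self.on_epoch_end()

    def __len__(self):
        # number of batches per epoch, as in the new __len__
        return max(len(self.ids_fg), len(self.ids_bg)) // (self.batch_size // 2)

    def on_epoch_end(self):
        # upsample both index lists to at least the same length
        len_ids = max(len(self.ids_fg), len(self.ids_bg))
        self.indexes_fg = np.tile(np.arange(len(self.ids_fg)),
                                  len_ids // len(self.ids_fg) + 1)
        self.indexes_bg = np.tile(np.arange(len(self.ids_bg)),
                                  len_ids // len(self.ids_bg) + 1)
        if self.shuffle:
            np.random.shuffle(self.indexes_fg)
            np.random.shuffle(self.indexes_bg)

    def __getitem__(self, index):
        # each batch: half foreground IDs, half background IDs
        half = self.batch_size // 2
        fg = self.indexes_fg[index * half:(index + 1) * half]
        bg = self.indexes_bg[index * half:(index + 1) * half]
        return [self.ids_fg[k] for k in fg] + [self.ids_bg[k] for k in bg]

# Example: 10 foreground patches, 3 background patches, batches of 4
gen = BalancedBatches(list(range(10)), list(range(100, 103)), batch_size=4)
for b in range(len(gen)):
    print(gen[b])   # two foreground and two background IDs per batch
```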
@@ -28,22 +28,25 @@

  import numpy as np
  import tensorflow as tf
- from scipy.ndimage import gaussian_filter, map_coordinates
+ from scipy.ndimage import gaussian_filter, map_coordinates, rotate
+ import random

  def elastic_transform(image, alpha=100, sigma=20):
- zsh, ysh, xsh = image.shape
+ ysh, xsh, csh = image.shape
  dx = gaussian_filter((np.random.rand(ysh, xsh) * 2 - 1) * alpha, sigma)
  dy = gaussian_filter((np.random.rand(ysh, xsh) * 2 - 1) * alpha, sigma)
  y, x = np.meshgrid(np.arange(ysh), np.arange(xsh), indexing='ij')
  indices = np.reshape(y+dy, (-1, 1)), np.reshape(x+dx, (-1, 1))
- for k in range(zsh):
- image[k] = map_coordinates(image[k], indices, order=1, mode='reflect').reshape(ysh, xsh)
+ for k in range(csh):
+ image[:,:,k] = map_coordinates(image[:,:,k], indices, order=0, mode='reflect').reshape(ysh, xsh)
  return image

  class DataGeneratorCrop(tf.keras.utils.Sequence):
  'Generates data for Keras'
- def __init__(self, img, label, list_IDs_fg, list_IDs_bg, batch_size=32, dim=(32,32,32),
- n_channels=3, n_classes=2, shuffle=True):
+ def __init__(self, img, label, list_IDs_fg, list_IDs_bg, batch_size=32,
+ dim=(32,32,32), n_channels=3, n_classes=2, shuffle=True,
+ augment=(False,False,False,0), train=True):
+
  'Initialization'
  self.dim = dim
  self.list_IDs_fg = list_IDs_fg
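As rewritten above, elastic_transform deforms a single 2D slice with a trailing channel axis (height, width, channels), warping every channel with the same displacement field and nearest-neighbour sampling (order=0). A small usage sketch under that assumption, on random data:

```
# Usage sketch for the rewritten elastic_transform above: input is now one
# (height, width, channels) slice, not a 3D stack.
import numpy as np
from scipy.ndimage import gaussian_filter, map_coordinates

def elastic_transform(image, alpha=100, sigma=20):
    ysh, xsh, csh = image.shape
    dx = gaussian_filter((np.random.rand(ysh, xsh) * 2 - 1) * alpha, sigma)
    dy = gaussian_filter((np.random.rand(ysh, xsh) * 2 - 1) * alpha, sigma)
    y, x = np.meshgrid(np.arange(ysh), np.arange(xsh), indexing='ij')
    indices = np.reshape(y+dy, (-1, 1)), np.reshape(x+dx, (-1, 1))
    for k in range(csh):
        # every channel is resampled with the same displacement field
        image[:,:,k] = map_coordinates(image[:,:,k], indices, order=0, mode='reflect').reshape(ysh, xsh)
    return image

slice_rgb = np.random.randint(0, 256, (256, 256, 3), dtype=np.uint8)
warped = elastic_transform(slice_rgb.copy())
print(warped.shape)   # (256, 256, 3)
```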
@@ -54,6 +57,8 @@ class DataGeneratorCrop(tf.keras.utils.Sequence):
  self.n_channels = n_channels
  self.n_classes = n_classes
  self.shuffle = shuffle
+ self.augment = augment
+ self.train = train
  self.on_epoch_end()

  def __len__(self):
@@ -108,14 +113,37 @@ class DataGeneratorCrop(tf.keras.utils.Sequence):
  def __data_generation(self, list_IDs_temp):
  'Generates data containing batch_size samples' # X : (n_samples, *dim, n_channels)

+ # get augmentation parameter
+ flip_x, flip_y, flip_z, rotation = self.augment
+ elastic = False
+
  # Initialization
  X = np.empty((self.batch_size, *self.dim, self.n_channels), dtype=np.uint8)
  y = np.empty((self.batch_size,), dtype=np.int32)

  # Generate data
  for i, ID in enumerate(list_IDs_temp):
- X[i,...] = self.img[ID,...]
- y[i] = self.label[ID]
+ tmp_X = self.img[ID,...].copy()
+
+ # augmentation
+ if self.train and (any(self.augment) or elastic):
+ if flip_x and np.random.randint(2) and abs(self.label[ID])!=3:
+ tmp_X = np.flip(tmp_X, 1)
+ if flip_y and np.random.randint(2):
+ if abs(self.label[ID])==1:
+ tmp_X = np.flip(tmp_X, 0)
+ elif abs(self.label[ID])==3:
+ tmp_X = np.flip(tmp_X, 1)
+ if flip_z and np.random.randint(2) and abs(self.label[ID])!=1:
+ tmp_X = np.flip(tmp_X, 0)
+ if rotation:
+ angle = random.uniform(-rotation, rotation)
+ tmp_X = rotate(tmp_X, angle, order=0, mode='reflect', reshape=False)
+ if elastic:
+ tmp_X = elastic_transform(tmp_X)
+
+ X[i,...] = tmp_X
+ y[i] = 0 if self.label[ID] < 0 else 1

  return X, y
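The flip logic above relies on the signed slice labels built in crop_helper.py (next hunks): |label| appears to encode the slicing axis (1 = z-slice, 2 = y-slice, 3 = x-slice) and the sign whether the slice contains labelled voxels, so a requested volume flip is only applied along axes that lie in the slice plane, and the label is mapped back to a 0/1 target at the end. A compact sketch of that decision logic (random coin flips and rotation omitted; function name is hypothetical):

```
# Sketch of the orientation-aware flips above: |label| = slicing axis
# (1: z-slice, 2: y-slice, 3: x-slice), sign = empty vs. non-empty slice.
import numpy as np

def flip_slice(tmp_X, label, flip_x=False, flip_y=False, flip_z=False):
    axis_id = abs(label)
    if flip_x and axis_id != 3:        # x lies in-plane for z- and y-slices
        tmp_X = np.flip(tmp_X, 1)
    if flip_y:
        if axis_id == 1:               # y is the vertical axis of a z-slice
            tmp_X = np.flip(tmp_X, 0)
        elif axis_id == 3:             # ...and the horizontal axis of an x-slice
            tmp_X = np.flip(tmp_X, 1)
    if flip_z and axis_id != 1:        # z lies in-plane for y- and x-slices
        tmp_X = np.flip(tmp_X, 0)
    target = 0 if label < 0 else 1     # the network still learns empty/non-empty
    return tmp_X, target

slice_2d = np.arange(12).reshape(3, 4, 1)   # toy (height, width, channel) slice
flipped, target = flip_slice(slice_2d, label=-2, flip_x=True, flip_z=True)
print(flipped[:, :, 0], target)             # flipped along both axes, target 0
```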
@@ -44,6 +44,7 @@ import h5py
  import tarfile
  import matplotlib.pyplot as plt
  import tempfile
+ import copy

  class InputError(Exception):
  def __init__(self, message=None):
@@ -117,9 +118,12 @@ def load_cropping_training_data(normalize, img_list, label_list, x_scale, y_scal
  a = label_in
  a = a.astype(np.uint8)
  a = set_labels_to_zero(a, labels_to_compute, labels_to_remove)
- label_z = np.any(a,axis=(1,2))
- label_y = np.any(a,axis=(0,2))
- label_x = np.any(a,axis=(0,1))
+ label_z = np.any(a,axis=(1,2)).astype(np.int8) * 1
+ label_y = np.any(a,axis=(0,2)).astype(np.int8) * 2
+ label_x = np.any(a,axis=(0,1)).astype(np.int8) * 3
+ label_z[label_z==0] = -1
+ label_y[label_y==0] = -2
+ label_x[label_x==0] = -3
  label = np.append(label_z,label_y,axis=0)
  label = np.append(label,label_x,axis=0)
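The plain boolean per-slice labels are replaced above by signed axis codes: +1/+2/+3 mark z-, y-, and x-slices that contain labelled voxels, -1/-2/-3 mark empty slices of the same orientation, which is what the DataGeneratorCrop.py augmentation reads back. A toy example of the resulting label vector:

```
# Toy illustration of the signed slice labels built above:
# sign = slice contains labels or not, magnitude = slicing axis (1: z, 2: y, 3: x).
import numpy as np

a = np.zeros((4, 5, 6), dtype=np.uint8)   # toy label volume (z, y, x)
a[1, 2, 3] = 1                            # a single labelled voxel

label_z = np.any(a, axis=(1, 2)).astype(np.int8) * 1
label_y = np.any(a, axis=(0, 2)).astype(np.int8) * 2
label_x = np.any(a, axis=(0, 1)).astype(np.int8) * 3
label_z[label_z == 0] = -1
label_y[label_y == 0] = -2
label_x[label_x == 0] = -3

label = np.append(label_z, label_y, axis=0)
label = np.append(label, label_x, axis=0)
print(label)   # [-1  1 -1 -1 -2 -2  2 -2 -2 -3 -3 -3  3 -3 -3]
```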
@@ -178,12 +182,15 @@ def load_cropping_training_data(normalize, img_list, label_list, x_scale, y_scal
  a = label_in[k]
  a = a.astype(np.uint8)
  a = set_labels_to_zero(a, labels_to_compute, labels_to_remove)
- next_label_z = np.any(a,axis=(1,2))
- next_label_y = np.any(a,axis=(0,2))
- next_label_x = np.any(a,axis=(0,1))
- label = np.append(label,next_label_z,axis=0)
- label = np.append(label,next_label_y,axis=0)
- label = np.append(label,next_label_x,axis=0)
+ label_z = np.any(a,axis=(1,2)).astype(np.int8) * 1
+ label_y = np.any(a,axis=(0,2)).astype(np.int8) * 2
+ label_x = np.any(a,axis=(0,1)).astype(np.int8) * 3
+ label_z[label_z==0] = -1
+ label_y[label_y==0] = -2
+ label_x[label_x==0] = -3
+ label = np.append(label,label_z,axis=0)
+ label = np.append(label,label_y,axis=0)
+ label = np.append(label,label_x,axis=0)

  # append image
  if any(img_list):
@@ -228,20 +235,20 @@ def load_cropping_training_data(normalize, img_list, label_list, x_scale, y_scal
  return img_rgb, label, normalization_parameters, channels

  def train_cropping(img, label, path_to_model, epochs, batch_size,
- validation_split, flip_x, flip_y, flip_z, rotate,
- img_val, label_val):
+ validation_split, flip_x, flip_y, flip_z, rotate,
+ img_val, label_val):

  # img shape
  zsh, ysh, xsh, channels = img.shape

  # list of IDs
- list_IDs_fg = list(np.where(label)[0])
- list_IDs_bg = list(np.where(label==False)[0])
+ list_IDs_fg = list(np.where(label>0)[0])
+ list_IDs_bg = list(np.where(label<0)[0])

  # validation data
  if np.any(img_val):
- list_IDs_val_fg = list(np.where(label_val)[0])
- list_IDs_val_bg = list(np.where(label_val==False)[0])
+ list_IDs_val_fg = list(np.where(label_val>0)[0])
+ list_IDs_val_bg = list(np.where(label_val<0)[0])
  elif validation_split:
  split_fg = int(len(list_IDs_fg) * validation_split)
  split_bg = int(len(list_IDs_bg) * validation_split)
@@ -287,14 +294,13 @@ def train_cropping(img, label, path_to_model, epochs, batch_size,
  'batch_size': batch_size,
  'n_classes': 2,
  'n_channels': channels,
- 'shuffle': True}
+ 'shuffle': True,
+ 'augment': (flip_x, flip_y, flip_z, rotate),
+ 'train': True}

  # validation parameters
- params_val = {'dim': (ysh, xsh),
- 'batch_size': batch_size,
- 'n_classes': 2,
- 'n_channels': channels,
- 'shuffle': False}
+ params_val = copy.deepcopy(params)
+ params_val['train'] = False

  # data generator
  training_generator = DataGeneratorCrop(img, label, list_IDs_fg, list_IDs_bg, **params)
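With the augmentation tuple now part of params, the validation settings above are simply a deep copy of the training parameters with train switched off, so validation batches are never augmented. A minimal sketch of that pattern with placeholder values:

```
# Sketch of sharing generator settings between training and validation:
# copy the training parameters and only disable augmentation for validation.
import copy

params = {'dim': (256, 256),
          'batch_size': 24,
          'n_classes': 2,
          'n_channels': 3,
          'shuffle': True,
          'augment': (True, True, False, 10),   # (flip_x, flip_y, flip_z, rotate)
          'train': True}

params_val = copy.deepcopy(params)
params_val['train'] = False

print(params_val['augment'], params_val['train'])   # (True, True, False, 10) False
```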
@@ -497,7 +503,7 @@ def crop_volume(img, path_to_model, path_to_final, z_shape, y_shape, x_shape, ba
  # main functions
  #=====================

- def load_and_train(bm, x_scale=256, y_scale=256, z_scale=256):
+ def load_and_train(bm, x_scale=256, y_scale=256, z_scale=256, batch_size=24):

  # load training data
  img, label, normalization_parameters, channels = load_cropping_training_data(bm.normalize,
@@ -512,8 +518,8 @@ def load_and_train(bm, x_scale=256, y_scale=256, z_scale=256):
  bm.only, bm.ignore, bm.val_img_data, bm.val_label_data, normalization_parameters, channels)

  # train cropping
- train_cropping(img, label, bm.path_to_model, bm.cropping_epochs,
- bm.batch_size, bm.validation_split,
+ train_cropping(img, label, bm.path_to_model,
+ bm.cropping_epochs, batch_size, bm.validation_split,
  bm.flip_x, bm.flip_y, bm.flip_z, bm.rotate,
  img_val, label_val)

@@ -531,7 +537,7 @@ def load_and_train(bm, x_scale=256, y_scale=256, z_scale=256):

  return cropping_weights, cropping_config, normalization_parameters

- def crop_data(bm):
+ def crop_data(bm, batch_size=32):

  # get meta data
  hf = h5py.File(bm.path_to_model, 'r')
@@ -554,7 +560,7 @@ def crop_data(bm):

  # make prediction
  z_lower, z_upper, y_lower, y_upper, x_lower, x_upper, cropped_volume = crop_volume(img, bm.path_to_model,
- bm.path_to_cropped_image, z_shape, y_shape, x_shape, bm.batch_size, bm.debug_cropping, bm.save_cropped, bm.img_data)
+ bm.path_to_cropped_image, z_shape, y_shape, x_shape, batch_size, bm.debug_cropping, bm.save_cropped, bm.img_data)

  # region of interest
  region_of_interest = np.array([z_lower, z_upper, y_lower, y_upper, x_lower, x_upper])