biomedisa 24.5.23.tar.gz → 24.7.1.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (48)
  1. {biomedisa-24.5.23 → biomedisa-24.7.1}/PKG-INFO +12 -7
  2. {biomedisa-24.5.23 → biomedisa-24.7.1}/README.md +11 -6
  3. {biomedisa-24.5.23 → biomedisa-24.7.1}/pyproject.toml +1 -1
  4. {biomedisa-24.5.23 → biomedisa-24.7.1}/src/biomedisa/deeplearning.py +35 -32
  5. {biomedisa-24.5.23 → biomedisa-24.7.1}/src/biomedisa/features/DataGenerator.py +1 -1
  6. {biomedisa-24.5.23 → biomedisa-24.7.1}/src/biomedisa/features/active_contour.py +3 -10
  7. {biomedisa-24.5.23 → biomedisa-24.7.1}/src/biomedisa/features/biomedisa_helper.py +17 -16
  8. {biomedisa-24.5.23 → biomedisa-24.7.1}/src/biomedisa/features/create_slices.py +4 -3
  9. {biomedisa-24.5.23 → biomedisa-24.7.1}/src/biomedisa/features/keras_helper.py +280 -112
  10. {biomedisa-24.5.23 → biomedisa-24.7.1}/src/biomedisa/features/remove_outlier.py +3 -9
  11. {biomedisa-24.5.23 → biomedisa-24.7.1}/src/biomedisa/interpolation.py +8 -15
  12. {biomedisa-24.5.23 → biomedisa-24.7.1}/src/biomedisa/mesh.py +12 -11
  13. {biomedisa-24.5.23 → biomedisa-24.7.1}/src/biomedisa.egg-info/PKG-INFO +12 -7
  14. {biomedisa-24.5.23 → biomedisa-24.7.1}/LICENSE +0 -0
  15. {biomedisa-24.5.23 → biomedisa-24.7.1}/setup.cfg +0 -0
  16. {biomedisa-24.5.23 → biomedisa-24.7.1}/src/biomedisa/__init__.py +0 -0
  17. {biomedisa-24.5.23 → biomedisa-24.7.1}/src/biomedisa/__main__.py +0 -0
  18. {biomedisa-24.5.23 → biomedisa-24.7.1}/src/biomedisa/features/DataGeneratorCrop.py +0 -0
  19. {biomedisa-24.5.23 → biomedisa-24.7.1}/src/biomedisa/features/PredictDataGenerator.py +0 -0
  20. {biomedisa-24.5.23 → biomedisa-24.7.1}/src/biomedisa/features/PredictDataGeneratorCrop.py +0 -0
  21. {biomedisa-24.5.23 → biomedisa-24.7.1}/src/biomedisa/features/__init__.py +0 -0
  22. {biomedisa-24.5.23 → biomedisa-24.7.1}/src/biomedisa/features/amira_to_np/__init__.py +0 -0
  23. {biomedisa-24.5.23 → biomedisa-24.7.1}/src/biomedisa/features/amira_to_np/amira_data_stream.py +0 -0
  24. {biomedisa-24.5.23 → biomedisa-24.7.1}/src/biomedisa/features/amira_to_np/amira_grammar.py +0 -0
  25. {biomedisa-24.5.23 → biomedisa-24.7.1}/src/biomedisa/features/amira_to_np/amira_header.py +0 -0
  26. {biomedisa-24.5.23 → biomedisa-24.7.1}/src/biomedisa/features/amira_to_np/amira_helper.py +0 -0
  27. {biomedisa-24.5.23 → biomedisa-24.7.1}/src/biomedisa/features/assd.py +0 -0
  28. {biomedisa-24.5.23 → biomedisa-24.7.1}/src/biomedisa/features/crop_helper.py +0 -0
  29. {biomedisa-24.5.23 → biomedisa-24.7.1}/src/biomedisa/features/curvop_numba.py +0 -0
  30. {biomedisa-24.5.23 → biomedisa-24.7.1}/src/biomedisa/features/django_env.py +0 -0
  31. {biomedisa-24.5.23 → biomedisa-24.7.1}/src/biomedisa/features/nc_reader.py +0 -0
  32. {biomedisa-24.5.23 → biomedisa-24.7.1}/src/biomedisa/features/pid.py +0 -0
  33. {biomedisa-24.5.23 → biomedisa-24.7.1}/src/biomedisa/features/process_image.py +0 -0
  34. {biomedisa-24.5.23 → biomedisa-24.7.1}/src/biomedisa/features/pycuda_test.py +0 -0
  35. {biomedisa-24.5.23 → biomedisa-24.7.1}/src/biomedisa/features/random_walk/__init__.py +0 -0
  36. {biomedisa-24.5.23 → biomedisa-24.7.1}/src/biomedisa/features/random_walk/gpu_kernels.py +0 -0
  37. {biomedisa-24.5.23 → biomedisa-24.7.1}/src/biomedisa/features/random_walk/pycuda_large.py +0 -0
  38. {biomedisa-24.5.23 → biomedisa-24.7.1}/src/biomedisa/features/random_walk/pycuda_large_allx.py +0 -0
  39. {biomedisa-24.5.23 → biomedisa-24.7.1}/src/biomedisa/features/random_walk/pycuda_small.py +0 -0
  40. {biomedisa-24.5.23 → biomedisa-24.7.1}/src/biomedisa/features/random_walk/pycuda_small_allx.py +0 -0
  41. {biomedisa-24.5.23 → biomedisa-24.7.1}/src/biomedisa/features/random_walk/pyopencl_large.py +0 -0
  42. {biomedisa-24.5.23 → biomedisa-24.7.1}/src/biomedisa/features/random_walk/pyopencl_small.py +0 -0
  43. {biomedisa-24.5.23 → biomedisa-24.7.1}/src/biomedisa/features/random_walk/rw_large.py +0 -0
  44. {biomedisa-24.5.23 → biomedisa-24.7.1}/src/biomedisa/features/random_walk/rw_small.py +0 -0
  45. {biomedisa-24.5.23 → biomedisa-24.7.1}/src/biomedisa/features/split_volume.py +0 -0
  46. {biomedisa-24.5.23 → biomedisa-24.7.1}/src/biomedisa.egg-info/SOURCES.txt +0 -0
  47. {biomedisa-24.5.23 → biomedisa-24.7.1}/src/biomedisa.egg-info/dependency_links.txt +0 -0
  48. {biomedisa-24.5.23 → biomedisa-24.7.1}/src/biomedisa.egg-info/top_level.txt +0 -0
{biomedisa-24.5.23 → biomedisa-24.7.1}/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: biomedisa
- Version: 24.5.23
+ Version: 24.7.1
  Summary: Segmentation of 3D volumetric image data
  Author: Philipp Lösel
  Author-email: philipp.loesel@anu.edu.au
@@ -33,18 +33,21 @@ License-File: LICENSE
  - [License](#license)

  ## Overview
- Biomedisa (https://biomedisa.info) is a free and easy-to-use open-source application for segmenting large volumetric images, e.g. CT and MRI scans, developed at [The Australian National University CTLab](https://ctlab.anu.edu.au/). Biomedisa's semi-automated segmentation is based on a smart interpolation of sparsely pre-segmented slices, taking into account the complete underlying image data. In addition, Biomedisa enables deep learning for the fully automated segmentation of series of similar samples. It can be used in combination with segmentation tools such as Amira/Avizo, ImageJ/Fiji and 3D Slicer. If you are using Biomedisa or the data for your research please cite: Lösel, P.D. et al. [Introducing Biomedisa as an open-source online platform for biomedical image segmentation.](https://www.nature.com/articles/s41467-020-19303-w) *Nat. Commun.* **11**, 5577 (2020).
+ Biomedisa (https://biomedisa.info) is a free and easy-to-use open-source application for segmenting large 3D volumetric images such as CT and MRI scans, developed at [The Australian National University CTLab](https://ctlab.anu.edu.au/). Biomedisa's smart interpolation of sparsely pre-segmented slices enables accurate semi-automated segmentation by considering the complete underlying image data. Additionally, Biomedisa enables deep learning for fully automated segmentation across similar samples and structures. It is compatible with segmentation tools like Amira/Avizo, ImageJ/Fiji and 3D Slicer. If you are using Biomedisa or the data for your research please cite: Lösel, P.D. et al. [Introducing Biomedisa as an open-source online platform for biomedical image segmentation.](https://www.nature.com/articles/s41467-020-19303-w) *Nat. Commun.* **11**, 5577 (2020).

  ## Hardware Requirements
  + One or more NVIDIA GPUs with compute capability 3.0 or higher or an Intel CPU

  ## Installation (command-line based)
  + [Ubuntu 22.04 + CUDA + GPU (recommended)](https://github.com/biomedisa/biomedisa/blob/master/README/ubuntu2204_cuda11.8_gpu_cli.md)
- + [Ubuntu 22.04 + OpenCL + CPU (smart interpolation only and very slow)](https://github.com/biomedisa/biomedisa/blob/master/README/ubuntu2204_opencl_cpu_cli.md)
+ + [Ubuntu 22.04 + OpenCL + CPU (very slow)](https://github.com/biomedisa/biomedisa/blob/master/README/ubuntu2204_opencl_cpu_cli.md)
  + [Windows 10 + CUDA + GPU (recommended)](https://github.com/biomedisa/biomedisa/blob/master/README/windows10_cuda_gpu_cli.md)
  + [Windows 10 + OpenCL + GPU (easy to install but lacks features like allaxis, smoothing, uncertainty, optimized GPU memory usage)](https://github.com/biomedisa/biomedisa/blob/master/README/windows10_opencl_gpu_cli.md)
  + [Windows 10 + OpenCL + CPU (very slow)](https://github.com/biomedisa/biomedisa/blob/master/README/windows10_opencl_cpu_cli.md)

+ ## Installation (3D Slicer extension)
+ + [Ubuntu 22.04 + CUDA + GPU](https://github.com/biomedisa/biomedisa/blob/master/README/ubuntu2204_cuda11.8_gpu_slicer.md)
+
  ## Installation (browser based)
  + [Ubuntu 22.04](https://github.com/biomedisa/biomedisa/blob/master/README/ubuntu2204_cuda11.8.md)

@@ -52,6 +55,9 @@ Biomedisa (https://biomedisa.info) is a free and easy-to-use open-source applica
  + Download test data from our [gallery](https://biomedisa.info/gallery/)

  ## Revisions
+ 24.7.1
+ + 3D Slicer extension
+ + Prediction of large data block by block
  24.5.22
  + Pip is the preferred installation method
  + Commands, module names and imports have been changed to conform to the Pip standard
@@ -131,15 +137,14 @@ deep_learning(img_data, label_data, train=True, batch_size=12,
  ```

  #### Command-line based (training)
- Start training with a batch size of 12:
  ```
- python -m biomedisa.deeplearning C:\Users\%USERNAME%\Downloads\training_heart C:\Users\%USERNAME%\Downloads\training_heart_labels -t -bs=12
+ python -m biomedisa.deeplearning C:\Users\%USERNAME%\Downloads\training_heart C:\Users\%USERNAME%\Downloads\training_heart_labels -t
  ```
  Monitor training progress using validation data:
  ```
  python -m biomedisa.deeplearning C:\Users\%USERNAME%\Downloads\training_heart C:\Users\%USERNAME%\Downloads\training_heart_labels -t -vi=C:\Users\%USERNAME%\Downloads\val_img -vl=C:\Users\%USERNAME%\Downloads\val_labels
  ```
- If running into ResourceExhaustedError due to out of memory (OOM), try to use a smaller batch size.
+ If running into ResourceExhaustedError due to out of memory (OOM), try to use a smaller batch size (e.g. -bs=12).

  #### Python example (prediction)
  ```python
@@ -175,7 +180,7 @@ from biomedisa.mesh import get_voxel_spacing, save_mesh
  data, header, extension = load_data('final.Head5.am', return_extension=True)

  # get voxel spacing
- x_res, y_res, z_res = get_voxel_spacing(header, data, extension)
+ x_res, y_res, z_res = get_voxel_spacing(header, extension)
  print(f'Voxel spacing: x_spacing, y_spacing, z_spacing = {x_res}, {y_res}, {z_res}')

  # save stl file
{biomedisa-24.5.23 → biomedisa-24.7.1}/README.md
@@ -17,18 +17,21 @@
  - [License](#license)

  ## Overview
- Biomedisa (https://biomedisa.info) is a free and easy-to-use open-source application for segmenting large volumetric images, e.g. CT and MRI scans, developed at [The Australian National University CTLab](https://ctlab.anu.edu.au/). Biomedisa's semi-automated segmentation is based on a smart interpolation of sparsely pre-segmented slices, taking into account the complete underlying image data. In addition, Biomedisa enables deep learning for the fully automated segmentation of series of similar samples. It can be used in combination with segmentation tools such as Amira/Avizo, ImageJ/Fiji and 3D Slicer. If you are using Biomedisa or the data for your research please cite: Lösel, P.D. et al. [Introducing Biomedisa as an open-source online platform for biomedical image segmentation.](https://www.nature.com/articles/s41467-020-19303-w) *Nat. Commun.* **11**, 5577 (2020).
+ Biomedisa (https://biomedisa.info) is a free and easy-to-use open-source application for segmenting large 3D volumetric images such as CT and MRI scans, developed at [The Australian National University CTLab](https://ctlab.anu.edu.au/). Biomedisa's smart interpolation of sparsely pre-segmented slices enables accurate semi-automated segmentation by considering the complete underlying image data. Additionally, Biomedisa enables deep learning for fully automated segmentation across similar samples and structures. It is compatible with segmentation tools like Amira/Avizo, ImageJ/Fiji and 3D Slicer. If you are using Biomedisa or the data for your research please cite: Lösel, P.D. et al. [Introducing Biomedisa as an open-source online platform for biomedical image segmentation.](https://www.nature.com/articles/s41467-020-19303-w) *Nat. Commun.* **11**, 5577 (2020).

  ## Hardware Requirements
  + One or more NVIDIA GPUs with compute capability 3.0 or higher or an Intel CPU

  ## Installation (command-line based)
  + [Ubuntu 22.04 + CUDA + GPU (recommended)](https://github.com/biomedisa/biomedisa/blob/master/README/ubuntu2204_cuda11.8_gpu_cli.md)
- + [Ubuntu 22.04 + OpenCL + CPU (smart interpolation only and very slow)](https://github.com/biomedisa/biomedisa/blob/master/README/ubuntu2204_opencl_cpu_cli.md)
+ + [Ubuntu 22.04 + OpenCL + CPU (very slow)](https://github.com/biomedisa/biomedisa/blob/master/README/ubuntu2204_opencl_cpu_cli.md)
  + [Windows 10 + CUDA + GPU (recommended)](https://github.com/biomedisa/biomedisa/blob/master/README/windows10_cuda_gpu_cli.md)
  + [Windows 10 + OpenCL + GPU (easy to install but lacks features like allaxis, smoothing, uncertainty, optimized GPU memory usage)](https://github.com/biomedisa/biomedisa/blob/master/README/windows10_opencl_gpu_cli.md)
  + [Windows 10 + OpenCL + CPU (very slow)](https://github.com/biomedisa/biomedisa/blob/master/README/windows10_opencl_cpu_cli.md)

+ ## Installation (3D Slicer extension)
+ + [Ubuntu 22.04 + CUDA + GPU](https://github.com/biomedisa/biomedisa/blob/master/README/ubuntu2204_cuda11.8_gpu_slicer.md)
+
  ## Installation (browser based)
  + [Ubuntu 22.04](https://github.com/biomedisa/biomedisa/blob/master/README/ubuntu2204_cuda11.8.md)

@@ -36,6 +39,9 @@ Biomedisa (https://biomedisa.info) is a free and easy-to-use open-source applica
  + Download test data from our [gallery](https://biomedisa.info/gallery/)

  ## Revisions
+ 24.7.1
+ + 3D Slicer extension
+ + Prediction of large data block by block
  24.5.22
  + Pip is the preferred installation method
  + Commands, module names and imports have been changed to conform to the Pip standard
@@ -115,15 +121,14 @@ deep_learning(img_data, label_data, train=True, batch_size=12,
  ```

  #### Command-line based (training)
- Start training with a batch size of 12:
  ```
- python -m biomedisa.deeplearning C:\Users\%USERNAME%\Downloads\training_heart C:\Users\%USERNAME%\Downloads\training_heart_labels -t -bs=12
+ python -m biomedisa.deeplearning C:\Users\%USERNAME%\Downloads\training_heart C:\Users\%USERNAME%\Downloads\training_heart_labels -t
  ```
  Monitor training progress using validation data:
  ```
  python -m biomedisa.deeplearning C:\Users\%USERNAME%\Downloads\training_heart C:\Users\%USERNAME%\Downloads\training_heart_labels -t -vi=C:\Users\%USERNAME%\Downloads\val_img -vl=C:\Users\%USERNAME%\Downloads\val_labels
  ```
- If running into ResourceExhaustedError due to out of memory (OOM), try to use a smaller batch size.
+ If running into ResourceExhaustedError due to out of memory (OOM), try to use a smaller batch size (e.g. -bs=12).

  #### Python example (prediction)
  ```python
@@ -159,7 +164,7 @@ from biomedisa.mesh import get_voxel_spacing, save_mesh
  data, header, extension = load_data('final.Head5.am', return_extension=True)

  # get voxel spacing
- x_res, y_res, z_res = get_voxel_spacing(header, data, extension)
+ x_res, y_res, z_res = get_voxel_spacing(header, extension)
  print(f'Voxel spacing: x_spacing, y_spacing, z_spacing = {x_res}, {y_res}, {z_res}')

  # save stl file
{biomedisa-24.5.23 → biomedisa-24.7.1}/pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"

  [project]
  name = "biomedisa"
- version = "24.5.23"
+ version = "24.7.1"
  authors = [
  { name="Philipp Lösel"}, {email="philipp.loesel@anu.edu.au" },
  ]
{biomedisa-24.5.23 → biomedisa-24.7.1}/src/biomedisa/deeplearning.py
@@ -65,11 +65,11 @@ def deep_learning(img_data, label_data=None, val_img_data=None, val_label_data=N
  path_to_images=None, path_to_labels=None, val_images=None, val_labels=None,
  path_to_model=None, predict=False, train=False, header_file=None,
  balance=False, crop_data=False, flip_x=False, flip_y=False, flip_z=False,
- swapaxes=False, train_dice=False, val_dice=True, no_compression=False, ignore='none', only='all',
+ swapaxes=False, train_dice=False, val_dice=True, compression=True, ignore='none', only='all',
  network_filters='32-64-128-256-512', resnet=False, debug_cropping=False,
- save_cropped=False, epochs=100, no_normalization=False, rotate=0.0, validation_split=0.0,
+ save_cropped=False, epochs=100, normalization=True, rotate=0.0, validation_split=0.0,
  learning_rate=0.01, stride_size=32, validation_stride_size=32, validation_freq=1,
- batch_size=None, x_scale=256, y_scale=256, z_scale=256, no_scaling=False, early_stopping=0,
+ batch_size=None, x_scale=256, y_scale=256, z_scale=256, scaling=True, early_stopping=0,
  pretrained_model=None, fine_tune=False, workers=1, cropping_epochs=50,
  x_range=None, y_range=None, z_range=None, header=None, extension='.tif',
  img_header=None, img_extension='.tif', average_dice=False, django_env=False,
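Note: The keyword arguments `no_compression`, `no_normalization` and `no_scaling` are replaced by positive flags with inverted defaults, so existing Python call sites need updating. A minimal before/after sketch of a call under that assumption (the data paths are placeholders, and otherwise default settings are assumed):

```python
from biomedisa.features.biomedisa_helper import load_data
from biomedisa.deeplearning import deep_learning

# placeholder paths to training image and label data
img_data, _ = load_data('training_heart')
label_data, _ = load_data('training_heart_labels')

# 24.5.23 (old keywords):
# deep_learning(img_data, label_data, train=True,
#               no_compression=True, no_normalization=True, no_scaling=True)

# 24.7.1 (new keywords):
deep_learning(img_data, label_data, train=True,
              compression=False, normalization=False, scaling=False)
```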
@@ -91,17 +91,13 @@ def deep_learning(img_data, label_data=None, val_img_data=None, val_label_data=N
  for arg in key_copy:
  bm.__dict__[arg] = locals()[arg]

- # compression
- if bm.no_compression:
- bm.compression = False
- else:
- bm.compression = True
-
  # normalization
- if bm.no_normalization:
+ bm.normalize = 1 if bm.normalization else 0
+
+ # use patch normalization instead of normalizing the entire volume
+ if not bm.scaling:
  bm.normalize = 0
- else:
- bm.normalize = 1
+ bm.patch_normalization = True

  # django environment
  if bm.django_env:
@@ -217,14 +213,19 @@ def deep_learning(img_data, label_data=None, val_img_data=None, val_label_data=N
  hf = h5py.File(bm.path_to_model, 'r')
  meta = hf.get('meta')
  configuration = meta.get('configuration')
- channels, bm.x_scale, bm.y_scale, bm.z_scale, normalize, mu, sig = np.array(configuration)[:]
- channels, bm.x_scale, bm.y_scale, bm.z_scale, normalize, mu, sig = int(channels), int(bm.x_scale), \
- int(bm.y_scale), int(bm.z_scale), int(normalize), float(mu), float(sig)
- if '/meta/normalization' in hf:
- normalization_parameters = np.array(meta.get('normalization'), dtype=float)
+ channels, bm.x_scale, bm.y_scale, bm.z_scale, bm.normalize, mu, sig = np.array(configuration)[:]
+ channels, bm.x_scale, bm.y_scale, bm.z_scale, bm.normalize, mu, sig = int(channels), int(bm.x_scale), \
+ int(bm.y_scale), int(bm.z_scale), int(bm.normalize), float(mu), float(sig)
+ if 'normalization' in meta:
+ normalization_parameters = np.array(meta['normalization'], dtype=float)
  else:
  normalization_parameters = np.array([[mu],[sig]])
  allLabels = np.array(meta.get('labels'))
+ if 'patch_normalization' in meta:
+ bm.patch_normalization = bool(meta['patch_normalization'][()])
+ if 'scaling' in meta:
+ bm.scaling = bool(meta['scaling'][()])
+
  # check if amira header is available in the network
  if header is None and meta.get('header') is not None:
  header = [np.array(meta.get('header'))]
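Note: Prediction now also restores optional `patch_normalization` and `scaling` flags stored in the trained network file. A short h5py sketch of reading this metadata, using the dataset names from the hunk above (the model filename is a placeholder, and the fallbacks are assumptions):

```python
import h5py
import numpy as np

with h5py.File('heart.h5', 'r') as hf:  # placeholder model file
    meta = hf['meta']
    # configuration: channels, x/y/z scale, normalize flag, mean, std
    channels, xsh, ysh, zsh, normalize, mu, sig = np.array(meta['configuration'])
    if 'normalization' in meta:
        normalization_parameters = np.array(meta['normalization'], dtype=float)
    else:
        normalization_parameters = np.array([[mu], [sig]])  # fall back to global mean/std
    patch_normalization = bool(meta['patch_normalization'][()]) if 'patch_normalization' in meta else False
    scaling = bool(meta['scaling'][()]) if 'scaling' in meta else True
```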
@@ -290,16 +291,11 @@ def deep_learning(img_data, label_data=None, val_img_data=None, val_label_data=N
  region_of_interest, cropped_volume = ch.crop_data(bm.path_to_image, bm.path_to_model, bm.path_to_cropped_image,
  bm.batch_size, bm.debug_cropping, bm.save_cropped, img_data, bm.x_range, bm.y_range, bm.z_range)

- # load prediction data
- img, img_header, z_shape, y_shape, x_shape, region_of_interest, img_data = load_prediction_data(bm.path_to_image,
- channels, bm.x_scale, bm.y_scale, bm.z_scale, bm.no_scaling, normalize, normalization_parameters,
- region_of_interest, img_data, img_header)
-
  # make prediction
- results, bm = predict_semantic_segmentation(bm, img, bm.path_to_model,
- bm.z_patch, bm.y_patch, bm.x_patch, z_shape, y_shape, x_shape, bm.compression, header,
- img_header, bm.stride_size, allLabels, bm.batch_size, region_of_interest,
- bm.no_scaling, extension, img_data)
+ results, bm = predict_semantic_segmentation(bm,
+ header, img_header, allLabels,
+ region_of_interest, extension, img_data,
+ channels, normalization_parameters)

  # results
  if cropped_volume is not None:
@@ -403,7 +399,7 @@ if __name__ == '__main__':
  help='Dice loss function')
  parser.add_argument('-ad','--average_dice', action='store_true', default=False,
  help='Use averaged dice score of each label')
- parser.add_argument('-nc', '--no_compression', action='store_true', default=False,
+ parser.add_argument('-nc', '--no-compression', dest='compression', action='store_false',
  help='Disable compression of segmentation results')
  parser.add_argument('-i', '--ignore', type=str, default='none',
  help='Ignore specific label(s), e.g. 2,5,6')
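Note: The boolean CLI switches now use `dest` together with `action='store_false'`, so `--no-compression` clears a positive `compression` attribute instead of setting a separate `no_compression` one. A standalone argparse sketch of the pattern (not the full Biomedisa parser):

```python
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('-nc', '--no-compression', dest='compression', action='store_false',
                    help='Disable compression of segmentation results')

print(parser.parse_args([]).compression)                    # True (default of store_false)
print(parser.parse_args(['--no-compression']).compression)  # False
```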
@@ -421,12 +417,12 @@ if __name__ == '__main__':
  help='Epochs the network is trained')
  parser.add_argument('-ce','--cropping_epochs', type=int, default=50,
  help='Epochs the network for auto-cropping is trained')
- parser.add_argument('-nn','--no_normalization', action='store_true', default=False,
- help='Disable image normalization')
+ parser.add_argument('-nn','--no-normalization', dest='normalization', action='store_false',
+ help='Disable normalization of 3D image volumes')
  parser.add_argument('-r','--rotate', type=float, default=0.0,
  help='Randomly rotate during training')
  parser.add_argument('-vs','--validation_split', type=float, default=0.0,
- help='Percentage of data used for validation')
+ help='Percentage of data used for training')
  parser.add_argument('-lr','--learning_rate', type=float, default=0.01,
  help='Learning rate')
  parser.add_argument('-ss','--stride_size', metavar="[1-64]", type=int, choices=range(1,65), default=32,
@@ -447,7 +443,7 @@ if __name__ == '__main__':
  help='Images and labels are scaled at y-axis to this size before training')
  parser.add_argument('-zs','--z_scale', type=int, default=256,
  help='Images and labels are scaled at z-axis to this size before training')
- parser.add_argument('-ns','--no_scaling', action='store_true', default=False,
+ parser.add_argument('-ns','--no-scaling', dest='scaling', action='store_false',
  help='Do not resize image and label data')
  parser.add_argument('-es','--early_stopping', type=int, default=0,
  help='Training is terminated when the accuracy has not increased in the epochs defined by this')
@@ -484,8 +480,15 @@ if __name__ == '__main__':
  parser.add_argument('-hf','--header_file', type=str, metavar='PATH', default=None,
  help='Location of header file')
  bm = parser.parse_args()
-
  bm.success = True
+
+ # prediction or training
+ if not any([bm.train, bm.predict]):
+ bm.predict = False
+ bm.train = True
+ if os.path.splitext(bm.path)[1] == '.h5':
+ bm.predict = True
+ bm.train = False
  if bm.predict:
  bm.path_to_labels = None
  bm.path_to_model = bm.path
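Note: When neither the training flag (`-t`) nor the prediction flag is set explicitly, the CLI now infers the mode from the positional path: a `.h5` file is treated as a trained network and triggers prediction, anything else defaults to training. A minimal standalone sketch of that decision (`bm` stands in for the parsed argument namespace):

```python
import os
from types import SimpleNamespace

bm = SimpleNamespace(path='heart.h5', train=False, predict=False)  # stand-in for argparse result

# infer prediction vs training from the path extension when no mode flag is given
if not any([bm.train, bm.predict]):
    bm.predict = False
    bm.train = True
    if os.path.splitext(bm.path)[1] == '.h5':  # trained networks are stored as .h5
        bm.predict = True
        bm.train = False

print(bm.train, bm.predict)  # False True for a .h5 path
```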
{biomedisa-24.5.23 → biomedisa-24.7.1}/src/biomedisa/features/DataGenerator.py
@@ -286,7 +286,7 @@ class DataGenerator(tf.keras.utils.Sequence):

  # patch normalization
  if self.patch_normalization:
- tmp_X = np.copy(tmp_X, order='C')
+ tmp_X = tmp_X.copy()
  for c in range(self.n_channels):
  tmp_X[:,:,:,c] -= np.mean(tmp_X[:,:,:,c])
  tmp_X[:,:,:,c] /= max(np.std(tmp_X[:,:,:,c]), 1e-6)
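Note: Patch normalization standardizes each channel of a patch to zero mean and roughly unit standard deviation (with a 1e-6 floor against division by zero); per the deeplearning.py hunk above, 24.7.1 also switches it on automatically when scaling is disabled. A small numpy sketch of the operation (the patch shape is an arbitrary example):

```python
import numpy as np

patch = np.random.rand(64, 64, 64, 1).astype(np.float32)  # z, y, x, channels (example shape)

normalized = patch.copy()
for c in range(normalized.shape[-1]):
    normalized[..., c] -= np.mean(normalized[..., c])
    normalized[..., c] /= max(np.std(normalized[..., c]), 1e-6)  # guard against zero std

print(np.mean(normalized), np.std(normalized))  # ~0.0 and ~1.0
```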
{biomedisa-24.5.23 → biomedisa-24.7.1}/src/biomedisa/features/active_contour.py
@@ -106,7 +106,7 @@ def reduce_blocksize(raw, slices):
  return raw, slices, argmin_z, argmax_z, argmin_y, argmax_y, argmin_x, argmax_x

  def activeContour(data, labelData, alpha=1.0, smooth=1, steps=3,
- path_to_data=None, path_to_labels=None, no_compression=False,
+ path_to_data=None, path_to_labels=None, compression=True,
  ignore='none', only='all', simple=False,
  img_id=None, friend_id=None, remote=False):

@@ -126,12 +126,6 @@ def activeContour(data, labelData, alpha=1.0, smooth=1, steps=3,
  else:
  bm.django_env = False

- # compression
- if bm.no_compression:
- bm.compression = False
- else:
- bm.compression = True
-
  # disable file saving when called as a function
  if bm.data is not None:
  bm.path_to_data = None
@@ -374,8 +368,7 @@ def init_active_contour(image_id, friend_id, label_id, simple=False):
  else:
  try:
  activeContour(None, None, path_to_data=image.pic.path, path_to_labels=friend.pic.path,
- alpha=label.ac_alpha, smooth=label.ac_smooth, steps=label.ac_steps,
- no_compression=(False if label.compression else True),
+ alpha=label.ac_alpha, smooth=label.ac_smooth, steps=label.ac_steps, compression=label.compression,
  simple=simple, img_id=image_id, friend_id=friend_id, remote=False)
  except Exception as e:
  print(traceback.format_exc())
@@ -407,7 +400,7 @@ if __name__ == '__main__':
  help='Number of smoothing steps')
  parser.add_argument('-st', '--steps', type=int, default=3,
  help='Number of iterations')
- parser.add_argument('-nc', '--no_compression', action='store_true', default=False,
+ parser.add_argument('-nc', '--no-compression', dest='compression', action='store_false',
  help='Disable compression of segmentation results')
  parser.add_argument('-i', '--ignore', type=str, default='none',
  help='Ignore specific label(s), e.g. 2,5,6')
{biomedisa-24.5.23 → biomedisa-24.7.1}/src/biomedisa/features/biomedisa_helper.py
@@ -317,19 +317,23 @@ def load_data(path_to_data, process='None', return_extension=False):
  data, header = None, None
  else:
  try:
- # remove unreadable files or directories
- for name in files:
- if os.path.isfile(name):
+ # load data slice by slice
+ file_names = []
+ img_slices = []
+ header = []
+ files.sort()
+ for file_name in files:
+ if os.path.isfile(file_name):
  try:
- img, _ = load(name)
+ img, img_header = load(file_name)
+ file_names.append(file_name)
+ img_slices.append(img)
+ header.append(img_header)
  except:
- files.remove(name)
- else:
- files.remove(name)
- files.sort()
+ pass

  # get data size
- img, _ = load(files[0])
+ img = img_slices[0]
  if len(img.shape)==3:
  ysh, xsh, csh = img.shape[0], img.shape[1], img.shape[2]
  channel = 'last'
@@ -340,11 +344,9 @@ def load_data(path_to_data, process='None', return_extension=False):
  ysh, xsh = img.shape[0], img.shape[1]
  csh, channel = 0, None

- # load data slice by slice
- data = np.empty((len(files), ysh, xsh), dtype=img.dtype)
- header, image_data_shape = [], []
- for k, file_name in enumerate(files):
- img, img_header = load(file_name)
+ # create 3D volume
+ data = np.empty((len(file_names), ysh, xsh), dtype=img.dtype)
+ for k, img in enumerate(img_slices):
  if csh==3:
  img = rgb2gray(img, channel)
  elif csh==1 and channel=='last':
@@ -352,8 +354,7 @@ def load_data(path_to_data, process='None', return_extension=False):
  elif csh==1 and channel=='first':
  img = img[0,:,:]
  data[k] = img
- header.append(img_header)
- header = [header, files, data.dtype]
+ header = [header, file_names, data.dtype]
  data = np.swapaxes(data, 1, 2)
  data = np.copy(data, order='C')
  except Exception as e:
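Note: `load_data` now collects every readable slice (and its header) into lists and simply skips unreadable files, instead of removing entries from `files` while iterating over it, and then stacks the kept slices into one 3D array. A simplified standalone sketch of the same idea, using Pillow in place of Biomedisa's internal `load()` helper (directory layout and channel handling are illustrative only):

```python
import os
import numpy as np
from PIL import Image  # stand-in for Biomedisa's internal load()

def load_slices_as_volume(directory):
    """Stack all readable 2D slices in a directory into a 3D volume."""
    file_names, img_slices = [], []
    for file_name in sorted(os.listdir(directory)):
        path = os.path.join(directory, file_name)
        if os.path.isfile(path):
            try:
                img_slices.append(np.asarray(Image.open(path)))
                file_names.append(file_name)
            except Exception:
                pass  # skip unreadable files without mutating the list being iterated
    if not img_slices:
        return None, []
    ysh, xsh = img_slices[0].shape[:2]
    data = np.empty((len(img_slices), ysh, xsh), dtype=img_slices[0].dtype)
    for k, img in enumerate(img_slices):
        data[k] = img if img.ndim == 2 else img[..., 0]  # crude channel handling for the sketch
    return data, file_names
```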
{biomedisa-24.5.23 → biomedisa-24.7.1}/src/biomedisa/features/create_slices.py
@@ -151,6 +151,7 @@ def create_slices(path_to_data, path_to_label, on_site=False):
  # increase contrast
  raw = img_to_uint8(raw)
  raw = contrast(raw)
+ zsh, ysh, xsh = raw.shape

  # create slices for slice viewer
  if not os.path.isdir(path_to_slices):
@@ -160,9 +161,9 @@ def create_slices(path_to_data, path_to_label, on_site=False):
  os.chmod(path_to_slices, 0o770)

  # save slices
- for k in range(raw.shape[0]):
+ for k in range(zsh):
  im = Image.fromarray(raw[k])
- im.save(path_to_slices + f'/{k}.png')
+ im.save(path_to_slices + '/slice_' + str(k).zfill(len(str(zsh-1))) + '.png')

  if path_to_label and not os.path.isdir(path_to_label_slices):

@@ -263,7 +264,7 @@

  # save slice
  im = Image.fromarray(out)
- im.save(path_to_label_slices + f'/{k}.png')
+ im.save(path_to_label_slices + '/slice_' + str(k).zfill(len(str(zsh-1))) + '.png')

  except Exception as e:
  print(e)
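Note: Slice images for the viewer are now written with zero-padded names (slice_000.png, slice_001.png, …), padded to the width of the largest index so they sort correctly. A tiny sketch of the naming scheme (the slice count is an example):

```python
zsh = 120                      # number of slices (example)
width = len(str(zsh - 1))      # 3 digits for indices 0..119
names = ['slice_' + str(k).zfill(width) + '.png' for k in range(zsh)]
print(names[0], names[-1])     # slice_000.png slice_119.png
```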