biomedisa-24.5.23-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (44)
  1. biomedisa/__init__.py +49 -0
  2. biomedisa/__main__.py +18 -0
  3. biomedisa/deeplearning.py +529 -0
  4. biomedisa/features/DataGenerator.py +299 -0
  5. biomedisa/features/DataGeneratorCrop.py +121 -0
  6. biomedisa/features/PredictDataGenerator.py +87 -0
  7. biomedisa/features/PredictDataGeneratorCrop.py +74 -0
  8. biomedisa/features/__init__.py +0 -0
  9. biomedisa/features/active_contour.py +430 -0
  10. biomedisa/features/amira_to_np/__init__.py +0 -0
  11. biomedisa/features/amira_to_np/amira_data_stream.py +980 -0
  12. biomedisa/features/amira_to_np/amira_grammar.py +369 -0
  13. biomedisa/features/amira_to_np/amira_header.py +290 -0
  14. biomedisa/features/amira_to_np/amira_helper.py +72 -0
  15. biomedisa/features/assd.py +167 -0
  16. biomedisa/features/biomedisa_helper.py +842 -0
  17. biomedisa/features/create_slices.py +277 -0
  18. biomedisa/features/crop_helper.py +581 -0
  19. biomedisa/features/curvop_numba.py +149 -0
  20. biomedisa/features/django_env.py +171 -0
  21. biomedisa/features/keras_helper.py +1195 -0
  22. biomedisa/features/nc_reader.py +179 -0
  23. biomedisa/features/pid.py +52 -0
  24. biomedisa/features/process_image.py +251 -0
  25. biomedisa/features/pycuda_test.py +85 -0
  26. biomedisa/features/random_walk/__init__.py +0 -0
  27. biomedisa/features/random_walk/gpu_kernels.py +184 -0
  28. biomedisa/features/random_walk/pycuda_large.py +826 -0
  29. biomedisa/features/random_walk/pycuda_large_allx.py +806 -0
  30. biomedisa/features/random_walk/pycuda_small.py +414 -0
  31. biomedisa/features/random_walk/pycuda_small_allx.py +493 -0
  32. biomedisa/features/random_walk/pyopencl_large.py +760 -0
  33. biomedisa/features/random_walk/pyopencl_small.py +441 -0
  34. biomedisa/features/random_walk/rw_large.py +389 -0
  35. biomedisa/features/random_walk/rw_small.py +307 -0
  36. biomedisa/features/remove_outlier.py +396 -0
  37. biomedisa/features/split_volume.py +167 -0
  38. biomedisa/interpolation.py +369 -0
  39. biomedisa/mesh.py +403 -0
  40. biomedisa-24.5.23.dist-info/LICENSE +191 -0
  41. biomedisa-24.5.23.dist-info/METADATA +261 -0
  42. biomedisa-24.5.23.dist-info/RECORD +44 -0
  43. biomedisa-24.5.23.dist-info/WHEEL +5 -0
  44. biomedisa-24.5.23.dist-info/top_level.txt +1 -0
biomedisa/__init__.py ADDED
@@ -0,0 +1,49 @@
+ import os
+ import subprocess
+
+ # base directory
+ BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
+
+ # pip installation
+ if not os.path.exists(os.path.join(BASE_DIR,'biomedisa/settings.py')):
+
+     # metadata
+     import importlib_metadata
+     metadata = importlib_metadata.metadata("biomedisa")
+
+     __all__ = (
+         "__title__",
+         "__summary__",
+         "__url__",
+         "__version__",
+         "__author__",
+         "__email__",
+         "__license__",
+         "__copyright__",
+     )
+
+     __copyright__ = "Copyright (c) 2019-2024 Philipp Lösel"
+     __title__ = metadata["name"]
+     __summary__ = metadata["summary"]
+     __url__ = "https://biomedisa.info"
+     __version__ = metadata["version"]
+     __author__ = "Philipp Lösel"
+     __email__ = metadata["author-email"]
+     __license__ = "European Union Public Licence 1.2 (EUPL 1.2)"
+
+ # biomedisa version when installed from source
+ else:
+     try:
+         if os.path.exists(os.path.join(BASE_DIR,'.git')):
+             __version__ = subprocess.check_output(['git', 'describe', '--tags', '--always'], cwd=BASE_DIR).decode('utf-8').strip()
+             f = open(os.path.join(BASE_DIR,'log/biomedisa_version'), 'w')
+             f.write(__version__)
+             f.close()
+         else:
+             raise Exception()
+     except:
+         if os.path.isfile(os.path.join(BASE_DIR,'log/biomedisa_version')):
+             __version__ = open(os.path.join(BASE_DIR,'log/biomedisa_version'), 'r').readline().rstrip('\n')
+         else:
+             __version__ = None
+
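For illustration (not part of the package contents): for a pip-installed wheel the logic above reduces to reading the installed distribution metadata, so a minimal version check looks like the sketch below.

    import biomedisa
    print(biomedisa.__version__)            # e.g. '24.5.23' for this wheel

    # equivalent lookup directly from the installed metadata
    import importlib_metadata
    print(importlib_metadata.version("biomedisa"))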
biomedisa/__main__.py ADDED
@@ -0,0 +1,18 @@
+ # biomedisa/__main__.py
+
+ import sys
+
+ def main():
+     if len(sys.argv) < 2 or sys.argv[1] in ['-h','--help']:
+         print("Usage: python3 -m biomedisa.<module_name> <args>")
+         print("Modules available: interpolation, deeplearning, mesh")
+         print("[-h, --help] for more information of each module")
+         print("[-V, --version] for Biomedisa version installed")
+
+     elif sys.argv[1] in ['-v','-V','--version']:
+         import biomedisa
+         print(biomedisa.__version__)
+
+ if __name__ == "__main__":
+     sys.exit(main())
+
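For illustration (not part of the package contents): __main__.py only prints usage or the version; the actual tools are the submodules named in the usage text. A minimal sketch of driving them programmatically with the standard-library runpy module, using placeholder file names:

    import sys, runpy

    # equivalent to: python3 -m biomedisa -V
    sys.argv = ['biomedisa', '-V']
    runpy.run_module('biomedisa', run_name='__main__')

    # equivalent to: python3 -m biomedisa.deeplearning my_image.tif my_model.h5 -p
    sys.argv = ['biomedisa.deeplearning', 'my_image.tif', 'my_model.h5', '-p']
    runpy.run_module('biomedisa.deeplearning', run_name='__main__')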
biomedisa/deeplearning.py ADDED
@@ -0,0 +1,529 @@
+ #!/usr/bin/python3
+ ##########################################################################
+ ## ##
+ ## Copyright (c) 2019-2024 Philipp Lösel. All rights reserved. ##
+ ## ##
+ ## This file is part of the open source project biomedisa. ##
+ ## ##
+ ## Licensed under the European Union Public Licence (EUPL) ##
+ ## v1.2, or - as soon as they will be approved by the ##
+ ## European Commission - subsequent versions of the EUPL; ##
+ ## ##
+ ## You may redistribute it and/or modify it under the terms ##
+ ## of the EUPL v1.2. You may not use this work except in ##
+ ## compliance with this Licence. ##
+ ## ##
+ ## You can obtain a copy of the Licence at: ##
+ ## ##
+ ## https://joinup.ec.europa.eu/page/eupl-text-11-12 ##
+ ## ##
+ ## Unless required by applicable law or agreed to in ##
+ ## writing, software distributed under the Licence is ##
+ ## distributed on an "AS IS" basis, WITHOUT WARRANTIES ##
+ ## OR CONDITIONS OF ANY KIND, either express or implied. ##
+ ## ##
+ ## See the Licence for the specific language governing ##
+ ## permissions and limitations under the Licence. ##
+ ## ##
+ ##########################################################################
+
+ import os
+ import biomedisa
+ import biomedisa.features.crop_helper as ch
+ from biomedisa.features.keras_helper import *
+ from biomedisa.features.biomedisa_helper import _error_, unique_file_path
+ from tensorflow.python.framework.errors_impl import ResourceExhaustedError
+ import tensorflow as tf
+ import numpy as np
+ import traceback
+ import argparse
+ import h5py
+ import time
+ import subprocess
+ import glob
+ import tempfile
+ import tifffile
+
+ class Biomedisa(object):
+     pass
+
+ def get_gpu_memory():
+     try:
+         result = subprocess.check_output(['nvidia-smi', '--query-gpu=memory.free', '--format=csv,nounits,noheader'], encoding='utf-8')
+         # Convert lines to list
+         gpu_memory = [int(x) for x in result.strip().split('\n')]
+         return gpu_memory
+     except:
+         return None
+
+ def number_of_slices(file_path):
+     with tifffile.TiffFile(file_path) as tiff:
+         z_dim = len(tiff.pages)
+         return z_dim
+
+ def deep_learning(img_data, label_data=None, val_img_data=None, val_label_data=None,
+         path_to_images=None, path_to_labels=None, val_images=None, val_labels=None,
+         path_to_model=None, predict=False, train=False, header_file=None,
+         balance=False, crop_data=False, flip_x=False, flip_y=False, flip_z=False,
+         swapaxes=False, train_dice=False, val_dice=True, no_compression=False, ignore='none', only='all',
+         network_filters='32-64-128-256-512', resnet=False, debug_cropping=False,
+         save_cropped=False, epochs=100, no_normalization=False, rotate=0.0, validation_split=0.0,
+         learning_rate=0.01, stride_size=32, validation_stride_size=32, validation_freq=1,
+         batch_size=None, x_scale=256, y_scale=256, z_scale=256, no_scaling=False, early_stopping=0,
+         pretrained_model=None, fine_tune=False, workers=1, cropping_epochs=50,
+         x_range=None, y_range=None, z_range=None, header=None, extension='.tif',
+         img_header=None, img_extension='.tif', average_dice=False, django_env=False,
+         path=None, success=True, return_probs=False, patch_normalization=False,
+         z_patch=64, y_patch=64, x_patch=64, path_to_logfile=None, img_id=None, label_id=None,
+         remote=False, queue=0, username=None, shortfilename=None, dice_loss=False,
+         acwe=False, acwe_alpha=1.0, acwe_smooth=1, acwe_steps=3, clean=None, fill=None):
+
+     # create biomedisa
+     bm = Biomedisa()
+     bm.process = 'deep_learning'
+     results = None
+
+     # time
+     TIC = time.time()
+
+     # transfer arguments
+     key_copy = tuple(locals().keys())
+     for arg in key_copy:
+         bm.__dict__[arg] = locals()[arg]
+
+     # compression
+     if bm.no_compression:
+         bm.compression = False
+     else:
+         bm.compression = True
+
+     # normalization
+     if bm.no_normalization:
+         bm.normalize = 0
+     else:
+         bm.normalize = 1
+
+     # django environment
+     if bm.django_env:
+
+         # path to image data
+         if bm.train:
+
+             # training files
+             bm.path_to_images = bm.path_to_images.split(',')[:-1]
+             bm.path_to_labels = bm.path_to_labels.split(',')[:-1]
+
+             # validation files
+             if bm.val_images is not None:
+                 bm.val_images = bm.val_images.split(',')[:-1]
+                 bm.val_labels = bm.val_labels.split(',')[:-1]
+             else:
+                 bm.val_images = [None]
+                 bm.val_labels = [None]
+
+             # project name
+             project = os.path.splitext(bm.shortfilename)[0]
+
+             # path to model
+             bm.path_to_model = biomedisa.BASE_DIR + f'/private_storage/images/{bm.username}/{project}.h5'
+             if not bm.remote:
+                 bm.path_to_model = unique_file_path(bm.path_to_model)
+
+         if bm.predict:
+             project = os.path.splitext(os.path.basename(bm.path_to_model))[0]
+
+         # create pid object
+         from biomedisa.features.django_env import create_pid_object
+         create_pid_object(os.getpid(), bm.remote, bm.queue, bm.img_id, (bm.path_to_model if bm.train else ''))
+
+         # write in log file
+         with open(bm.path_to_logfile, 'a') as logfile:
+             print('%s %s %s %s' %(time.ctime(), bm.username, bm.shortfilename, 'Process was started.'), file=logfile)
+             print(f'PROJECT:{project} PREDICT:{bm.predict} IMG:{bm.shortfilename}', file=logfile)
+             if bm.train:
+                 print(f'IMG_LIST:{bm.path_to_images} LABEL_LIST:{bm.path_to_labels} VAL_IMG_LIST:{bm.val_images} VAL_LABEL_LIST:{bm.val_labels}', file=logfile)
+
+     # path to model
+     if bm.train and not bm.path_to_model:
+         current_time = time.strftime("%d-%m-%Y_%H-%M-%S", time.localtime())
+         bm.path_to_model = os.getcwd() + f'/biomedisa_{current_time}.h5'
+     if bm.predict and not bm.path_to_model:
+         raise Exception("'path_to_model' must be specified")
+
+     # disable file saving when called as a function
+     if img_data is not None:
+         bm.path_to_images = None
+         bm.path_to_cropped_image = None
+
+     # adapt scaling to stride size and patch size
+     bm.stride_size = max(1, min(bm.stride_size, 64))
+     bm.x_scale = bm.x_scale - (bm.x_scale - bm.x_patch) % bm.stride_size
+     bm.y_scale = bm.y_scale - (bm.y_scale - bm.y_patch) % bm.stride_size
+     bm.z_scale = bm.z_scale - (bm.z_scale - bm.z_patch) % bm.stride_size
+
+     # adapt batch size to available gpu memory
+     if bm.batch_size is None:
+         bm.batch_size = 6
+         gpu_memory = get_gpu_memory()
+         if gpu_memory:
+             if bm.predict:
+                 gpu_memory = gpu_memory[:1]
+             if 14000 < np.sum(gpu_memory) < 28000:
+                 bm.batch_size = 12
+             elif 28000 <= np.sum(gpu_memory):
+                 bm.batch_size = 24
+
+     if bm.train:
+
+         # path to results
+         bm.path_to_final = None
+         bm.path_to_cropped_image = None
+
+         # get number of GPUs
+         strategy = tf.distribute.MirroredStrategy()
+         ngpus = int(strategy.num_replicas_in_sync)
+
+         # batch size must be divisible by the number of GPUs and two
+         rest = bm.batch_size % (2*ngpus)
+         if 2*ngpus - rest < rest:
+             bm.batch_size = bm.batch_size + 2*ngpus - rest
+         else:
+             bm.batch_size = bm.batch_size - rest
+
+         if not bm.django_env:
+             bm.path_to_images, bm.path_to_labels = [bm.path_to_images], [bm.path_to_labels]
+             bm.val_images, bm.val_labels = [bm.val_images], [bm.val_labels]
+
+         # train automatic cropping
+         bm.cropping_weights, bm.cropping_config, bm.cropping_norm = None, None, None
+         if bm.crop_data:
+             bm.cropping_weights, bm.cropping_config, bm.cropping_norm = ch.load_and_train(
+                 bm.normalize, bm.path_to_images, bm.path_to_labels, bm.path_to_model,
+                 bm.cropping_epochs, bm.batch_size, bm.validation_split,
+                 bm.flip_x, bm.flip_y, bm.flip_z, bm.rotate, bm.only, bm.ignore,
+                 bm.val_images, bm.val_labels, img_data, label_data,
+                 val_img_data, val_label_data)
+
+         # train automatic segmentation
+         train_semantic_segmentation(bm, bm.path_to_images, bm.path_to_labels,
+             bm.val_images, bm.val_labels,
+             img_data, label_data,
+             val_img_data, val_label_data,
+             header, extension)
+
+     if bm.predict:
+
+         # get meta data
+         hf = h5py.File(bm.path_to_model, 'r')
+         meta = hf.get('meta')
+         configuration = meta.get('configuration')
+         channels, bm.x_scale, bm.y_scale, bm.z_scale, normalize, mu, sig = np.array(configuration)[:]
+         channels, bm.x_scale, bm.y_scale, bm.z_scale, normalize, mu, sig = int(channels), int(bm.x_scale), \
+             int(bm.y_scale), int(bm.z_scale), int(normalize), float(mu), float(sig)
+         if '/meta/normalization' in hf:
+             normalization_parameters = np.array(meta.get('normalization'), dtype=float)
+         else:
+             normalization_parameters = np.array([[mu],[sig]])
+         allLabels = np.array(meta.get('labels'))
+         # check if amira header is available in the network
+         if header is None and meta.get('header') is not None:
+             header = [np.array(meta.get('header'))]
+             extension = '.am'
+         crop_data = True if 'cropping_weights' in hf else False
+         hf.close()
+
+         # make temporary directory
+         with tempfile.TemporaryDirectory() as temp_dir:
+
+             # extract image files from tar file
+             bm.tarfile = False
+             if bm.path_to_images is not None and (os.path.splitext(bm.path_to_images)[1]=='.tar' or bm.path_to_images[-7:]=='.tar.gz'):
+                 bm.tarfile = True
+                 path_to_result = os.path.dirname(bm.path_to_images) + '/final.'+os.path.basename(bm.path_to_images)
+                 if path_to_result[-3:]=='.gz':
+                     path_to_result = path_to_result[:-3]
+                 if bm.django_env and not bm.remote:
+                     path_to_result = unique_file_path(path_to_result)
+                 tar = tarfile.open(bm.path_to_images)
+                 tar.extractall(path=temp_dir)
+                 tar.close()
+                 bm.path_to_images = temp_dir
+                 bm.save_cropped, bm.acwe = False, False
+                 bm.clean, bm.fill = None, None
+
+             # list of images
+             path_to_finals = []
+             if bm.path_to_images is not None and os.path.isdir(bm.path_to_images):
+                 # load list of volumetric image files
+                 files = []
+                 for data_type in ['.am','.hdr','.mhd','.mha','.nrrd','.nii','.nii.gz','.zip','.mrc']:
+                     files += [file for file in glob.glob(bm.path_to_images+'/**/*'+data_type, recursive=True) if not os.path.basename(file).startswith('.')]
+                 for data_type in ['.tif','.tiff']:
+                     files += [file for file in glob.glob(bm.path_to_images+'/**/*'+data_type, recursive=True) if not os.path.basename(file).startswith('.') and number_of_slices(file)>1]
+                 if len(files)==0: # assume directory of 2D slices
+                     bm.path_to_images = [bm.path_to_images]
+                 else:
+                     bm.path_to_images = files
+             else:
+                 bm.path_to_images = [bm.path_to_images]
+
+             # loop over all images
+             for bm.path_to_image in bm.path_to_images:
+
+                 # create path_to_final
+                 if bm.path_to_image:
+                     filename = os.path.basename(bm.path_to_image)
+                     filename = os.path.splitext(filename)[0]
+                     if filename[-4:] == '.nii':
+                         filename = filename[:-4]
+                     bm.path_to_cropped_image = os.path.dirname(bm.path_to_image) + '/' + filename + '.cropped.tif'
+                     if bm.django_env and not bm.remote and not bm.tarfile:
+                         bm.path_to_cropped_image = unique_file_path(bm.path_to_cropped_image)
+                     filename = 'final.' + filename
+                     bm.path_to_final = os.path.dirname(bm.path_to_image) + '/' + filename + extension
+                     if bm.django_env and not bm.remote and not bm.tarfile:
+                         bm.path_to_final = unique_file_path(bm.path_to_final)
+
+                 # crop data
+                 region_of_interest, cropped_volume = None, None
+                 if crop_data:
+                     region_of_interest, cropped_volume = ch.crop_data(bm.path_to_image, bm.path_to_model, bm.path_to_cropped_image,
+                         bm.batch_size, bm.debug_cropping, bm.save_cropped, img_data, bm.x_range, bm.y_range, bm.z_range)
+
+                 # load prediction data
+                 img, img_header, z_shape, y_shape, x_shape, region_of_interest, img_data = load_prediction_data(bm.path_to_image,
+                     channels, bm.x_scale, bm.y_scale, bm.z_scale, bm.no_scaling, normalize, normalization_parameters,
+                     region_of_interest, img_data, img_header)
+
+                 # make prediction
+                 results, bm = predict_semantic_segmentation(bm, img, bm.path_to_model,
+                     bm.z_patch, bm.y_patch, bm.x_patch, z_shape, y_shape, x_shape, bm.compression, header,
+                     img_header, bm.stride_size, allLabels, bm.batch_size, region_of_interest,
+                     bm.no_scaling, extension, img_data)
+
+                 # results
+                 if cropped_volume is not None:
+                     results['cropped_volume'] = cropped_volume
+
+                 # path to results
+                 if bm.path_to_image:
+                     path_to_finals.append(bm.path_to_final)
+
+             # write tar file and delete extracted image files
+             if bm.tarfile and os.path.exists(temp_dir):
+                 with tarfile.open(path_to_result, 'w') as tar:
+                     for file_path in path_to_finals:
+                         file_name = os.path.basename(file_path)
+                         tar.add(file_path, arcname=file_name)
+                 bm.path_to_final = path_to_result
+                 bm.path_to_cropped_image = None
+
+     # computation time
+     t = int(time.time() - TIC)
+     if t < 60:
+         time_str = str(t) + " sec"
+     elif 60 <= t < 3600:
+         time_str = str(t // 60) + " min " + str(t % 60) + " sec"
+     elif 3600 <= t:
+         time_str = str(t // 3600) + " h " + str((t % 3600) // 60) + " min " + str(t % 60) + " sec"
+     print('Total calculation time:', time_str)
+
+     # django environment
+     if bm.django_env:
+         from biomedisa_app.config import config
+         from biomedisa.features.django_env import post_processing
+         validation=True if bm.validation_split or (bm.val_images is not None and bm.val_images[0] is not None) else False
+         post_processing(bm.path_to_final, time_str, config['SERVER_ALIAS'], bm.remote, bm.queue,
+             img_id=bm.img_id, label_id=bm.label_id, path_to_model=bm.path_to_model,
+             path_to_cropped_image=(bm.path_to_cropped_image if crop_data else None),
+             train=bm.train, predict=bm.predict, validation=validation)
+
+         # write in log file
+         path_to_time = biomedisa.BASE_DIR + '/log/time.txt'
+         with open(path_to_time, 'a') as timefile:
+             if predict:
+                 message = 'Successfully segmented ' + bm.shortfilename
+             else:
+                 message = 'Successfully trained ' + project
+             print('%s %s %s %s on %s' %(time.ctime(), bm.username, message, time_str, config['SERVER_ALIAS']), file=timefile)
+
+     return results
+
+ if __name__ == '__main__':
+
+     # initialize arguments
+     parser = argparse.ArgumentParser(description='Biomedisa deeplearning.',
+              formatter_class=argparse.ArgumentDefaultsHelpFormatter)
+
+     # mutually exclusive group
+     g = parser.add_mutually_exclusive_group()
+
+     # required arguments
+     parser.add_argument('path_to_images', type=str, metavar='PATH_TO_IMAGES',
+                         help='Location of image data (tarball, directory, or file)')
+     parser.add_argument('path', type=str, metavar='PATH',
+                         help='Location of label data during training (tarball, directory, or file) or model for prediction (h5)')
+
+     # optional arguments
+     g.add_argument('-p','--predict', action='store_true', default=False,
+                    help='Automatic/predict segmentation')
+     g.add_argument('-t','--train', action='store_true', default=False,
+                    help='Train neural network')
+     parser.add_argument('-v', '--version', action='version', version=f'{biomedisa.__version__}',
+                         help='Biomedisa version')
+     parser.add_argument('-b','--balance', action='store_true', default=False,
+                         help='Balance foreground and background training patches')
+     parser.add_argument('-cd','--crop_data', action='store_true', default=False,
+                         help='Crop data automatically to region of interest')
+     parser.add_argument('--acwe', action='store_true', default=False,
+                         help='Post-processing with active contour')
+     parser.add_argument('--acwe_alpha', metavar='ALPHA', type=float, default=1.0,
+                         help='Pushing force of active contour')
+     parser.add_argument('--acwe_smooth', metavar='SMOOTH', type=int, default=1,
+                         help='Smoothing steps of active contour')
+     parser.add_argument('--acwe_steps', metavar='STEPS', type=int, default=3,
+                         help='Iterations of active contour')
+     parser.add_argument('-c','--clean', nargs='?', type=float, const=0.1, default=None,
+                         help='Remove outliers, e.g. 0.5 means that objects smaller than 50 percent of the size of the largest object will be removed')
+     parser.add_argument('-f','--fill', nargs='?', type=float, const=0.9, default=None,
+                         help='Fill holes, e.g. 0.5 means that all holes smaller than 50 percent of the entire label will be filled')
+     parser.add_argument('--flip_x', action='store_true', default=False,
+                         help='Randomly flip x-axis during training')
+     parser.add_argument('--flip_y', action='store_true', default=False,
+                         help='Randomly flip y-axis during training')
+     parser.add_argument('--flip_z', action='store_true', default=False,
+                         help='Randomly flip z-axis during training')
+     parser.add_argument('-sa','--swapaxes', action='store_true', default=False,
+                         help='Randomly swap two axes during training')
+     parser.add_argument('-td','--train_dice', action='store_true', default=False,
+                         help='Monitor Dice score on training data')
+     parser.add_argument('-nvd','--no-val_dice', dest='val_dice', action='store_false',
+                         help='Disable monitoring of Dice score on validation data')
+     parser.add_argument('-dl','--dice_loss', action='store_true', default=False,
+                         help='Dice loss function')
+     parser.add_argument('-ad','--average_dice', action='store_true', default=False,
+                         help='Use averaged dice score of each label')
+     parser.add_argument('-nc', '--no_compression', action='store_true', default=False,
+                         help='Disable compression of segmentation results')
+     parser.add_argument('-i', '--ignore', type=str, default='none',
+                         help='Ignore specific label(s), e.g. 2,5,6')
+     parser.add_argument('-o', '--only', type=str, default='all',
+                         help='Segment only specific label(s), e.g. 1,3,5')
+     parser.add_argument('-nf', '--network_filters', type=str, default='32-64-128-256-512',
+                         help='Number of filters per layer up to the deepest, e.g. 32-64-128-256-512')
+     parser.add_argument('-rn','--resnet', action='store_true', default=False,
+                         help='Use U-resnet instead of standard U-net')
+     parser.add_argument('-dc','--debug_cropping', action='store_true', default=False,
+                         help='Debug cropping')
+     parser.add_argument('-sc','--save_cropped', action='store_true', default=False,
+                         help='Save automatically cropped image')
+     parser.add_argument('-e','--epochs', type=int, default=100,
+                         help='Epochs the network is trained')
+     parser.add_argument('-ce','--cropping_epochs', type=int, default=50,
+                         help='Epochs the network for auto-cropping is trained')
+     parser.add_argument('-nn','--no_normalization', action='store_true', default=False,
+                         help='Disable image normalization')
+     parser.add_argument('-r','--rotate', type=float, default=0.0,
+                         help='Randomly rotate during training')
+     parser.add_argument('-vs','--validation_split', type=float, default=0.0,
+                         help='Percentage of data used for validation')
+     parser.add_argument('-lr','--learning_rate', type=float, default=0.01,
+                         help='Learning rate')
+     parser.add_argument('-ss','--stride_size', metavar="[1-64]", type=int, choices=range(1,65), default=32,
+                         help='Stride size for patches')
+     parser.add_argument('-vss','--validation_stride_size', metavar="[1-64]", type=int, choices=range(1,65), default=32,
+                         help='Stride size for validation patches')
+     parser.add_argument('-vf','--validation_freq', type=int, default=1,
+                         help='Epochs performed before validation')
+     parser.add_argument('-bs','--batch_size', type=int, default=None,
+                         help='Number of samples processed in a batch')
+     parser.add_argument('-vi','--val_images', type=str, metavar='PATH', default=None,
+                         help='Location of validation image data (tarball, directory, or file)')
+     parser.add_argument('-vl','--val_labels', type=str, metavar='PATH', default=None,
+                         help='Location of validation label data (tarball, directory, or file)')
+     parser.add_argument('-xs','--x_scale', type=int, default=256,
+                         help='Images and labels are scaled at x-axis to this size before training')
+     parser.add_argument('-ys','--y_scale', type=int, default=256,
+                         help='Images and labels are scaled at y-axis to this size before training')
+     parser.add_argument('-zs','--z_scale', type=int, default=256,
+                         help='Images and labels are scaled at z-axis to this size before training')
+     parser.add_argument('-ns','--no_scaling', action='store_true', default=False,
+                         help='Do not resize image and label data')
+     parser.add_argument('-es','--early_stopping', type=int, default=0,
+                         help='Training is terminated when the accuracy has not increased in the epochs defined by this')
+     parser.add_argument('-pm','--pretrained_model', type=str, metavar='PATH', default=None,
+                         help='Location of pretrained model (only encoder will be trained if specified)')
+     parser.add_argument('-ft','--fine_tune', action='store_true', default=False,
+                         help='Fine-tune the entire pretrained model. Choose a smaller learning rate, e.g. 0.0001')
+     parser.add_argument('-w','--workers', type=int, default=1,
+                         help='Parallel workers for batch processing')
+     parser.add_argument('-xr','--x_range', nargs="+", type=int, default=None,
+                         help='Manually crop x-axis of image data for prediction, e.g. -xr 100 200')
+     parser.add_argument('-yr','--y_range', nargs="+", type=int, default=None,
+                         help='Manually crop y-axis of image data for prediction, e.g. -yr 100 200')
+     parser.add_argument('-zr','--z_range', nargs="+", type=int, default=None,
+                         help='Manually crop z-axis of image data for prediction, e.g. -zr 100 200')
+     parser.add_argument('-rp','--return_probs', action='store_true', default=False,
+                         help='Return prediction probabilities for each label')
+     parser.add_argument('-pn','--patch_normalization', action='store_true', default=False,
+                         help='Scale each patch to mean zero and standard deviation')
+     parser.add_argument('-xp','--x_patch', type=int, default=64,
+                         help='X-dimension of patch')
+     parser.add_argument('-yp','--y_patch', type=int, default=64,
+                         help='Y-dimension of patch')
+     parser.add_argument('-zp','--z_patch', type=int, default=64,
+                         help='Z-dimension of patch')
+     parser.add_argument('-iid','--img_id', type=str, default=None,
+                         help='Image ID within django environment/browser version')
+     parser.add_argument('-lid','--label_id', type=str, default=None,
+                         help='Label ID within django environment/browser version')
+     parser.add_argument('-re','--remote', action='store_true', default=False,
+                         help='The interpolation is carried out on a remote server. Must be set up in config.py')
+     parser.add_argument('-q','--queue', type=int, default=0,
+                         help='Processing queue when using a remote server')
+     parser.add_argument('-hf','--header_file', type=str, metavar='PATH', default=None,
+                         help='Location of header file')
+     bm = parser.parse_args()
+
+     bm.success = True
+     if bm.predict:
+         bm.path_to_labels = None
+         bm.path_to_model = bm.path
+     if bm.train:
+         bm.path_to_labels = bm.path
+         bm.path_to_model = bm.path_to_images + '.h5'
+
+     # django environment
+     if bm.img_id is not None:
+         bm.django_env = True
+         if bm.train:
+             reference_image_path = bm.path_to_images.split(',')[:-1][-1]
+         else:
+             reference_image_path = bm.path_to_images
+         bm.username = os.path.basename(os.path.dirname(reference_image_path))
+         bm.shortfilename = os.path.basename(reference_image_path)
+         bm.path_to_logfile = biomedisa.BASE_DIR + '/log/logfile.txt'
+     else:
+         bm.django_env = False
+
+     kwargs = vars(bm)
+
+     # train or predict segmentation
+     try:
+         deep_learning(None, **kwargs)
+     except InputError:
+         print(traceback.format_exc())
+         bm = _error_(bm, f'{InputError.message}')
+     except ch.InputError:
+         print(traceback.format_exc())
+         bm = _error_(bm, f'{ch.InputError.message}')
+     except MemoryError:
+         print(traceback.format_exc())
+         bm = _error_(bm, 'MemoryError')
+     except ResourceExhaustedError:
+         print(traceback.format_exc())
+         bm = _error_(bm, 'GPU out of memory. Reduce your batch size')
+     except Exception as e:
+         print(traceback.format_exc())
+         bm = _error_(bm, e)
+
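For illustration (not part of the package contents): deeplearning.py exposes the same functionality both as the deep_learning() Python function and as the command-line interface parsed above. Below is a minimal sketch of the function-style call, based only on the signature shown in this diff; the file paths and the array shape are placeholders, and the contents of the returned dictionary are produced by predict_semantic_segmentation, which is not shown here.

    import numpy as np
    from biomedisa.deeplearning import deep_learning

    # hypothetical in-memory volume (z, y, x); a file path could be passed via path_to_images instead
    img = np.random.randint(0, 256, (64, 256, 256), dtype=np.uint8)

    # prediction with an existing network file (placeholder path)
    results = deep_learning(img, predict=True, path_to_model='my_model.h5')

    # training from directories of images and labels (placeholder paths)
    deep_learning(None, train=True, path_to_images='training_images',
                  path_to_labels='training_labels', path_to_model='my_model.h5')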