biomedisa-24.8.3-py3-none-any.whl → biomedisa-24.8.4-py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as they appear in their public registry. It is provided for informational purposes only.
biomedisa/deeplearning.py CHANGED
@@ -100,6 +100,15 @@ def deep_learning(img_data, label_data=None, val_img_data=None, val_label_data=N
     bm.normalize = 0
     bm.patch_normalization = True
 
+    # region of interest
+    if any([bm.x_range, bm.y_range, bm.z_range]):
+        if not bm.x_range:
+            bm.x_range = [0,None]
+        if not bm.y_range:
+            bm.y_range = [0,None]
+        if not bm.z_range:
+            bm.z_range = [0,None]
+
     # django environment
     if bm.django_env:
 
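The new block only runs when at least one range is given; any axis the user left out is widened to the full extent. A minimal sketch of the behavior, with argparse.Namespace standing in for Biomedisa's settings object:

    from argparse import Namespace

    # hypothetical stand-in for the bm settings object
    bm = Namespace(x_range=[100, 200], y_range=None, z_range=None)
    if any([bm.x_range, bm.y_range, bm.z_range]):
        for axis in ('x_range', 'y_range', 'z_range'):
            if not getattr(bm, axis):
                setattr(bm, axis, [0, None])  # [0, None] later slices the whole axis

    print(bm.x_range, bm.y_range, bm.z_range)  # [100, 200] [0, None] [0, None]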
@@ -148,7 +157,7 @@ def deep_learning(img_data, label_data=None, val_img_data=None, val_label_data=N
         raise Exception("'path_to_model' must be specified")
 
     # disable file saving when called as a function
-    if img_data is not None:
+    if bm.img_data is not None:
         bm.path_to_images = None
         bm.path_to_cropped_image = None
 
@@ -194,19 +203,10 @@ def deep_learning(img_data, label_data=None, val_img_data=None, val_label_data=N
         # train automatic cropping
         bm.cropping_weights, bm.cropping_config, bm.cropping_norm = None, None, None
         if bm.crop_data:
-            bm.cropping_weights, bm.cropping_config, bm.cropping_norm = ch.load_and_train(
-                bm.normalize, bm.path_to_images, bm.path_to_labels, bm.path_to_model,
-                bm.cropping_epochs, bm.batch_size, bm.validation_split,
-                bm.flip_x, bm.flip_y, bm.flip_z, bm.rotate, bm.only, bm.ignore,
-                bm.val_images, bm.val_labels, img_data, label_data,
-                val_img_data, val_label_data)
+            bm.cropping_weights, bm.cropping_config, bm.cropping_norm = ch.load_and_train(bm)
 
         # train automatic segmentation
-        train_semantic_segmentation(bm, bm.path_to_images, bm.path_to_labels,
-            bm.val_images, bm.val_labels,
-            img_data, label_data,
-            val_img_data, val_label_data,
-            header, extension)
+        train_segmentation(bm)
 
     if bm.predict:
 
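Both training entry points now take the single settings object: ch.load_and_train(bm) and train_segmentation(bm) replace the long positional argument lists. A sketch of the pattern with a hypothetical, trimmed-down namespace:

    from types import SimpleNamespace

    # hypothetical subset of the attributes the real bm object carries
    bm = SimpleNamespace(path_to_images='img.tif', path_to_labels='labels.tif',
                         batch_size=24, crop_data=False)

    def train_segmentation(bm):
        # every option is read off one object instead of a ~10-argument signature
        print(bm.path_to_images, bm.path_to_labels, bm.batch_size)

    train_segmentation(bm)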
@@ -228,9 +228,11 @@ def deep_learning(img_data, label_data=None, val_img_data=None, val_label_data=N
         bm.scaling = bool(meta['scaling'][()])
 
         # check if amira header is available in the network
-        if header is None and meta.get('header') is not None:
-            header = [np.array(meta.get('header'))]
-            extension = '.am'
+        if bm.header is None and meta.get('header') is not None:
+            bm.header = [np.array(meta.get('header'))]
+            bm.extension = '.am'
+
+        # crop data
         crop_data = True if 'cropping_weights' in hf else False
         hf.close()
 
@@ -284,20 +286,19 @@ def deep_learning(img_data, label_data=None, val_img_data=None, val_label_data=N
         filename = 'final.' + filename
         if bm.refinement:
             filename += '.refined'
-        bm.path_to_final = os.path.dirname(bm.path_to_image) + '/' + filename + extension
+        bm.path_to_final = os.path.dirname(bm.path_to_image) + '/' + filename + bm.extension
         if bm.django_env and not bm.remote and not bm.tarfile:
             bm.path_to_final = unique_file_path(bm.path_to_final)
 
         # crop data
         region_of_interest, cropped_volume = None, None
-        if crop_data:
-            region_of_interest, cropped_volume = ch.crop_data(bm.path_to_image, bm.path_to_model, bm.path_to_cropped_image,
-                bm.batch_size, bm.debug_cropping, bm.save_cropped, img_data, bm.x_range, bm.y_range, bm.z_range)
+        if any([bm.x_range, bm.y_range, bm.z_range]):
+            region_of_interest = [bm.z_range[0], bm.z_range[1], bm.y_range[0], bm.y_range[1], bm.x_range[0], bm.x_range[1]]
+        elif crop_data:
+            region_of_interest, cropped_volume = ch.crop_data(bm)
 
         # make prediction
-        results, bm = predict_semantic_segmentation(bm,
-            header, img_header,
-            region_of_interest, extension, img_data,
+        results, bm = predict_segmentation(bm, region_of_interest,
             channels, normalization_parameters)
 
         # results
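Manual ranges now take precedence over the trained cropping network, and the region is encoded as a flat list in z, y, x order. A small sketch with placeholder bounds:

    # hypothetical bounds; ordering is [z_min, z_max, y_min, y_max, x_min, x_max]
    z_range, y_range, x_range = [0, 100], [50, 150], [100, 200]
    region_of_interest = [z_range[0], z_range[1],
                          y_range[0], y_range[1],
                          x_range[0], x_range[1]]
    assert region_of_interest == [0, 100, 50, 150, 100, 200]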
@@ -331,11 +332,10 @@ def deep_learning(img_data, label_data=None, val_img_data=None, val_label_data=N
     if bm.django_env:
         from biomedisa_app.config import config
         from biomedisa.features.django_env import post_processing
-        validation=True if bm.validation_split or (bm.val_images is not None and bm.val_images[0] is not None) else False
         post_processing(bm.path_to_final, time_str, config['SERVER_ALIAS'], bm.remote, bm.queue,
             img_id=bm.img_id, label_id=bm.label_id, path_to_model=bm.path_to_model,
             path_to_cropped_image=(bm.path_to_cropped_image if crop_data else None),
-            train=bm.train, predict=bm.predict, validation=validation)
+            train=bm.train, predict=bm.predict)
 
     # write in log file
     path_to_time = biomedisa.BASE_DIR + '/log/time.txt'
@@ -459,9 +459,9 @@ if __name__ == '__main__':
     parser.add_argument('-xr','--x_range', nargs="+", type=int, default=None,
                         help='Manually crop x-axis of image data for prediction, e.g. -xr 100 200')
     parser.add_argument('-yr','--y_range', nargs="+", type=int, default=None,
-                        help='Manually crop y-axis of image data for prediction, e.g. -xr 100 200')
+                        help='Manually crop y-axis of image data for prediction, e.g. -yr 100 200')
     parser.add_argument('-zr','--z_range', nargs="+", type=int, default=None,
-                        help='Manually crop z-axis of image data for prediction, e.g. -xr 100 200')
+                        help='Manually crop z-axis of image data for prediction, e.g. -zr 100 200')
     parser.add_argument('-rp','--return_probs', action='store_true', default=False,
                         help='Return prediction probabilities for each label')
     parser.add_argument('-pn','--patch_normalization', action='store_true', default=False,
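With the corrected help strings, a prediction restricted to a manual region of interest might be invoked like this (file names are placeholders):

    python -m biomedisa.deeplearning image.tif network.h5 -p \
        -xr 100 200 -yr 50 150 -zr 0 100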
biomedisa/features/crop_helper.py CHANGED
@@ -393,7 +393,7 @@ def load_data_to_crop(path_to_img, channels, x_scale, y_scale, z_scale,
     return img_rgb, z_shape, y_shape, x_shape, img_data
 
 def crop_volume(img, path_to_model, path_to_final, z_shape, y_shape, x_shape, batch_size,
-    debug_cropping, save_cropped, img_data, x_range, y_range, z_range, x_puffer=25, y_puffer=25, z_puffer=25):
+    debug_cropping, save_cropped, img_data, x_puffer=25, y_puffer=25, z_puffer=25):
 
     # img shape
     zsh, ysh, xsh, channels = img.shape
@@ -467,24 +467,13 @@ def crop_volume(img, path_to_model, path_to_final, z_shape, y_shape, x_shape, ba
         elif np.all(probabilities[k-4:k+5] == np.array([0,1,1,1,1,1,1,1,0])):
             probabilities[k-4:k+5] = 0
 
-    # create final
-    if z_range is not None:
-        z_lower, z_upper = z_range
-    else:
-        z_lower = max(0,np.argmax(probabilities[:z_shape]) - z_puffer)
-        z_upper = min(z_shape,z_shape - np.argmax(np.flip(probabilities[:z_shape])) + z_puffer +1)
-
-    if y_range is not None:
-        y_lower, y_upper = y_range
-    else:
-        y_lower = max(0,np.argmax(probabilities[z_shape:z_shape+y_shape]) - y_puffer)
-        y_upper = min(y_shape,y_shape - np.argmax(np.flip(probabilities[z_shape:z_shape+y_shape])) + y_puffer +1)
-
-    if x_range is not None:
-        x_lower, x_upper = x_range
-    else:
-        x_lower = max(0,np.argmax(probabilities[z_shape+y_shape:]) - x_puffer)
-        x_upper = min(x_shape,x_shape - np.argmax(np.flip(probabilities[z_shape+y_shape:])) + x_puffer +1)
+    # cropping range
+    z_lower = max(0,np.argmax(probabilities[:z_shape]) - z_puffer)
+    z_upper = min(z_shape,z_shape - np.argmax(np.flip(probabilities[:z_shape])) + z_puffer +1)
+    y_lower = max(0,np.argmax(probabilities[z_shape:z_shape+y_shape]) - y_puffer)
+    y_upper = min(y_shape,y_shape - np.argmax(np.flip(probabilities[z_shape:z_shape+y_shape])) + y_puffer +1)
+    x_lower = max(0,np.argmax(probabilities[z_shape+y_shape:]) - x_puffer)
+    x_upper = min(x_shape,x_shape - np.argmax(np.flip(probabilities[z_shape+y_shape:])) + x_puffer +1)
 
     # plot result
     if debug_cropping and path_to_final:
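The probabilities vector concatenates per-slice object probabilities for the z, y, and x axes; each bound is the first (or last) confident slice, widened by a puffer. A toy reproduction for one axis:

    import numpy as np

    # toy profile: the object occupies slices 4..8 of 16
    z_shape, z_puffer = 16, 2
    probabilities = np.zeros(z_shape)
    probabilities[4:9] = 1
    z_lower = max(0, np.argmax(probabilities[:z_shape]) - z_puffer)
    z_upper = min(z_shape, z_shape - np.argmax(np.flip(probabilities[:z_shape])) + z_puffer + 1)
    print(z_lower, z_upper)  # 2 12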
@@ -508,33 +497,27 @@ def crop_volume(img, path_to_model, path_to_final, z_shape, y_shape, x_shape, ba
 # main functions
 #=====================
 
-def load_and_train(normalize,path_to_img,path_to_labels,path_to_model,
-    epochs,batch_size,validation_split,
-    flip_x,flip_y,flip_z,rotate,labels_to_compute,labels_to_remove,
-    path_val_img=[None],path_val_labels=[None],
-    img=None, label=None, img_val=None, label_val=None,
-    x_scale=256, y_scale=256, z_scale=256):
+def load_and_train(bm, x_scale=256, y_scale=256, z_scale=256):
 
     # load training data
-    img, label, normalization_parameters, channels = load_cropping_training_data(normalize,
-        path_to_img, path_to_labels, x_scale, y_scale, z_scale, labels_to_compute, labels_to_remove,
-        img, label)
+    img, label, normalization_parameters, channels = load_cropping_training_data(bm.normalize,
+        bm.path_to_images, bm.path_to_labels, x_scale, y_scale, z_scale, bm.only, bm.ignore,
+        bm.img_data, bm.label_data)
 
     # load validation data
-    if any(path_val_img) or img_val is not None:
-        img_val, label_val, _, _ = load_cropping_training_data(normalize,
-            path_val_img, path_val_labels, x_scale, y_scale, z_scale,
-            labels_to_compute, labels_to_remove,
-            img_val, label_val, normalization_parameters, channels)
+    if any(bm.val_images) or bm.val_img_data is not None:
+        img_val, label_val, _, _ = load_cropping_training_data(bm.normalize,
+            bm.val_images, bm.val_labels, x_scale, y_scale, z_scale,
+            bm.only, bm.ignore, bm.val_img_data, bm.val_label_data, normalization_parameters, channels)
 
     # train cropping
-    train_cropping(img, label, path_to_model, epochs,
-        batch_size, validation_split,
-        flip_x, flip_y, flip_z, rotate,
-        img_val, label_val)
+    train_cropping(img, label, bm.path_to_model, bm.cropping_epochs,
+        bm.batch_size, bm.validation_split,
+        bm.flip_x, bm.flip_y, bm.flip_z, bm.rotate,
+        img_val, label_val)
 
     # load weights
-    model = load_model(str(path_to_model))
+    model = load_model(str(bm.path_to_model))
     cropping_weights = []
     for layer in model.layers:
         if layer.get_weights() != []:
@@ -542,17 +525,15 @@ def load_and_train(normalize,path_to_img,path_to_labels,path_to_model,
             cropping_weights.append(arr)
 
     # configuration data
-    cropping_config = np.array([channels, x_scale, y_scale, z_scale, normalize,
+    cropping_config = np.array([channels, x_scale, y_scale, z_scale, bm.normalize,
         normalization_parameters[0,0], normalization_parameters[1,0]])
 
     return cropping_weights, cropping_config, normalization_parameters
 
-def crop_data(path_to_data, path_to_model, path_to_cropped_image, batch_size,
-    debug_cropping=False, save_cropped=True, img_data=None,
-    x_range=None, y_range=None, z_range=None):
+def crop_data(bm):
 
     # get meta data
-    hf = h5py.File(path_to_model, 'r')
+    hf = h5py.File(bm.path_to_model, 'r')
     meta = hf.get('cropping_meta')
     configuration = meta.get('configuration')
     channels, x_scale, y_scale, z_scale, normalize, mu, sig = np.array(configuration)[:]
@@ -567,13 +548,12 @@ def crop_data(path_to_data, path_to_model, path_to_cropped_image, batch_size,
     hf.close()
 
     # load data
-    img, z_shape, y_shape, x_shape, img_data = load_data_to_crop(path_to_data, channels,
-        x_scale, y_scale, z_scale, normalize, normalization_parameters, img_data)
+    img, z_shape, y_shape, x_shape, bm.img_data = load_data_to_crop(bm.path_to_image, channels,
+        x_scale, y_scale, z_scale, normalize, normalization_parameters, bm.img_data)
 
     # make prediction
-    z_lower, z_upper, y_lower, y_upper, x_lower, x_upper, cropped_volume = crop_volume(img, path_to_model,
-        path_to_cropped_image, z_shape, y_shape, x_shape, batch_size, debug_cropping, save_cropped, img_data,
-        x_range, y_range, z_range)
+    z_lower, z_upper, y_lower, y_upper, x_lower, x_upper, cropped_volume = crop_volume(img, bm.path_to_model,
+        bm.path_to_cropped_image, z_shape, y_shape, x_shape, bm.batch_size, bm.debug_cropping, bm.save_cropped, bm.img_data)
 
     # region of interest
     region_of_interest = np.array([z_lower, z_upper, y_lower, y_upper, x_lower, x_upper])
biomedisa/features/django_env.py CHANGED
@@ -69,7 +69,7 @@ def create_pid_object(pid, remote=False, queue=None, img_id=None, path_to_model=
     image.pid = pid
     image.save()
 
-def post_processing(path_to_final, time_str, server_name, remote, queue, dice=1.0, path_to_model=None, path_to_uq=None, path_to_smooth=None, path_to_cropped_image=None, uncertainty=False, smooth=False, img_id=None, label_id=None, train=False, predict=False, validation=False):
+def post_processing(path_to_final, time_str, server_name, remote, queue, dice=1.0, path_to_model=None, path_to_uq=None, path_to_smooth=None, path_to_cropped_image=None, uncertainty=False, smooth=False, img_id=None, label_id=None, train=False, predict=False):
 
     # remote server
     if remote:
@@ -97,14 +97,9 @@ def post_processing(path_to_final, time_str, server_name, remote, queue, dice=1.
         filename = 'images/' + image.user.username + '/' + shortfilename
         Upload.objects.create(pic=filename, user=image.user, project=image.project, imageType=4, shortfilename=shortfilename)
 
-        if validation:
-            # create acc object
-            shortfilename = os.path.basename(path_to_model.replace('.h5','_acc.png'))
-            filename = 'images/' + image.user.username + '/' + shortfilename
-            Upload.objects.create(pic=filename, user=image.user, project=image.project, imageType=6, shortfilename=shortfilename)
-
-            # create loss object
-            shortfilename = os.path.basename(path_to_model.replace('.h5','_loss.png'))
+        # create monitoring objects
+        for suffix in ['_acc.png', '_loss.png', '.csv']:
+            shortfilename = os.path.basename(path_to_model.replace('.h5', suffix))
             filename = 'images/' + image.user.username + '/' + shortfilename
             Upload.objects.create(pic=filename, user=image.user, project=image.project, imageType=6, shortfilename=shortfilename)
 
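The two copies of the upload block collapse into one loop over the monitoring artifacts that training writes next to the model file. The filename derivation on its own:

    import os

    path_to_model = 'heart.h5'  # placeholder model path
    for suffix in ['_acc.png', '_loss.png', '.csv']:
        print(os.path.basename(path_to_model.replace('.h5', suffix)))
    # heart_acc.png
    # heart_loss.png
    # heart.csv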
biomedisa/features/keras_helper.py CHANGED
@@ -61,45 +61,65 @@ import time
 import h5py
 import atexit
 import tempfile
+import csv
 
 class InputError(Exception):
     def __init__(self, message=None):
         self.message = message
 
-def save_history(history, path_to_model, val_dice, train_dice):
-    # summarize history for accuracy
+def save_csv(path, history):
+    # remove empty keys
+    to_delete = []
+    for key in history.keys():
+        if len(history[key])==0:
+            to_delete.append(key)
+    for key in to_delete:
+        del history[key]
+    # open a file in write mode
+    with open(path, 'w', newline='') as file:
+        writer = csv.DictWriter(file, fieldnames=history.keys())
+        # write header
+        writer.writeheader()
+        # write data rows (use zip to iterate over values)
+        for row in zip(*history.values()):
+            writer.writerow(dict(zip(history.keys(), row)))
+
+def save_history(history, path_to_model):
+    # standard accuracy history
     plt.plot(history['accuracy'])
-    plt.plot(history['val_accuracy'])
-    if val_dice and train_dice:
+    legend = ['Accuracy (train)']
+    if 'val_accuracy' in history and len(history['val_accuracy'])>0:
+        plt.plot(history['val_accuracy'])
+        legend.append('Accuracy (test)')
+    # dice history
+    if 'dice' in history and len(history['dice'])>0:
         plt.plot(history['dice'])
+        legend.append('Dice score (train)')
+    if 'val_dice' in history and len(history['val_dice'])>0:
         plt.plot(history['val_dice'])
-        plt.legend(['Accuracy (train)', 'Accuracy (test)', 'Dice score (train)', 'Dice score (test)'], loc='upper left')
-    elif train_dice:
-        plt.plot(history['dice'])
-        plt.legend(['Accuracy (train)', 'Accuracy (test)', 'Dice score (train)'], loc='upper left')
-    elif val_dice:
-        plt.plot(history['val_dice'])
-        plt.legend(['Accuracy (train)', 'Accuracy (test)', 'Dice score (test)'], loc='upper left')
-    else:
-        plt.legend(['Accuracy (train)', 'Accuracy (test)'], loc='upper left')
+        legend.append('Dice score (test)')
     plt.title('model accuracy')
     plt.ylabel('accuracy')
     plt.xlabel('epoch')
+    plt.legend(legend, loc='upper left')
     plt.tight_layout() # To prevent overlapping of subplots
     plt.savefig(path_to_model.replace('.h5','_acc.png'), dpi=300, bbox_inches='tight')
     plt.clf()
-    # summarize history for loss
+    # loss history
     plt.plot(history['loss'])
-    plt.plot(history['val_loss'])
+    legend = ['train']
+    if 'val_loss' in history and len(history['val_loss'])>0:
+        plt.plot(history['val_loss'])
+        legend.append('test')
     plt.title('model loss')
     plt.ylabel('loss')
     plt.xlabel('epoch')
-    plt.legend(['train', 'test'], loc='upper left')
+    plt.legend(legend, loc='upper left')
     plt.tight_layout() # To prevent overlapping of subplots
     plt.savefig(path_to_model.replace('.h5','_loss.png'), dpi=300, bbox_inches='tight')
     plt.clf()
-    # save history dictonary
-    np.save(path_to_model.replace('.h5','.npy'), history)
+    # save history as csv
+    save_csv(path_to_model.replace('.h5','.csv'), history)
 
 def predict_blocksize(labelData, x_puffer=25, y_puffer=25, z_puffer=25):
     zsh, ysh, xsh = labelData.shape
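Training history is now exported as CSV rather than a pickled .npy dictionary, and series that were never filled (e.g. no validation run) are dropped first. A usage sketch of save_csv as defined above, with a toy history:

    history = {'accuracy': [0.81, 0.9], 'loss': [0.52, 0.31], 'val_accuracy': []}
    save_csv('history.csv', history)
    # history.csv:
    # accuracy,loss
    # 0.81,0.52
    # 0.9,0.31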
@@ -355,8 +375,7 @@ def read_img_list(img_list, label_list, temp_img_dir, temp_label_dir):
         label_names.append(label_name)
     return img_names, label_names
 
-def load_training_data(bm, normalize, img_list, label_list, channels, x_scale, y_scale, z_scale, scaling,
-    crop_data, labels_to_compute, labels_to_remove, img_in=None, label_in=None,
+def load_training_data(bm, img_list, label_list, channels, img_in=None, label_in=None,
     normalization_parameters=None, allLabels=None, header=None, extension='.tif',
     x_puffer=25, y_puffer=25, z_puffer=25):
 
@@ -381,20 +400,26 @@ def load_training_data(bm, normalize, img_list, label_list, channels, x_scale, y
         label = label_in
         label_names = ['label_1']
         label_dim = label.shape
-        label = set_labels_to_zero(label, labels_to_compute, labels_to_remove)
-        if scaling:
+        label = set_labels_to_zero(label, bm.only, bm.ignore)
+        if any([bm.x_range, bm.y_range, bm.z_range]):
+            if len(label_names)>1:
+                InputError.message = 'Training on region of interest is only supported for one volume.'
+                raise InputError()
+            argmin_z, argmax_z, argmin_y, argmax_y, argmin_x, argmax_x = \
+                bm.z_range[0], bm.z_range[1], bm.y_range[0], bm.y_range[1], bm.x_range[0], bm.x_range[1]
+            label = label[argmin_z:argmax_z,argmin_y:argmax_y,argmin_x:argmax_x].copy()
+        elif bm.crop_data:
+            argmin_z,argmax_z,argmin_y,argmax_y,argmin_x,argmax_x = predict_blocksize(label, x_puffer, y_puffer, z_puffer)
+            label = label[argmin_z:argmax_z,argmin_y:argmax_y,argmin_x:argmax_x].copy()
+        if bm.scaling:
             label_values, counts = np.unique(label, return_counts=True)
             print(f'{os.path.basename(label_names[0])}:', 'Labels:', label_values[1:], 'Sizes:', counts[1:])
-        if crop_data:
-            argmin_z,argmax_z,argmin_y,argmax_y,argmin_x,argmax_x = predict_blocksize(label, x_puffer, y_puffer, z_puffer)
-            label = np.copy(label[argmin_z:argmax_z,argmin_y:argmax_y,argmin_x:argmax_x], order='C')
-        if scaling:
-            label = img_resize(label, z_scale, y_scale, x_scale, labels=True)
+            label = img_resize(label, bm.z_scale, bm.y_scale, bm.x_scale, labels=True)
 
     # if header is not single data stream Amira Mesh falling back to Multi-TIFF
     if extension != '.am':
         if extension != '.tif':
-            print(f'Warning! Please use -hf or --header_file="path_to_training_label{extension}" for prediction to save your result as "{extension}"')
+            print(f'Warning! Please use --header_file="path_to_training_label{extension}" for prediction to save your result as "{extension}"')
             extension, header = '.tif', None
         elif len(header) > 1:
             print('Warning! Multiple data streams are not supported. Falling back to TIFF.')
@@ -429,13 +454,13 @@ def load_training_data(bm, normalize, img_list, label_list, channels, x_scale, y
             raise InputError()
 
     # crop data
-    if crop_data:
-        img = np.copy(img[argmin_z:argmax_z,argmin_y:argmax_y,argmin_x:argmax_x], order='C')
+    if any([bm.x_range, bm.y_range, bm.z_range]) or bm.crop_data:
+        img = img[argmin_z:argmax_z,argmin_y:argmax_y,argmin_x:argmax_x].copy()
 
     # scale/resize image data
     img = img.astype(np.float32)
-    if scaling:
-        img = img_resize(img, z_scale, y_scale, x_scale)
+    if bm.scaling:
+        img = img_resize(img, bm.z_scale, bm.y_scale, bm.x_scale)
 
     # normalize image data
     for c in range(channels):
@@ -445,7 +470,7 @@ def load_training_data(bm, normalize, img_list, label_list, channels, x_scale, y
             normalization_parameters = np.zeros((2,channels))
             normalization_parameters[0,c] = np.mean(img[:,:,:,c])
             normalization_parameters[1,c] = np.std(img[:,:,:,c])
-        elif normalize:
+        elif bm.normalize:
             mean, std = np.mean(img[:,:,:,c]), np.std(img[:,:,:,c])
             img[:,:,:,c] = (img[:,:,:,c] - mean) / std
             img[:,:,:,c] = img[:,:,:,c] * normalization_parameters[1,c] + normalization_parameters[0,c]
@@ -465,15 +490,14 @@ def load_training_data(bm, normalize, img_list, label_list, channels, x_scale, y
         else:
             a = label_in[k]
         label_dim = a.shape
-        a = set_labels_to_zero(a, labels_to_compute, labels_to_remove)
-        if scaling:
-            label_values, counts = np.unique(a, return_counts=True)
-            print(f'{os.path.basename(label_names[k])}:', 'Labels:', label_values[1:], 'Sizes:', counts[1:])
-        if crop_data:
+        a = set_labels_to_zero(a, bm.only, bm.ignore)
+        if bm.crop_data:
             argmin_z,argmax_z,argmin_y,argmax_y,argmin_x,argmax_x = predict_blocksize(a, x_puffer, y_puffer, z_puffer)
             a = np.copy(a[argmin_z:argmax_z,argmin_y:argmax_y,argmin_x:argmax_x], order='C')
-        if scaling:
-            a = img_resize(a, z_scale, y_scale, x_scale, labels=True)
+        if bm.scaling:
+            label_values, counts = np.unique(a, return_counts=True)
+            print(f'{os.path.basename(label_names[k])}:', 'Labels:', label_values[1:], 'Sizes:', counts[1:])
+            a = img_resize(a, bm.z_scale, bm.y_scale, bm.x_scale, labels=True)
         label = np.append(label, a, axis=0)
 
     # append image
@@ -493,15 +517,15 @@ def load_training_data(bm, normalize, img_list, label_list, channels, x_scale, y
         if a.shape[3] != channels:
             InputError.message = f'Number of channels must be {channels} for "{os.path.basename(img_names[k])}"'
             raise InputError()
-        if crop_data:
+        if bm.crop_data:
             a = np.copy(a[argmin_z:argmax_z,argmin_y:argmax_y,argmin_x:argmax_x], order='C')
         a = a.astype(np.float32)
-        if scaling:
-            a = img_resize(a, z_scale, y_scale, x_scale)
+        if bm.scaling:
+            a = img_resize(a, bm.z_scale, bm.y_scale, bm.x_scale)
         for c in range(channels):
             a[:,:,:,c] -= np.amin(a[:,:,:,c])
             a[:,:,:,c] /= np.amax(a[:,:,:,c])
-            if normalize:
+            if bm.normalize:
                 mean, std = np.mean(a[:,:,:,c]), np.std(a[:,:,:,c])
                 a[:,:,:,c] = (a[:,:,:,c] - mean) / std
                 a[:,:,:,c] = a[:,:,:,c] * normalization_parameters[1,c] + normalization_parameters[0,c]
@@ -632,6 +656,9 @@ class Metrics(Callback):
         n_batches = int(np.floor(len_IDs / self.batch_size))
         np.random.shuffle(self.list_IDs)
 
+        # initialize validation loss
+        val_loss = 0
+
         for batch in range(n_batches):
             # Generate indexes of the batch
             list_IDs_batch = self.list_IDs[batch*self.batch_size:(batch+1)*self.batch_size]
@@ -667,9 +694,12 @@ class Metrics(Callback):
                     m = rest % self.dim_img[2]
                     result[k:k+self.dim_patch[0],l:l+self.dim_patch[1],m:m+self.dim_patch[2]] += y_predict[i]
 
-        # calculate categorical crossentropy
-        if not self.train:
-            crossentropy = categorical_crossentropy(self.label, softmax(result))
+                    # calculate validation loss
+                    if not self.train:
+                        val_loss += categorical_crossentropy(self.label[k:k+self.dim_patch[0],l:l+self.dim_patch[1],m:m+self.dim_patch[2]], y_predict[i])
+
+        # mean validation loss
+        val_loss /= (n_batches*self.batch_size)
 
         # get result
        result = np.argmax(result, axis=-1)
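Validation loss is now accumulated per predicted patch and averaged over all validation patches, instead of computing one crossentropy over the fully assembled volume. A toy version of the bookkeeping, with a stand-in loss function (the real code uses Biomedisa's own helper):

    import numpy as np

    def categorical_crossentropy(y_true, y_prob):
        # stand-in: mean crossentropy over voxels, assuming one-hot y_true
        return float(np.mean(-np.sum(y_true * np.log(y_prob + 1e-12), axis=-1)))

    n_batches, batch_size = 2, 2
    val_loss = 0
    for _ in range(n_batches * batch_size):       # one term per patch
        y_true = np.array([[1.0, 0.0]])           # hypothetical one-hot patch
        y_prob = np.array([[0.8, 0.2]])           # hypothetical softmax output
        val_loss += categorical_crossentropy(y_true, y_prob)
    val_loss /= (n_batches * batch_size)          # mean over all patches
    print(round(val_loss, 4))                     # 0.2231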
@@ -703,10 +733,10 @@ class Metrics(Callback):
             self.history['dice'].append(round(logs['dice'],4))
             self.history['val_accuracy'].append(round(accuracy,4))
             self.history['val_dice'].append(round(dice,4))
-            self.history['val_loss'].append(round(crossentropy,4))
+            self.history['val_loss'].append(round(val_loss,4))
 
             # tensorflow monitoring variables
-            logs['val_loss'] = crossentropy
+            logs['val_loss'] = val_loss
             logs['val_accuracy'] = accuracy
             logs['val_dice'] = dice
             logs['best_acc'] = max(self.history['accuracy'])
@@ -716,7 +746,7 @@ class Metrics(Callback):
                 logs['best_val_dice'] = max(self.history['val_dice'])
 
             # plot history in figure and save as numpy array
-            save_history(self.history, self.path_to_model, True, self.train_dice)
+            save_history(self.history, self.path_to_model)
 
             # print accuracies
             print('\nValidation history:')
@@ -772,17 +802,11 @@ def dice_coef_loss(nb_labels):
         return loss
     return loss_fn
 
-def train_semantic_segmentation(bm,
-    img_list, label_list,
-    val_img_list, val_label_list,
-    img=None, label=None,
-    img_val=None, label_val=None,
-    header=None, extension='.tif'):
+def train_segmentation(bm):
 
     # training data
-    img, label, allLabels, normalization_parameters, header, extension, bm.channels = load_training_data(bm, bm.normalize,
-        img_list, label_list, None, bm.x_scale, bm.y_scale, bm.z_scale, bm.scaling, bm.crop_data,
-        bm.only, bm.ignore, img, label, None, None, header, extension)
+    bm.img_data, bm.label_data, allLabels, normalization_parameters, bm.header, bm.extension, bm.channels = load_training_data(bm,
+        bm.path_to_images, bm.path_to_labels, None, bm.img_data, bm.label_data, None, None, bm.header, bm.extension)
 
     # configuration data
     configuration_data = np.array([bm.channels,
@@ -790,21 +814,20 @@ def train_semantic_segmentation(bm,
         normalization_parameters[0,0], normalization_parameters[1,0]])
 
     # img shape
-    zsh, ysh, xsh, _ = img.shape
+    zsh, ysh, xsh, _ = bm.img_data.shape
 
     # validation data
-    if any(val_img_list) or img_val is not None:
-        img_val, label_val, _, _, _, _, _ = load_training_data(bm, bm.normalize,
-            val_img_list, val_label_list, bm.channels, bm.x_scale, bm.y_scale, bm.z_scale, bm.scaling, bm.crop_data,
-            bm.only, bm.ignore, img_val, label_val, normalization_parameters, allLabels)
+    if any(bm.val_images) or bm.val_img_data is not None:
+        bm.val_img_data, bm.val_label_data, _, _, _, _, _ = load_training_data(bm,
+            bm.val_images, bm.val_labels, bm.channels, bm.val_img_data, bm.val_label_data, normalization_parameters, allLabels)
 
     elif bm.validation_split:
         split = round(zsh * bm.validation_split)
-        img_val = np.copy(img[split:], order='C')
-        label_val = np.copy(label[split:], order='C')
-        img = np.copy(img[:split], order='C')
-        label = np.copy(label[:split], order='C')
-        zsh, ysh, xsh, _ = img.shape
+        bm.val_img_data = bm.img_data[split:].copy()
+        bm.val_label_data = bm.label_data[split:].copy()
+        bm.img_data = bm.img_data[:split].copy()
+        bm.label_data = bm.label_data[:split].copy()
+        zsh, ysh, xsh, _ = bm.img_data.shape
 
     # list of IDs
     list_IDs_fg, list_IDs_bg = [], []
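When no explicit validation set is supplied, the stack is split along the first (z) axis; note that validation_split is the fraction kept for training, with the remainder used for validation. A sketch:

    import numpy as np

    img = np.zeros((100, 32, 32, 1))        # toy stack, zsh = 100
    validation_split = 0.8                  # first 80% train, last 20% validation
    split = round(img.shape[0] * validation_split)
    val_img, img = img[split:].copy(), img[:split].copy()
    print(img.shape[0], val_img.shape[0])   # 80 20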
@@ -814,7 +837,7 @@ def train_semantic_segmentation(bm,
     for k in range(0, zsh-bm.z_patch+1, bm.stride_size):
         for l in range(0, ysh-bm.y_patch+1, bm.stride_size):
             for m in range(0, xsh-bm.x_patch+1, bm.stride_size):
-                if np.any(label[k:k+bm.z_patch, l:l+bm.y_patch, m:m+bm.x_patch]):
+                if np.any(bm.label_data[k:k+bm.z_patch, l:l+bm.y_patch, m:m+bm.x_patch]):
                     list_IDs_fg.append(k*ysh*xsh+l*xsh+m)
                 else:
                     list_IDs_bg.append(k*ysh*xsh+l*xsh+m)
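Each patch origin (k, l, m) is stored as the single flat integer k*ysh*xsh + l*xsh + m; the Metrics callback recovers the coordinates with integer division and modulo (the `rest % self.dim_img[2]` seen above). Round-trip sketch:

    ysh, xsh = 400, 500                # toy volume dimensions
    k, l, m = 32, 64, 96               # patch origin (z, y, x)
    ID = k*ysh*xsh + l*xsh + m         # encode

    rest = ID % (ysh*xsh)              # decode
    k2, l2, m2 = ID // (ysh*xsh), rest // xsh, rest % xsh
    assert (k2, l2, m2) == (k, l, m)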
@@ -823,17 +846,17 @@ def train_semantic_segmentation(bm,
         for l in range(0, ysh-bm.y_patch+1, bm.stride_size):
             for m in range(0, xsh-bm.x_patch+1, bm.stride_size):
                 if bm.separation:
-                    centerLabel = label[k+bm.z_patch//2,l+bm.y_patch//2,m+bm.x_patch//2]
-                    patch = label[k:k+bm.z_patch, l:l+bm.y_patch, m:m+bm.x_patch]
+                    centerLabel = bm.label_data[k+bm.z_patch//2,l+bm.y_patch//2,m+bm.x_patch//2]
+                    patch = bm.label_data[k:k+bm.z_patch, l:l+bm.y_patch, m:m+bm.x_patch]
                     if centerLabel>0 and np.any(patch!=centerLabel):
                         list_IDs_fg.append(k*ysh*xsh+l*xsh+m)
                 else:
                     list_IDs_fg.append(k*ysh*xsh+l*xsh+m)
 
-    if img_val is not None:
+    if bm.val_img_data is not None:
 
         # img_val shape
-        zsh_val, ysh_val, xsh_val, _ = img_val.shape
+        zsh_val, ysh_val, xsh_val, _ = bm.val_img_data.shape
 
         # list of validation IDs
         list_IDs_val_fg, list_IDs_val_bg = [], []
@@ -843,7 +866,7 @@ def train_semantic_segmentation(bm,
         for k in range(0, zsh_val-bm.z_patch+1, bm.validation_stride_size):
             for l in range(0, ysh_val-bm.y_patch+1, bm.validation_stride_size):
                 for m in range(0, xsh_val-bm.x_patch+1, bm.validation_stride_size):
-                    if np.any(label_val[k:k+bm.z_patch, l:l+bm.y_patch, m:m+bm.x_patch]):
+                    if np.any(bm.val_label_data[k:k+bm.z_patch, l:l+bm.y_patch, m:m+bm.x_patch]):
                         list_IDs_val_fg.append(k*ysh_val*xsh_val+l*xsh_val+m)
                     else:
                         list_IDs_val_bg.append(k*ysh_val*xsh_val+l*xsh_val+m)
@@ -852,8 +875,8 @@ def train_semantic_segmentation(bm,
             for l in range(0, ysh_val-bm.y_patch+1, bm.validation_stride_size):
                 for m in range(0, xsh_val-bm.x_patch+1, bm.validation_stride_size):
                     if bm.separation:
-                        centerLabel = label_val[k+bm.z_patch//2,l+bm.y_patch//2,m+bm.x_patch//2]
-                        patch = label_val[k:k+bm.z_patch, l:l+bm.y_patch, m:m+bm.x_patch]
+                        centerLabel = bm.val_label_data[k+bm.z_patch//2,l+bm.y_patch//2,m+bm.x_patch//2]
+                        patch = bm.val_label_data[k:k+bm.z_patch, l:l+bm.y_patch, m:m+bm.x_patch]
                         if centerLabel>0 and np.any(patch!=centerLabel):
                             list_IDs_val_fg.append(k*ysh_val*xsh_val+l*xsh_val+m)
                     else:
@@ -877,18 +900,18 @@ def train_semantic_segmentation(bm,
 
     # data generator
     validation_generator = None
-    training_generator = DataGenerator(img, label, list_IDs_fg, list_IDs_bg, True, True, False, **params)
-    if img_val is not None:
+    training_generator = DataGenerator(bm.img_data, bm.label_data, list_IDs_fg, list_IDs_bg, True, True, False, **params)
+    if bm.val_img_data is not None:
         if bm.val_dice:
-            val_metrics = Metrics(bm, img_val, label_val, list_IDs_val_fg, (zsh_val, ysh_val, xsh_val), nb_labels, False)
+            val_metrics = Metrics(bm, bm.val_img_data, bm.val_label_data, list_IDs_val_fg, (zsh_val, ysh_val, xsh_val), nb_labels, False)
         else:
             params['dim_img'] = (zsh_val, ysh_val, xsh_val)
             params['augment'] = (False, False, False, False, 0)
-            validation_generator = DataGenerator(img_val, label_val, list_IDs_val_fg, list_IDs_val_bg, True, False, False, **params)
+            validation_generator = DataGenerator(bm.val_img_data, bm.val_label_data, list_IDs_val_fg, list_IDs_val_bg, True, False, False, **params)
 
     # monitor dice score on training data
     if bm.train_dice:
-        train_metrics = Metrics(bm, img, label, list_IDs_fg, (zsh, ysh, xsh), nb_labels, True)
+        train_metrics = Metrics(bm, bm.img_data, bm.label_data, list_IDs_fg, (zsh, ysh, xsh), nb_labels, True)
 
     # create a MirroredStrategy
     cdo = tf.distribute.ReductionToOneDevice()
@@ -931,11 +954,11 @@ def train_semantic_segmentation(bm,
 
     # save meta data
     meta_data = MetaData(bm.path_to_model, configuration_data, allLabels,
-        extension, header, bm.crop_data, bm.cropping_weights, bm.cropping_config,
+        bm.extension, bm.header, bm.crop_data, bm.cropping_weights, bm.cropping_config,
         normalization_parameters, bm.cropping_norm, bm.patch_normalization, bm.scaling)
 
     # model checkpoint
-    if img_val is not None:
+    if bm.val_img_data is not None:
         if bm.val_dice:
             callbacks = [val_metrics, meta_data]
         else:
@@ -967,8 +990,8 @@ def train_semantic_segmentation(bm,
         workers=bm.workers)
 
     # save monitoring figure on train end
-    if img_val is not None and not bm.val_dice:
-        save_history(history.history, bm.path_to_model, False, bm.train_dice)
+    if bm.val_img_data is None or not bm.val_dice:
+        save_history(history.history, bm.path_to_model)
 
 def load_prediction_data(bm, channels, normalize, normalization_parameters,
     region_of_interest, img, img_header, load_blockwise=False, z=None):
@@ -982,6 +1005,8 @@ def load_prediction_data(bm, channels, normalize, normalization_parameters,
         if img.shape[0] < bm.z_patch:
             rest = bm.z_patch - img.shape[0]
             tmp = imread(bm.path_to_image, key=range(len(tif.pages)-rest,len(tif.pages)))
+            if len(tmp.shape)==2:
+                tmp = tmp.reshape(1,tmp.shape[0],tmp.shape[1])
             img = np.append(img, tmp[::-1], axis=0)
     else:
         img, img_header = load_data(bm.path_to_image, 'first_queue')
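The new guard covers the case where the key range selects a single TIFF page, in which case imread returns a 2D array that cannot be appended to the 3D stack. Minimal reproduction of the fix:

    import numpy as np

    tmp = np.zeros((512, 512))          # a single page comes back 2D
    if len(tmp.shape) == 2:
        tmp = tmp.reshape(1, tmp.shape[0], tmp.shape[1])
    print(tmp.shape)                    # (1, 512, 512)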
@@ -1072,10 +1097,7 @@ def gradient(volData):
     grad[grad>0]=1
     return grad
 
-def predict_semantic_segmentation(bm,
-    header, img_header,
-    region_of_interest, extension, img_data,
-    channels, normalization_parameters):
+def predict_segmentation(bm, region_of_interest, channels, normalization_parameters):
 
     # initialize results
     results = {}
@@ -1161,8 +1183,8 @@ def predict_semantic_segmentation(bm,
     if not load_blockwise:
 
         # load prediction data
-        img, img_header, z_shape, y_shape, x_shape, region_of_interest, img_data = load_prediction_data(
-            bm, channels, bm.normalize, normalization_parameters, region_of_interest, img_data, img_header)
+        img, bm.img_header, z_shape, y_shape, x_shape, region_of_interest, bm.img_data = load_prediction_data(
+            bm, channels, bm.normalize, normalization_parameters, region_of_interest, bm.img_data, bm.img_header)
 
         # append ghost areas
         img, z_rest, y_rest, x_rest = append_ghost_areas(bm, img)
@@ -1229,11 +1251,11 @@ def predict_semantic_segmentation(bm,
         z_indices = range(0, zsh-bm.z_patch+1, bm.stride_size)
         for j, z in enumerate(z_indices):
 
-            # load blockwise
+            # load blockwise from TIFF
             if load_blockwise:
                 img, _, _, _, _, _, _ = load_prediction_data(bm,
                     channels, bm.normalize, normalization_parameters,
-                    region_of_interest, img_data, img_header, load_blockwise, z)
+                    region_of_interest, bm.img_data, bm.img_header, load_blockwise, z)
                 img, _, _, _ = append_ghost_areas(bm, img)
 
             # list of IDs
@@ -1373,79 +1395,79 @@ def predict_semantic_segmentation(bm,
 
     # load header from file
     if bm.header_file and os.path.exists(bm.header_file):
-        _, header = load_data(bm.header_file)
+        _, bm.header = load_data(bm.header_file)
         # update file extension
-        if header is not None and bm.path_to_image:
-            extension = os.path.splitext(bm.header_file)[1]
-            if extension == '.gz':
-                extension = '.nii.gz'
-            bm.path_to_final = os.path.splitext(bm.path_to_final)[0] + extension
+        if bm.header is not None and bm.path_to_image:
+            bm.extension = os.path.splitext(bm.header_file)[1]
+            if bm.extension == '.gz':
+                bm.extension = '.nii.gz'
+            bm.path_to_final = os.path.splitext(bm.path_to_final)[0] + bm.extension
             if bm.django_env and not bm.remote and not bm.tarfile:
                 bm.path_to_final = unique_file_path(bm.path_to_final)
 
     # handle amira header
-    if header is not None:
-        if extension == '.am':
-            header = set_image_dimensions(header[0], label)
-            if img_header is not None:
+    if bm.header is not None:
+        if bm.extension == '.am':
+            bm.header = set_image_dimensions(bm.header[0], label)
+            if bm.img_header is not None:
                 try:
-                    header = set_physical_size(header, img_header[0])
+                    bm.header = set_physical_size(bm.header, bm.img_header[0])
                 except:
                     pass
-            header = [header]
+            bm.header = [bm.header]
         else:
             # build new header
-            if img_header is None:
+            if bm.img_header is None:
                 zsh, ysh, xsh = label.shape
-                img_header = sitk.Image(xsh, ysh, zsh, header.GetPixelID())
+                bm.img_header = sitk.Image(xsh, ysh, zsh, bm.header.GetPixelID())
             # copy metadata
-            for key in header.GetMetaDataKeys():
+            for key in bm.header.GetMetaDataKeys():
                 if not (re.match(r'Segment\d+_Extent$', key) or key=='Segmentation_ConversionParameters'):
-                    img_header.SetMetaData(key, header.GetMetaData(key))
-            header = img_header
-        results['header'] = header
+                    bm.img_header.SetMetaData(key, bm.header.GetMetaData(key))
+            bm.header = bm.img_header
+        results['header'] = bm.header
 
     # save result
     if bm.path_to_image:
-        save_data(bm.path_to_final, label, header=header, compress=bm.compression)
+        save_data(bm.path_to_final, label, header=bm.header, compress=bm.compression)
 
         # paths to optional results
-        filename, extension = os.path.splitext(bm.path_to_final)
-        if extension == '.gz':
-            extension = '.nii.gz'
+        filename, bm.extension = os.path.splitext(bm.path_to_final)
+        if bm.extension == '.gz':
+            bm.extension = '.nii.gz'
             filename = filename[:-4]
-        path_to_cleaned = filename + '.cleaned' + extension
-        path_to_filled = filename + '.filled' + extension
-        path_to_cleaned_filled = filename + '.cleaned.filled' + extension
-        path_to_refined = filename + '.refined' + extension
-        path_to_acwe = filename + '.acwe' + extension
+        path_to_cleaned = filename + '.cleaned' + bm.extension
+        path_to_filled = filename + '.filled' + bm.extension
+        path_to_cleaned_filled = filename + '.cleaned.filled' + bm.extension
+        path_to_refined = filename + '.refined' + bm.extension
+        path_to_acwe = filename + '.acwe' + bm.extension
 
     # remove outliers
     if bm.clean:
         cleaned_result = clean(label, bm.clean)
         results['cleaned'] = cleaned_result
         if bm.path_to_image:
-            save_data(path_to_cleaned, cleaned_result, header=header, compress=bm.compression)
+            save_data(path_to_cleaned, cleaned_result, header=bm.header, compress=bm.compression)
     if bm.fill:
         filled_result = clean(label, bm.fill)
         results['filled'] = filled_result
         if bm.path_to_image:
-            save_data(path_to_filled, filled_result, header=header, compress=bm.compression)
+            save_data(path_to_filled, filled_result, header=bm.header, compress=bm.compression)
     if bm.clean and bm.fill:
         cleaned_filled_result = cleaned_result + (filled_result - label)
         results['cleaned_filled'] = cleaned_filled_result
         if bm.path_to_image:
-            save_data(path_to_cleaned_filled, cleaned_filled_result, header=header, compress=bm.compression)
+            save_data(path_to_cleaned_filled, cleaned_filled_result, header=bm.header, compress=bm.compression)
 
     # post-processing with active contour
     if bm.acwe:
-        acwe_result = activeContour(img_data, label, bm.acwe_alpha, bm.acwe_smooth, bm.acwe_steps)
-        refined_result = activeContour(img_data, label, simple=True)
+        acwe_result = activeContour(bm.img_data, label, bm.acwe_alpha, bm.acwe_smooth, bm.acwe_steps)
+        refined_result = activeContour(bm.img_data, label, simple=True)
         results['acwe'] = acwe_result
         results['refined'] = refined_result
         if bm.path_to_image:
-            save_data(path_to_acwe, acwe_result, header=header, compress=bm.compression)
-            save_data(path_to_refined, refined_result, header=header, compress=bm.compression)
+            save_data(path_to_acwe, acwe_result, header=bm.header, compress=bm.compression)
+            save_data(path_to_refined, refined_result, header=bm.header, compress=bm.compression)
 
     return results, bm
 
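The .gz special case exists because os.path.splitext only strips the final suffix, so 'result.nii.gz' would otherwise keep a stray '.nii'. In isolation:

    import os

    path = 'result.nii.gz'                  # placeholder filename
    filename, extension = os.path.splitext(path)
    if extension == '.gz':
        extension = '.nii.gz'
        filename = filename[:-4]            # drop the leftover '.nii'
    print(filename, extension)              # result .nii.gz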
biomedisa-24.8.4.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: biomedisa
-Version: 24.8.3
+Version: 24.8.4
 Summary: Segmentation of 3D volumetric image data
 Author: Philipp Lösel
 Author-email: philipp.loesel@anu.edu.au
biomedisa-24.8.4.dist-info/RECORD CHANGED
@@ -1,6 +1,6 @@
 biomedisa/__init__.py,sha256=hw4mzEjGFXm-vxus2DBfKFW0nKoG0ibL5SH6ShfchrY,1526
 biomedisa/__main__.py,sha256=a1--8vhtztWEloHVtbM43FZLCfrFo4BELgdsgtWE8ls,536
-biomedisa/deeplearning.py,sha256=ZjD_05UkWwu8hECV7a1YzP3DQ_6Y8r81GaAWEfx3XlQ,28564
+biomedisa/deeplearning.py,sha256=UD4IeaxITLLqzoaQ1Ul5HI_bN8wINouOGxf14yZ7SWQ,28008
 biomedisa/interpolation.py,sha256=i10aqwEl-wsVU_nQ-zyubhAs27NSKF4ial7LyhaBLv0,17273
 biomedisa/mesh.py,sha256=8-iuVsrfW5JovaMrAez7qSxv1LCU3eiqOdik0s0DV1w,16062
 biomedisa/features/DataGenerator.py,sha256=MINc7emhELPxACOYdnLFHgh-RHUKODLH0DH7zz6u6mc,13116
@@ -12,10 +12,10 @@ biomedisa/features/active_contour.py,sha256=FnPuvYck_KL4xYRFqwzm4Grdm288EdlcLFt8
 biomedisa/features/assd.py,sha256=q9NUQXEoA4Pi3d8b5fmys615CWu06Sm0N9-OGwJOFnw,6537
 biomedisa/features/biomedisa_helper.py,sha256=oIsOmIJ8xUQVwgbzMgw65dXcZmzwePpFQoIdbgLTnF8,32727
 biomedisa/features/create_slices.py,sha256=uSDH1OcEYc5BFPZHSy3UpS4P2DuoVnxOZ-l7wmyT_Po,13108
-biomedisa/features/crop_helper.py,sha256=5do03z_DT13qWRR6IGtkFn4BNaw0mWmUXgg3Lk-U6QY,24597
+biomedisa/features/crop_helper.py,sha256=qnMn0AnlkeaecHcDjr-7HywizVYue1kvNT9sADjWViU,23753
 biomedisa/features/curvop_numba.py,sha256=AjKQJcUBoURTB8pq1HmugQYpBwBELthhcEu51_r_xPI,7049
-biomedisa/features/django_env.py,sha256=pdiPcBpqu1BWuyvh-palIGVwHFaY-leQ4Gatlbm8hIg,8942
-biomedisa/features/keras_helper.py,sha256=bPA4wc2W3kPRLKd7qcHE6nXJ4sOm8lKHvCB5yC4NGOI,61587
+biomedisa/features/django_env.py,sha256=LNrZ6rBHZ5I0FaWa5xN8K-ASPgq0r5dGDEUI56HzJxE,8615
+biomedisa/features/keras_helper.py,sha256=Emf-Fx9rB3jECL7XgK8DYMRwO27ynijt5YdnwMlDMXY,62747
 biomedisa/features/nc_reader.py,sha256=RoRMwu3ELSNfoV3qZtaT2OWACnXb2EhNFu_DAF1T93o,7406
 biomedisa/features/pid.py,sha256=Jmn1VIp0fBlgBrqZ-yUIQVVb5-NAxNBdibXALVr2PPI,2545
 biomedisa/features/process_image.py,sha256=VtS3fGDvglqJiiJLPK1toe76J58j914NJ8XQKg3CRwo,11091
@@ -37,8 +37,8 @@ biomedisa/features/random_walk/pyopencl_large.py,sha256=q79AxG3p3qFjxfiAZfUK9I5B
 biomedisa/features/random_walk/pyopencl_small.py,sha256=opNlS-qzOa9qWafBNJdvf6r1aRAFf7_JXf6ISDnkdXE,17068
 biomedisa/features/random_walk/rw_large.py,sha256=ZnITvk00Y11ZZlGuBRaJO1EwU0wYBdEwdpj9vvXCqF4,19805
 biomedisa/features/random_walk/rw_small.py,sha256=RPzZe24YrEwYelJukDjvqaoD_SyhgdriEi7uV3kZGXI,14881
-biomedisa-24.8.3.dist-info/LICENSE,sha256=sehayP6UhydNnmstfL4yFR3genMRdpuUh6uZVWJN1H0,14152
-biomedisa-24.8.3.dist-info/METADATA,sha256=Kcp6gqeA_UYsYv7FNp4_3LjMaR_UIySrnwv2HGvpXA8,10569
-biomedisa-24.8.3.dist-info/WHEEL,sha256=cVxcB9AmuTcXqmwrtPhNK88dr7IR_b6qagTj0UvIEbY,91
-biomedisa-24.8.3.dist-info/top_level.txt,sha256=opsf1Eb4vCguPSxev4HHSeiUKCccT_C_RcUCdAYbHWQ,10
-biomedisa-24.8.3.dist-info/RECORD,,
+biomedisa-24.8.4.dist-info/LICENSE,sha256=sehayP6UhydNnmstfL4yFR3genMRdpuUh6uZVWJN1H0,14152
+biomedisa-24.8.4.dist-info/METADATA,sha256=uhD06s2HX6W6YXnqsa0NSvfUzhntO95T-JM_ZeX0UIM,10569
+biomedisa-24.8.4.dist-info/WHEEL,sha256=OVMc5UfuAQiSplgO0_WdW7vXVGAt9Hdd6qtN4HotdyA,91
+biomedisa-24.8.4.dist-info/top_level.txt,sha256=opsf1Eb4vCguPSxev4HHSeiUKCccT_C_RcUCdAYbHWQ,10
+biomedisa-24.8.4.dist-info/RECORD,,
biomedisa-24.8.4.dist-info/WHEEL CHANGED
@@ -1,5 +1,5 @@
 Wheel-Version: 1.0
-Generator: setuptools (74.1.2)
+Generator: setuptools (75.2.0)
 Root-Is-Purelib: true
 Tag: py3-none-any
 