biomedisa: 2024.5.18-py3-none-any.whl → 2024.5.19-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- biomedisa/biomedisa_features/biomedisa_helper.py +100 -110
- biomedisa/biomedisa_features/create_slices.py +49 -56
- biomedisa/biomedisa_features/crop_helper.py +107 -112
- biomedisa/biomedisa_features/keras_helper.py +137 -163
- biomedisa/deeplearning.py +78 -79
- {biomedisa-2024.5.18.dist-info → biomedisa-2024.5.19.dist-info}/METADATA +6 -7
- {biomedisa-2024.5.18.dist-info → biomedisa-2024.5.19.dist-info}/RECORD +10 -10
- {biomedisa-2024.5.18.dist-info → biomedisa-2024.5.19.dist-info}/LICENSE +0 -0
- {biomedisa-2024.5.18.dist-info → biomedisa-2024.5.19.dist-info}/WHEEL +0 -0
- {biomedisa-2024.5.18.dist-info → biomedisa-2024.5.19.dist-info}/top_level.txt +0 -0
biomedisa/biomedisa_features/keras_helper.py CHANGED
@@ -60,13 +60,11 @@ import re
 import time
 import h5py
 import atexit
-import
+import tempfile
 
 class InputError(Exception):
-    def __init__(self, message=None
+    def __init__(self, message=None):
         self.message = message
-        self.img_names = img_names
-        self.label_names = label_names
 
 def save_history(history, path_to_model, val_dice, train_dice):
     # summarize history for accuracy
@@ -307,37 +305,33 @@ def get_labels(arr, allLabels):
         final[arr == k] = allLabels[k]
     return final
 
-def read_img_list(img_list, label_list):
+def read_img_list(img_list, label_list, temp_img_dir, temp_label_dir):
     # read filenames
-    InputError.img_names = []
-    InputError.label_names = []
     img_names, label_names = [], []
     for img_name, label_name in zip(img_list, label_list):
 
         # check for tarball
         img_dir, img_ext = os.path.splitext(img_name)
         if img_ext == '.gz':
-
+            img_ext = os.path.splitext(img_dir)[1]
 
         label_dir, label_ext = os.path.splitext(label_name)
         if label_ext == '.gz':
-
+            label_ext = os.path.splitext(label_dir)[1]
 
         if (img_ext == '.tar' and label_ext == '.tar') or (os.path.isdir(img_name) and os.path.isdir(label_name)):
 
             # extract files
             if img_ext == '.tar':
-                img_dir = BASE_DIR + '/tmp/' + id_generator(40)
                 tar = tarfile.open(img_name)
-                tar.extractall(path=
+                tar.extractall(path=temp_img_dir)
                 tar.close()
-                img_name =
+                img_name = temp_img_dir
             if label_ext == '.tar':
-                label_dir = BASE_DIR + '/tmp/' + id_generator(40)
                 tar = tarfile.open(label_name)
-                tar.extractall(path=
+                tar.extractall(path=temp_label_dir)
                 tar.close()
-                label_name =
+                label_name = temp_label_dir
 
             for data_type in ['.am','.tif','.tiff','.hdr','.mhd','.mha','.nrrd','.nii','.nii.gz','.zip','.mrc']:
                 img_names += [file for file in glob.glob(img_name+'/**/*'+data_type, recursive=True) if not os.path.basename(file).startswith('.')]
@@ -345,15 +339,11 @@ def read_img_list(img_list, label_list):
     img_names = sorted(img_names)
     label_names = sorted(label_names)
     if len(img_names)==0 or len(label_names)==0 or len(img_names)!=len(label_names):
-        if img_ext == '.tar' and os.path.exists(img_name):
-            shutil.rmtree(img_name)
-        if label_ext == '.tar' and os.path.exists(label_name):
-            shutil.rmtree(label_name)
         if len(img_names)!=len(label_names):
             InputError.message = 'Number of image and label files must be the same'
-        elif img_ext == '.tar':
+        elif img_ext == '.tar' and len(img_names)==0:
             InputError.message = 'Invalid image TAR file'
-        elif label_ext == '.tar':
+        elif label_ext == '.tar' and len(label_names)==0:
             InputError.message = 'Invalid label TAR file'
         elif len(img_names)==0:
             InputError.message = 'Invalid image data'
@@ -365,165 +355,151 @@ def read_img_list(img_list, label_list):
             label_names.append(label_name)
     return img_names, label_names
 
-def remove_extracted_data(img_list, label_list):
-    for img_name in img_list:
-        if BASE_DIR + '/tmp/' in img_name:
-            img_dir = img_name[:len(BASE_DIR + '/tmp/') + 40]
-            if os.path.exists(img_dir):
-                shutil.rmtree(img_dir)
-    for label_name in label_list:
-        if BASE_DIR + '/tmp/' in label_name:
-            label_dir = label_name[:len(BASE_DIR + '/tmp/') + 40]
-            if os.path.exists(label_dir):
-                shutil.rmtree(label_dir)
-
 def load_training_data(normalize, img_list, label_list, channels, x_scale, y_scale, z_scale, no_scaling,
         crop_data, labels_to_compute, labels_to_remove, img_in=None, label_in=None,
         normalization_parameters=None, allLabels=None, header=None, extension='.tif',
         x_puffer=25, y_puffer=25, z_puffer=25):
 
-    #
-
-
-    InputError.img_names = img_names
-    InputError.label_names = label_names
-
-    # load first label
-    if any(img_list):
-        label, header, extension = load_data(label_names[0], 'first_queue', True)
-        if label is None:
-            InputError.message = f'Invalid label data "{os.path.basename(label_names[0])}"'
-            raise InputError()
-    elif type(label_in) is list:
-        label = label_in[0]
-    else:
-        label = label_in
-    label_dim = label.shape
-    label = set_labels_to_zero(label, labels_to_compute, labels_to_remove)
-    label_values, counts = np.unique(label, return_counts=True)
-    print(f'{os.path.basename(label_names[0])}:', 'Labels:', label_values[1:], 'Sizes:', counts[1:])
-    if crop_data:
-        argmin_z,argmax_z,argmin_y,argmax_y,argmin_x,argmax_x = predict_blocksize(label, x_puffer, y_puffer, z_puffer)
-        label = np.copy(label[argmin_z:argmax_z,argmin_y:argmax_y,argmin_x:argmax_x], order='C')
-    if not no_scaling:
-        label = img_resize(label, z_scale, y_scale, x_scale, labels=True)
-
-    # if header is not single data stream Amira Mesh falling back to Multi-TIFF
-    if extension != '.am':
-        if extension != '.tif':
-            print(f'Warning! Please use -hf or --header_file="path_to_training_label{extension}" for prediction to save your result as "{extension}"')
-        extension, header = '.tif', None
-    elif len(header) > 1:
-        print('Warning! Multiple data streams are not supported. Falling back to TIFF.')
-        extension, header = '.tif', None
-    else:
-        header = header[0]
-
-    # load first img
-    if any(img_list):
-        img, _ = load_data(img_names[0], 'first_queue')
-        if img is None:
-            InputError.message = f'Invalid image data "{os.path.basename(img_names[0])}"'
-            raise InputError()
-    elif type(img_in) is list:
-        img = img_in[0]
-    else:
-        img = img_in
-    if label_dim != img.shape:
-        InputError.message = f'Dimensions of "{os.path.basename(img_names[0])}" and "{os.path.basename(label_names[0])}" do not match'
-        raise InputError()
-
-    # ensure images have channels >=1
-    if len(img.shape)==3:
-        z_shape, y_shape, x_shape = img.shape
-        img = img.reshape(z_shape, y_shape, x_shape, 1)
-    if channels is None:
-        channels = img.shape[3]
-    if channels != img.shape[3]:
-        InputError.message = f'Number of channels must be {channels} for "{os.path.basename(img_names[0])}"'
-        raise InputError()
-
-    # crop data
-    if crop_data:
-        img = np.copy(img[argmin_z:argmax_z,argmin_y:argmax_y,argmin_x:argmax_x], order='C')
+    # make temporary directories
+    with tempfile.TemporaryDirectory() as temp_img_dir:
+        with tempfile.TemporaryDirectory() as temp_label_dir:
 
-
-
-
-        img = img_resize(img, z_scale, y_scale, x_scale)
-
-    # normalize image data
-    for c in range(channels):
-        img[:,:,:,c] -= np.amin(img[:,:,:,c])
-        img[:,:,:,c] /= np.amax(img[:,:,:,c])
-        if normalization_parameters is None:
-            normalization_parameters = np.zeros((2,channels))
-            normalization_parameters[0,c] = np.mean(img[:,:,:,c])
-            normalization_parameters[1,c] = np.std(img[:,:,:,c])
-        elif normalize:
-            mean, std = np.mean(img[:,:,:,c]), np.std(img[:,:,:,c])
-            img[:,:,:,c] = (img[:,:,:,c] - mean) / std
-            img[:,:,:,c] = img[:,:,:,c] * normalization_parameters[1,c] + normalization_parameters[0,c]
-
-    # loop over list of images
-    if any(img_list) or type(img_in) is list:
-        number_of_images = len(img_names) if any(img_list) else len(img_in)
-
-        for k in range(1, number_of_images):
+            # read image lists
+            if any(img_list):
+                img_names, label_names = read_img_list(img_list, label_list, temp_img_dir, temp_label_dir)
 
-#
-if any(
-
-if
-InputError.message = f'Invalid label data "{os.path.basename(label_names[
+            # load first label
+            if any(img_list):
+                label, header, extension = load_data(label_names[0], 'first_queue', True)
+                if label is None:
+                    InputError.message = f'Invalid label data "{os.path.basename(label_names[0])}"'
                     raise InputError()
+            elif type(label_in) is list:
+                label = label_in[0]
             else:
-
-label_dim =
-
-label_values, counts = np.unique(
-print(f'{os.path.basename(label_names[
+                label = label_in
+            label_dim = label.shape
+            label = set_labels_to_zero(label, labels_to_compute, labels_to_remove)
+            label_values, counts = np.unique(label, return_counts=True)
+            print(f'{os.path.basename(label_names[0])}:', 'Labels:', label_values[1:], 'Sizes:', counts[1:])
             if crop_data:
-argmin_z,argmax_z,argmin_y,argmax_y,argmin_x,argmax_x = predict_blocksize(
-
+                argmin_z,argmax_z,argmin_y,argmax_y,argmin_x,argmax_x = predict_blocksize(label, x_puffer, y_puffer, z_puffer)
+                label = np.copy(label[argmin_z:argmax_z,argmin_y:argmax_y,argmin_x:argmax_x], order='C')
             if not no_scaling:
-
-
+                label = img_resize(label, z_scale, y_scale, x_scale, labels=True)
+
+            # if header is not single data stream Amira Mesh falling back to Multi-TIFF
+            if extension != '.am':
+                if extension != '.tif':
+                    print(f'Warning! Please use -hf or --header_file="path_to_training_label{extension}" for prediction to save your result as "{extension}"')
+                extension, header = '.tif', None
+            elif len(header) > 1:
+                print('Warning! Multiple data streams are not supported. Falling back to TIFF.')
+                extension, header = '.tif', None
+            else:
+                header = header[0]
 
-#
+            # load first img
             if any(img_list):
-
-if
-InputError.message = f'Invalid image data "{os.path.basename(img_names[
+                img, _ = load_data(img_names[0], 'first_queue')
+                if img is None:
+                    InputError.message = f'Invalid image data "{os.path.basename(img_names[0])}"'
                     raise InputError()
+            elif type(img_in) is list:
+                img = img_in[0]
             else:
-
-if label_dim !=
-InputError.message = f'Dimensions of "{os.path.basename(img_names[
+                img = img_in
+            if label_dim != img.shape:
+                InputError.message = f'Dimensions of "{os.path.basename(img_names[0])}" and "{os.path.basename(label_names[0])}" do not match'
                 raise InputError()
-
-
-
-
-
+
+            # ensure images have channels >=1
+            if len(img.shape)==3:
+                z_shape, y_shape, x_shape = img.shape
+                img = img.reshape(z_shape, y_shape, x_shape, 1)
+            if channels is None:
+                channels = img.shape[3]
+            if channels != img.shape[3]:
+                InputError.message = f'Number of channels must be {channels} for "{os.path.basename(img_names[0])}"'
                 raise InputError()
+
+            # crop data
             if crop_data:
-
-
+                img = np.copy(img[argmin_z:argmax_z,argmin_y:argmax_y,argmin_x:argmax_x], order='C')
+
+            # scale/resize image data
+            img = img.astype(np.float32)
             if not no_scaling:
-
+                img = img_resize(img, z_scale, y_scale, x_scale)
+
+            # normalize image data
             for c in range(channels):
-
-
-if
-
-
-
-
-
-
-
-
+                img[:,:,:,c] -= np.amin(img[:,:,:,c])
+                img[:,:,:,c] /= np.amax(img[:,:,:,c])
+                if normalization_parameters is None:
+                    normalization_parameters = np.zeros((2,channels))
+                    normalization_parameters[0,c] = np.mean(img[:,:,:,c])
+                    normalization_parameters[1,c] = np.std(img[:,:,:,c])
+                elif normalize:
+                    mean, std = np.mean(img[:,:,:,c]), np.std(img[:,:,:,c])
+                    img[:,:,:,c] = (img[:,:,:,c] - mean) / std
+                    img[:,:,:,c] = img[:,:,:,c] * normalization_parameters[1,c] + normalization_parameters[0,c]
+
+            # loop over list of images
+            if any(img_list) or type(img_in) is list:
+                number_of_images = len(img_names) if any(img_list) else len(img_in)
+
+                for k in range(1, number_of_images):
+
+                    # append label
+                    if any(label_list):
+                        a, _ = load_data(label_names[k], 'first_queue')
+                        if a is None:
+                            InputError.message = f'Invalid label data "{os.path.basename(label_names[k])}"'
+                            raise InputError()
+                    else:
+                        a = label_in[k]
+                    label_dim = a.shape
+                    a = set_labels_to_zero(a, labels_to_compute, labels_to_remove)
+                    label_values, counts = np.unique(a, return_counts=True)
+                    print(f'{os.path.basename(label_names[k])}:', 'Labels:', label_values[1:], 'Sizes:', counts[1:])
+                    if crop_data:
+                        argmin_z,argmax_z,argmin_y,argmax_y,argmin_x,argmax_x = predict_blocksize(a, x_puffer, y_puffer, z_puffer)
+                        a = np.copy(a[argmin_z:argmax_z,argmin_y:argmax_y,argmin_x:argmax_x], order='C')
+                    if not no_scaling:
+                        a = img_resize(a, z_scale, y_scale, x_scale, labels=True)
+                    label = np.append(label, a, axis=0)
+
+                    # append image
+                    if any(img_list):
+                        a, _ = load_data(img_names[k], 'first_queue')
+                        if a is None:
+                            InputError.message = f'Invalid image data "{os.path.basename(img_names[k])}"'
+                            raise InputError()
+                    else:
+                        a = img_in[k]
+                    if label_dim != a.shape:
+                        InputError.message = f'Dimensions of "{os.path.basename(img_names[k])}" and "{os.path.basename(label_names[k])}" do not match'
+                        raise InputError()
+                    if len(a.shape)==3:
+                        z_shape, y_shape, x_shape = a.shape
+                        a = a.reshape(z_shape, y_shape, x_shape, 1)
+                    if a.shape[3] != channels:
+                        InputError.message = f'Number of channels must be {channels} for "{os.path.basename(img_names[k])}"'
+                        raise InputError()
+                    if crop_data:
+                        a = np.copy(a[argmin_z:argmax_z,argmin_y:argmax_y,argmin_x:argmax_x], order='C')
+                    a = a.astype(np.float32)
+                    if not no_scaling:
+                        a = img_resize(a, z_scale, y_scale, x_scale)
+                    for c in range(channels):
+                        a[:,:,:,c] -= np.amin(a[:,:,:,c])
+                        a[:,:,:,c] /= np.amax(a[:,:,:,c])
+                        if normalize:
+                            mean, std = np.mean(a[:,:,:,c]), np.std(a[:,:,:,c])
+                            a[:,:,:,c] = (a[:,:,:,c] - mean) / std
+                            a[:,:,:,c] = a[:,:,:,c] * normalization_parameters[1,c] + normalization_parameters[0,c]
+                    img = np.append(img, a, axis=0)
 
     # limit intensity range
     img[img<0] = 0
@@ -973,8 +949,6 @@ def load_prediction_data(path_to_img, channels, x_scale, y_scale, z_scale,
     # read image data
     if img is None:
         img, img_header = load_data(path_to_img, 'first_queue')
-        InputError.img_names = [path_to_img]
-        InputError.label_names = []
 
     # verify validity
     if img is None:
@@ -1147,7 +1121,7 @@ def predict_semantic_segmentation(bm, img, path_to_model,
     if extension == '.gz':
         extension = '.nii.gz'
         bm.path_to_final = os.path.splitext(bm.path_to_final)[0] + extension
-    if bm.django_env and not bm.remote and not bm.
+    if bm.django_env and not bm.remote and not bm.tarfile:
         bm.path_to_final = unique_file_path(bm.path_to_final)
 
     # handle amira header
biomedisa/deeplearning.py CHANGED
@@ -43,6 +43,7 @@ import h5py
 import time
 import subprocess
 import glob
+import tempfile
 
 class Biomedisa(object):
     pass
@@ -227,81 +228,83 @@ def deep_learning(img_data, label_data=None, val_img_data=None, val_label_data=N
         crop_data = True if 'cropping_weights' in hf else False
         hf.close()
 
-#
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-#
-
-
-
-if
-filename =
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-bm
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+        # make temporary directory
+        with tempfile.TemporaryDirectory() as temp_dir:
+
+            # extract image files from tar file
+            bm.tarfile = False
+            if bm.path_to_images is not None and (os.path.splitext(bm.path_to_images)[1]=='.tar' or bm.path_to_images[-7:]=='.tar.gz'):
+                bm.tarfile = True
+                path_to_result = os.path.dirname(bm.path_to_images) + '/final.'+os.path.basename(bm.path_to_images)
+                if path_to_result[-3:]=='.gz':
+                    path_to_result = path_to_result[:-3]
+                if bm.django_env and not bm.remote:
+                    path_to_result = unique_file_path(path_to_result)
+                tar = tarfile.open(bm.path_to_images)
+                tar.extractall(path=temp_dir)
+                tar.close()
+                bm.path_to_images = temp_dir
+                bm.save_cropped, bm.acwe = False, False
+                bm.clean, bm.fill = None, None
+
+            # list of images
+            path_to_finals = []
+            if bm.path_to_images is not None and os.path.isdir(bm.path_to_images):
+                files = glob.glob(bm.path_to_images+'/**/*', recursive=True)
+                bm.path_to_images = [f for f in files if os.path.isfile(f)]
+            else:
+                bm.path_to_images = [bm.path_to_images]
+
+            # loop over all images
+            for bm.path_to_image in bm.path_to_images:
+
+                # create path_to_final
+                if bm.path_to_image:
+                    filename = os.path.basename(bm.path_to_image)
+                    filename = os.path.splitext(filename)[0]
+                    if filename[-4:] == '.nii':
+                        filename = filename[:-4]
+                    bm.path_to_cropped_image = os.path.dirname(bm.path_to_image) + '/' + filename + '.cropped.tif'
+                    if bm.django_env and not bm.remote and not bm.tarfile:
+                        bm.path_to_cropped_image = unique_file_path(bm.path_to_cropped_image)
+                    filename = 'final.' + filename
+                    bm.path_to_final = os.path.dirname(bm.path_to_image) + '/' + filename + extension
+                    if bm.django_env and not bm.remote and not bm.tarfile:
+                        bm.path_to_final = unique_file_path(bm.path_to_final)
+
+                # crop data
+                region_of_interest, cropped_volume = None, None
+                if crop_data:
+                    region_of_interest, cropped_volume = ch.crop_data(bm.path_to_image, bm.path_to_model, bm.path_to_cropped_image,
+                        bm.batch_size, bm.debug_cropping, bm.save_cropped, img_data, bm.x_range, bm.y_range, bm.z_range)
+
+                # load prediction data
+                img, img_header, z_shape, y_shape, x_shape, region_of_interest, img_data = load_prediction_data(bm.path_to_image,
+                    channels, bm.x_scale, bm.y_scale, bm.z_scale, bm.no_scaling, normalize, normalization_parameters,
+                    region_of_interest, img_data, img_header)
+
+                # make prediction
+                results, bm = predict_semantic_segmentation(bm, img, bm.path_to_model,
+                    bm.z_patch, bm.y_patch, bm.x_patch, z_shape, y_shape, x_shape, bm.compression, header,
+                    img_header, bm.stride_size, allLabels, bm.batch_size, region_of_interest,
+                    bm.no_scaling, extension, img_data)
+
+                # results
+                if cropped_volume is not None:
+                    results['cropped_volume'] = cropped_volume
+
+                # path to results
+                if bm.path_to_image:
+                    path_to_finals.append(bm.path_to_final)
+
+            # write tar file and delete extracted image files
+            if bm.tarfile and os.path.exists(temp_dir):
+                with tarfile.open(path_to_result, 'w') as tar:
+                    for file_path in path_to_finals:
+                        file_name = os.path.basename(file_path)
+                        tar.add(file_path, arcname=file_name)
+                bm.path_to_final = path_to_result
+                bm.path_to_cropped_image = None
 
     # computation time
     t = int(time.time() - TIC)
@@ -497,13 +500,9 @@ if __name__ == '__main__':
     try:
         deep_learning(None, **kwargs)
     except InputError:
-        if any(InputError.img_names):
-            remove_extracted_data(InputError.img_names, InputError.label_names)
         print(traceback.format_exc())
         bm = _error_(bm, f'{InputError.message}')
     except ch.InputError:
-        if any(ch.InputError.img_names):
-            remove_extracted_data(ch.InputError.img_names, ch.InputError.label_names)
         print(traceback.format_exc())
         bm = _error_(bm, f'{ch.InputError.message}')
     except MemoryError:
{biomedisa-2024.5.18.dist-info → biomedisa-2024.5.19.dist-info}/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: biomedisa
-Version: 2024.5.18
+Version: 2024.5.19
 Summary: Segmentation of 3D volumetric image data
 Author: Philipp Lösel
 Author-email: philipp.loesel@anu.edu.au
@@ -140,8 +140,7 @@ save_data('final.Head5.am', results['regular'], results['header'])
 
 #### Command-line based (prediction)
 ```
-
-python -m biomedisa.deeplearning C:\Users\%USERNAME%\Downloads\testing_axial_crop_pat13.nii.gz C:\Users\%USERNAME%\Downloads\heart.h5 -p -bs 6
+python -m biomedisa.deeplearning C:\Users\%USERNAME%\Downloads\testing_axial_crop_pat13.nii.gz C:\Users\%USERNAME%\Downloads\heart.h5 -p
 ```
 
 # Biomedisa Features
@@ -224,22 +223,22 @@ assd = ASSD(ground_truth, result)
 ```
 
 # Update Biomedisa
-If you installed Biomedisa via Pip
+If you installed Biomedisa via Pip
 ```
 pip install --upgrade biomedisa
 ```
-If you used `git clone`, change to the Biomedisa directory and make a pull request
+If you used `git clone`, change to the Biomedisa directory and make a pull request
 ```
 cd git/biomedisa
 git pull
 ```
 
-If you installed the browser based version of Biomedisa (including MySQL database), you also need to update the database
+If you installed the browser based version of Biomedisa (including MySQL database), you also need to update the database
 ```
 python manage.py migrate
 ```
 
-If you installed an [Apache Server](https://github.com/biomedisa/biomedisa/blob/master/README/APACHE_SERVER.md), you need to restart the server
+If you installed an [Apache Server](https://github.com/biomedisa/biomedisa/blob/master/README/APACHE_SERVER.md), you need to restart the server
 ```
 sudo service apache2 restart
 ```