biomedisa 2024.5.17-py3-none-any.whl → 2024.5.19-py3-none-any.whl

This diff shows the content changes between publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the packages as they appear in their respective public registries.
@@ -41,10 +41,10 @@ import cv2
  import time
  import zipfile
  import numba
- import shutil
  import subprocess
  import re
  import math
+ import tempfile

  def silent_remove(filename):
  try:
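
The only change in this hunk is the import swap that drives the rest of the release: the manually managed temporary directories (created under BASE_DIR with id_generator() and removed with shutil.rmtree()) give way to tempfile.TemporaryDirectory. A minimal sketch of the two lifetimes, using illustrative paths and names rather than biomedisa's own:

import os
import shutil
import tempfile

# Old pattern: a directory that has to be removed on every exit path,
# which is easy to leak when an exception is raised in between.
work_dir = os.path.join(tempfile.gettempdir(), 'example_job')  # illustrative path
os.makedirs(work_dir, exist_ok=True)
try:
    pass  # ... extract and load files under work_dir ...
finally:
    shutil.rmtree(work_dir, ignore_errors=True)

# New pattern: the context manager creates a unique directory and deletes it
# (with its contents) when the block exits, even if an exception escapes.
with tempfile.TemporaryDirectory() as temp_dir:
    pass  # ... extract and load files under temp_dir ...

The context-manager form removes the directory even when an error occurs mid-way, which is exactly the failure mode the old per-branch shutil.rmtree() calls were guarding against.
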
@@ -280,92 +280,85 @@ def load_data(path_to_data, process='None', return_extension=False):
  data, header = None, None

  elif extension == '.zip' or os.path.isdir(path_to_data):
- # extract files
- if extension=='.zip':
- path_to_dir = BASE_DIR + '/tmp/' + id_generator(40)
- try:
- zip_ref = zipfile.ZipFile(path_to_data, 'r')
- zip_ref.extractall(path=path_to_dir)
- zip_ref.close()
- except Exception as e:
- print(e)
- print('Using unzip package...')
- try:
- success = subprocess.Popen(['unzip',path_to_data,'-d',path_to_dir]).wait()
- if success != 0:
- if os.path.isdir(path_to_dir):
- shutil.rmtree(path_to_dir)
- data, header = None, None
- except Exception as e:
- print(e)
- if os.path.isdir(path_to_dir):
- shutil.rmtree(path_to_dir)
- data, header = None, None
- path_to_data = path_to_dir
-
- # load files
- if os.path.isdir(path_to_data):
- files = []
- for data_type in ['.[pP][nN][gG]','.[tT][iI][fF]','.[tT][iI][fF][fF]','.[dD][cC][mM]','.[dD][iI][cC][oO][mM]','.[bB][mM][pP]','.[jJ][pP][gG]','.[jJ][pP][eE][gG]','.nc','.nc.bz2']:
- files += [file for file in glob.glob(path_to_data+'/**/*'+data_type, recursive=True) if not os.path.basename(file).startswith('.')]
- nc_extension = False
- for file in files:
- if os.path.splitext(file)[1] == '.nc' or os.path.splitext(os.path.splitext(file)[0])[1] == '.nc':
- nc_extension = True
- if nc_extension:
+ with tempfile.TemporaryDirectory() as temp_dir:
+
+ # extract files
+ if extension=='.zip':
  try:
- data, header = nc_to_np(path_to_data)
+ zip_ref = zipfile.ZipFile(path_to_data, 'r')
+ zip_ref.extractall(path=temp_dir)
+ zip_ref.close()
  except Exception as e:
  print(e)
- data, header = None, None
- else:
- try:
- # remove unreadable files or directories
- for name in files:
- if os.path.isfile(name):
- try:
- img, _ = load(name)
- except:
+ print('Using unzip package...')
+ try:
+ success = subprocess.Popen(['unzip',path_to_data,'-d',temp_dir]).wait()
+ if success != 0:
+ data, header = None, None
+ except Exception as e:
+ print(e)
+ data, header = None, None
+ path_to_data = temp_dir
+
+ # load files
+ if os.path.isdir(path_to_data):
+ files = []
+ for data_type in ['.[pP][nN][gG]','.[tT][iI][fF]','.[tT][iI][fF][fF]','.[dD][cC][mM]','.[dD][iI][cC][oO][mM]','.[bB][mM][pP]','.[jJ][pP][gG]','.[jJ][pP][eE][gG]','.nc','.nc.bz2']:
+ files += [file for file in glob.glob(path_to_data+'/**/*'+data_type, recursive=True) if not os.path.basename(file).startswith('.')]
+ nc_extension = False
+ for file in files:
+ if os.path.splitext(file)[1] == '.nc' or os.path.splitext(os.path.splitext(file)[0])[1] == '.nc':
+ nc_extension = True
+ if nc_extension:
+ try:
+ data, header = nc_to_np(path_to_data)
+ except Exception as e:
+ print(e)
+ data, header = None, None
+ else:
+ try:
+ # remove unreadable files or directories
+ for name in files:
+ if os.path.isfile(name):
+ try:
+ img, _ = load(name)
+ except:
+ files.remove(name)
+ else:
  files.remove(name)
+ files.sort()
+
+ # get data size
+ img, _ = load(files[0])
+ if len(img.shape)==3:
+ ysh, xsh, csh = img.shape[0], img.shape[1], img.shape[2]
+ channel = 'last'
+ if ysh < csh:
+ csh, ysh, xsh = img.shape[0], img.shape[1], img.shape[2]
+ channel = 'first'
  else:
- files.remove(name)
- files.sort()
-
- # get data size
- img, _ = load(files[0])
- if len(img.shape)==3:
- ysh, xsh, csh = img.shape[0], img.shape[1], img.shape[2]
- channel = 'last'
- if ysh < csh:
- csh, ysh, xsh = img.shape[0], img.shape[1], img.shape[2]
- channel = 'first'
- else:
- ysh, xsh = img.shape[0], img.shape[1]
- csh, channel = 0, None
-
- # load data slice by slice
- data = np.empty((len(files), ysh, xsh), dtype=img.dtype)
- header, image_data_shape = [], []
- for k, file_name in enumerate(files):
- img, img_header = load(file_name)
- if csh==3:
- img = rgb2gray(img, channel)
- elif csh==1 and channel=='last':
- img = img[:,:,0]
- elif csh==1 and channel=='first':
- img = img[0,:,:]
- data[k] = img
- header.append(img_header)
- header = [header, files, data.dtype]
- data = np.swapaxes(data, 1, 2)
- data = np.copy(data, order='C')
- except Exception as e:
- print(e)
- data, header = None, None
-
- # remove extracted files
- if extension=='.zip' and os.path.isdir(path_to_data):
- shutil.rmtree(path_to_data)
+ ysh, xsh = img.shape[0], img.shape[1]
+ csh, channel = 0, None
+
+ # load data slice by slice
+ data = np.empty((len(files), ysh, xsh), dtype=img.dtype)
+ header, image_data_shape = [], []
+ for k, file_name in enumerate(files):
+ img, img_header = load(file_name)
+ if csh==3:
+ img = rgb2gray(img, channel)
+ elif csh==1 and channel=='last':
+ img = img[:,:,0]
+ elif csh==1 and channel=='first':
+ img = img[0,:,:]
+ data[k] = img
+ header.append(img_header)
+ header = [header, files, data.dtype]
+ data = np.swapaxes(data, 1, 2)
+ data = np.copy(data, order='C')
+ except Exception as e:
+ print(e)
+ data, header = None, None

  elif extension == '.mrc':
  try:
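
This hunk wraps the whole .zip/directory branch of load_data in a single tempfile.TemporaryDirectory context, so the explicit shutil.rmtree() calls in the error paths and the trailing cleanup block disappear. A rough, self-contained sketch of the same extraction pattern (zipfile first, the external unzip tool as a fallback); extract_and_list is a hypothetical helper, not biomedisa API, and the fallback assumes the unzip binary is installed:

import os
import subprocess
import tempfile
import zipfile

def extract_and_list(path_to_zip):
    # Extract with zipfile; if that fails (e.g. unsupported compression),
    # fall back to the external 'unzip' command.
    with tempfile.TemporaryDirectory() as temp_dir:
        try:
            with zipfile.ZipFile(path_to_zip, 'r') as zip_ref:
                zip_ref.extractall(path=temp_dir)
        except Exception as e:
            print(e)
            print('Using unzip package...')
            returncode = subprocess.Popen(['unzip', path_to_zip, '-d', temp_dir]).wait()
            if returncode != 0:
                return None
        # Anything that needs the extracted files must happen here,
        # before the directory is deleted on exit.
        return sorted(os.listdir(temp_dir))

Everything that depends on the extracted files has to run inside the with block; in the new code the slice-by-slice loading is therefore nested under the context manager, and only the assembled numpy array outlives it.
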
@@ -495,34 +488,31 @@ def save_data(path_to_final, final, header=None, final_image_type=None, compress
  simg.CopyInformation(header)
  sitk.WriteImage(simg, path_to_final, useCompression=compress)
  elif final_image_type in ['.zip', 'directory', '']:
- # make results directory
- if final_image_type == '.zip':
- results_dir = BASE_DIR + '/tmp/' + id_generator(40)
- os.makedirs(results_dir)
- os.chmod(results_dir, 0o770)
- else:
- results_dir = path_to_final
- if not os.path.isdir(results_dir):
- os.makedirs(results_dir)
- os.chmod(results_dir, 0o770)
- # save data as NC blocks
- if os.path.splitext(header[1][0])[1] == '.nc':
- np_to_nc(results_dir, final, header)
- file_names = header[1]
- # save data as PNG, TIF, DICOM slices
- else:
- header, file_names, final_dtype = header[0], header[1], header[2]
- final = final.astype(final_dtype)
- final = np.swapaxes(final, 2, 1)
- for k, file in enumerate(file_names):
- save(final[k], results_dir + '/' + os.path.basename(file), header[k])
- # zip data
- if final_image_type == '.zip':
- with zipfile.ZipFile(path_to_final, 'w') as zip:
- for file in file_names:
- zip.write(results_dir + '/' + os.path.basename(file), os.path.basename(file))
- if os.path.isdir(results_dir):
- shutil.rmtree(results_dir)
+ with tempfile.TemporaryDirectory() as temp_dir:
+ # make results directory
+ if final_image_type == '.zip':
+ results_dir = temp_dir
+ else:
+ results_dir = path_to_final
+ if not os.path.isdir(results_dir):
+ os.makedirs(results_dir)
+ os.chmod(results_dir, 0o770)
+ # save data as NC blocks
+ if os.path.splitext(header[1][0])[1] == '.nc':
+ np_to_nc(results_dir, final, header)
+ file_names = header[1]
+ # save data as PNG, TIF, DICOM slices
+ else:
+ header, file_names, final_dtype = header[0], header[1], header[2]
+ final = final.astype(final_dtype)
+ final = np.swapaxes(final, 2, 1)
+ for k, file in enumerate(file_names):
+ save(final[k], results_dir + '/' + os.path.basename(file), header[k])
+ # zip data
+ if final_image_type == '.zip':
+ with zipfile.ZipFile(path_to_final, 'w') as zip:
+ for file in file_names:
+ zip.write(results_dir + '/' + os.path.basename(file), os.path.basename(file))
  else:
  imageSize = int(final.nbytes * 10e-7)
  bigtiff = True if imageSize > 2000 else False
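
save_data now writes the per-slice results into a temporary directory when a .zip is requested, archives them under their basenames, and lets the context manager discard the intermediates instead of calling shutil.rmtree(). A small sketch of that save-then-zip flow; save_slices_as_zip is hypothetical and uses PIL as a stand-in for biomedisa's own save():

import os
import tempfile
import zipfile

import numpy as np
from PIL import Image  # stand-in writer; biomedisa uses its own save()

def save_slices_as_zip(volume, file_names, path_to_zip):
    # Write one 2D slice per file name into a temporary directory, then
    # archive the slices under their basenames. The directory and the
    # intermediate files disappear when the with-block exits.
    with tempfile.TemporaryDirectory() as temp_dir:
        for k, name in enumerate(file_names):
            Image.fromarray(volume[k]).save(os.path.join(temp_dir, os.path.basename(name)))
        with zipfile.ZipFile(path_to_zip, 'w') as zf:
            for name in file_names:
                base = os.path.basename(name)
                zf.write(os.path.join(temp_dir, base), base)

For example, save_slices_as_zip(np.zeros((3, 64, 64), dtype=np.uint8), ['slice_000.png', 'slice_001.png', 'slice_002.png'], 'result.zip') would produce a three-entry archive without leaving any loose slice files behind.
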
@@ -38,6 +38,7 @@ import numba
  import shutil
  import cv2
  import tarfile
+ import tempfile

  def unique(arr):
  arr = arr.astype(np.uint8)
@@ -110,37 +111,33 @@ def create_slices(path_to_data, path_to_label, on_site=False):
  # load data and reduce data size
  path_to_dir, extension = os.path.splitext(path_to_data)
  if extension == '.gz':
- path_to_dir, extension = os.path.splitext(path_to_dir)
+ extension = os.path.splitext(path_to_dir)[1]
  if extension == '.tar' or os.path.isdir(path_to_data):
- # extract files
- if extension == '.tar':
- path_to_dir = BASE_DIR + '/tmp/' + id_generator(40)
- tar = tarfile.open(path_to_data)
- tar.extractall(path=path_to_dir)
- tar.close()
- else:
- path_to_dir = path_to_data
- # load files
- img_names = []
- for data_type in ['.tif','.tiff','.am','.hdr','.mhd','.mha','.nrrd','.nii','.nii.gz','.zip','.mrc']:
- img_names.extend(glob(path_to_dir+'/**/*'+data_type, recursive=True))
- img_names = sorted(img_names)
- raw = None
- for name in img_names:
- arr, _ = load_data(name, 'create_slices')
- if arr is not None and raw is None:
- zsh, ysh, xsh = arr.shape
- scale = float(256) / float(max(zsh, ysh, xsh))
- z_scale = int(zsh * scale)
- y_scale = int(ysh * scale)
- x_scale = int(xsh * scale)
- raw = img_resize(arr, z_scale, y_scale, x_scale)
- elif arr is not None:
- arr = img_resize(arr, z_scale, y_scale, x_scale)
- raw = np.append(raw, arr, axis=0)
- # remove extracted files
- if extension == '.tar' and os.path.isdir(path_to_dir):
- shutil.rmtree(path_to_dir)
+ with tempfile.TemporaryDirectory() as temp_dir:
+ # extract files
+ if extension == '.tar':
+ tar = tarfile.open(path_to_data)
+ tar.extractall(path=temp_dir)
+ tar.close()
+ path_to_data = temp_dir
+ # load files
+ img_names = []
+ for data_type in ['.tif','.tiff','.am','.hdr','.mhd','.mha','.nrrd','.nii','.nii.gz','.zip','.mrc']:
+ img_names.extend(glob(path_to_data+'/**/*'+data_type, recursive=True))
+ img_names = sorted(img_names)
+ raw = None
+ for name in img_names:
+ arr, _ = load_data(name, 'create_slices')
+ if arr is not None and raw is None:
+ zsh, ysh, xsh = arr.shape
+ scale = float(256) / float(max(zsh, ysh, xsh))
+ z_scale = int(zsh * scale)
+ y_scale = int(ysh * scale)
+ x_scale = int(xsh * scale)
+ raw = img_resize(arr, z_scale, y_scale, x_scale)
+ elif arr is not None:
+ arr = img_resize(arr, z_scale, y_scale, x_scale)
+ raw = np.append(raw, arr, axis=0)
  else:
  raw, _ = load_data(path_to_data, 'create_slices')
  zsh, ysh, xsh = raw.shape
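
create_slices follows the same scheme for .tar uploads: extract into a TemporaryDirectory, glob the supported extensions, and rescale each volume so that its largest axis becomes 256 before appending it to the preview stack. A condensed sketch of the extract-and-load part; load_volumes_from_tar and loader are placeholders rather than biomedisa functions, and loading must happen inside the with block because the extracted files are gone once it exits:

import tarfile
import tempfile
from glob import glob

import numpy as np

def load_volumes_from_tar(path_to_tar, loader, extensions=('.tif', '.tiff', '.nii.gz')):
    # `loader` stands in for biomedisa's load_data and should return a
    # 3D array or None for unreadable files.
    volumes = []
    with tempfile.TemporaryDirectory() as temp_dir:
        with tarfile.open(path_to_tar) as tar:
            tar.extractall(path=temp_dir)
        img_names = []
        for ext in extensions:
            img_names.extend(glob(temp_dir + '/**/*' + ext, recursive=True))
        for name in sorted(img_names):
            arr = loader(name)          # must run before the directory vanishes
            if arr is not None:
                volumes.append(np.asarray(arr))
    return volumes
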
@@ -174,33 +171,29 @@ def create_slices(path_to_data, path_to_label, on_site=False):
  # load data
  path_to_dir, extension = os.path.splitext(path_to_label)
  if extension == '.gz':
- path_to_dir, extension = os.path.splitext(path_to_dir)
+ extension = os.path.splitext(path_to_dir)[1]
  if extension == '.tar' or os.path.isdir(path_to_label):
- # extract files
- if extension == '.tar':
- path_to_dir = BASE_DIR + '/tmp/' + id_generator(40)
- tar = tarfile.open(path_to_label)
- tar.extractall(path=path_to_dir)
- tar.close()
- else:
- path_to_dir = path_to_label
- # load files
- img_names = []
- for data_type in ['.tif','.tiff','.am','.hdr','.mhd','.mha','.nrrd','.nii','.nii.gz','.zip','.mrc']:
- img_names.extend(glob(path_to_dir+'/**/*'+data_type, recursive=True))
- img_names = sorted(img_names)
- # load and scale label data to corresponding img data
- mask = np.zeros((0, y_scale, x_scale), dtype=np.uint8)
- for name in img_names:
- arr, _ = load_data(name, 'create_slices')
- if arr is not None:
- arr = color_to_gray(arr)
- arr = arr.astype(np.uint8)
- arr = img_resize(arr, z_scale, y_scale, x_scale, labels=True)
- mask = np.append(mask, arr, axis=0)
- # remove extracted files
- if extension == '.tar' and os.path.isdir(path_to_dir):
- shutil.rmtree(path_to_dir)
+ with tempfile.TemporaryDirectory() as temp_dir:
+ # extract files
+ if extension == '.tar':
+ tar = tarfile.open(path_to_label)
+ tar.extractall(path=temp_dir)
+ tar.close()
+ path_to_label = temp_dir
+ # load files
+ img_names = []
+ for data_type in ['.tif','.tiff','.am','.hdr','.mhd','.mha','.nrrd','.nii','.nii.gz','.zip','.mrc']:
+ img_names.extend(glob(path_to_label+'/**/*'+data_type, recursive=True))
+ img_names = sorted(img_names)
+ # load and scale label data to corresponding img data
+ mask = np.zeros((0, y_scale, x_scale), dtype=np.uint8)
+ for name in img_names:
+ arr, _ = load_data(name, 'create_slices')
+ if arr is not None:
+ arr = color_to_gray(arr)
+ arr = arr.astype(np.uint8)
+ arr = img_resize(arr, z_scale, y_scale, x_scale, labels=True)
+ mask = np.append(mask, arr, axis=0)
  else:
  mask, _ = load_data(path_to_label, 'create_slices')
  mask = color_to_gray(mask)
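
The label branch mirrors the image branch; note that labels still go through img_resize(..., labels=True), which resizes without interpolating between label values. A sketch of the idea using scipy.ndimage.zoom with order=0 as a stand-in (the actual resizer in biomedisa differs):

import numpy as np
from scipy.ndimage import zoom  # stand-in for biomedisa's img_resize

def resize_labels(mask, z_scale, y_scale, x_scale):
    # Nearest-neighbour (order=0) keeps the discrete label IDs intact;
    # linear or cubic interpolation would blend neighbouring labels into
    # values that never occur in the annotation.
    factors = (z_scale / mask.shape[0],
               y_scale / mask.shape[1],
               x_scale / mask.shape[2])
    return zoom(mask.astype(np.uint8), factors, order=0)
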
@@ -27,7 +27,7 @@
  ##########################################################################

  import os
- from biomedisa_features.keras_helper import read_img_list, remove_extracted_data
+ from biomedisa_features.keras_helper import read_img_list
  from biomedisa_features.biomedisa_helper import img_resize, load_data, save_data, set_labels_to_zero
  from tensorflow.python.framework.errors_impl import ResourceExhaustedError
  from tensorflow.keras.applications import DenseNet121, densenet
@@ -43,12 +43,11 @@ from glob import glob
  import h5py
  import tarfile
  import matplotlib.pyplot as plt
+ import tempfile

  class InputError(Exception):
- def __init__(self, message=None, img_names=[], label_names=[]):
+ def __init__(self, message=None):
  self.message = message
- self.img_names = img_names
- self.label_names = label_names

  def save_history(history, path_to_model):
  # summarize history for accuracy
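
InputError is reduced to a message-only exception; the img_names/label_names bookkeeping on the class is dropped here and at the call sites in the later hunks. A minimal sketch of the slimmed-down pattern; require_volume is an illustrative caller, and biomedisa's own call sites still assign InputError.message on the class before raising:

class InputError(Exception):
    # message-only variant: no file-name bookkeeping on the class
    def __init__(self, message=None):
        self.message = message

def require_volume(arr, name):
    # illustrative caller: format the file name into the message instead of
    # stashing file lists on the exception class
    if arr is None:
        raise InputError(f'Invalid image data "{name}"')
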
@@ -98,124 +97,122 @@ def make_densenet(inputshape):
  def load_cropping_training_data(normalize, img_list, label_list, x_scale, y_scale, z_scale,
  labels_to_compute, labels_to_remove, img_in, label_in, normalization_parameters=None, channels=None):

- # read image lists
- if any(img_list):
- img_names, label_names = read_img_list(img_list, label_list)
- InputError.img_names = img_names
- InputError.label_names = label_names
-
- # load first label
- if any(img_list):
- a, _, _ = load_data(label_names[0], 'first_queue', True)
- if a is None:
- InputError.message = f'Invalid label data "{os.path.basename(label_names[0])}"'
- raise InputError()
- elif type(label_in) is list:
- a = label_in[0]
- else:
- a = label_in
- a = a.astype(np.uint8)
- a = set_labels_to_zero(a, labels_to_compute, labels_to_remove)
- label_z = np.any(a,axis=(1,2))
- label_y = np.any(a,axis=(0,2))
- label_x = np.any(a,axis=(0,1))
- label = np.append(label_z,label_y,axis=0)
- label = np.append(label,label_x,axis=0)
-
- # load first img
- if any(img_list):
- img, _ = load_data(img_names[0], 'first_queue')
- if img is None:
- InputError.message = f'Invalid image data "{os.path.basename(img_names[0])}"'
- raise InputError()
- elif type(img_in) is list:
- img = img_in[0]
- else:
- img = img_in
- # handle all images having channels >=1
- if len(img.shape)==3:
- z_shape, y_shape, x_shape = img.shape
- img = img.reshape(z_shape, y_shape, x_shape, 1)
- if channels is None:
- channels = img.shape[3]
- if img.shape[3] != channels:
- InputError.message = f'Number of channels must be {channels} for "{os.path.basename(img_names[0])}"'
- raise InputError()
- img = img.astype(np.float32)
- img_z = img_resize(img, a.shape[0], y_scale, x_scale)
- img_y = np.swapaxes(img_resize(img, z_scale, a.shape[1], x_scale),0,1)
- img_x = np.swapaxes(img_resize(img, z_scale, y_scale, a.shape[2]),0,2)
- img = np.append(img_z,img_y,axis=0)
- img = np.append(img,img_x,axis=0)
+ # make temporary directories
+ with tempfile.TemporaryDirectory() as temp_img_dir:
+ with tempfile.TemporaryDirectory() as temp_label_dir:

- # normalize image data
- for c in range(channels):
- img[:,:,:,c] -= np.amin(img[:,:,:,c])
- img[:,:,:,c] /= np.amax(img[:,:,:,c])
- if normalization_parameters is None:
- normalization_parameters = np.zeros((2,channels))
- normalization_parameters[0,c] = np.mean(img[:,:,:,c])
- normalization_parameters[1,c] = np.std(img[:,:,:,c])
- elif normalize:
- mean, std = np.mean(img[:,:,:,c]), np.std(img[:,:,:,c])
- img[:,:,:,c] = (img[:,:,:,c] - mean) / std
- img[:,:,:,c] = img[:,:,:,c] * normalization_parameters[1,c] + normalization_parameters[0,c]
-
- # loop over list of images
- if any(img_list) or type(img_in) is list:
- number_of_images = len(img_names) if any(img_list) else len(img_in)
-
- for k in range(1, number_of_images):
+ # read image lists
+ if any(img_list):
+ img_names, label_names = read_img_list(img_list, label_list, temp_img_dir, temp_label_dir)

- # append label
- if any(label_list):
- a, _ = load_data(label_names[k], 'first_queue')
+ # load first label
+ if any(img_list):
+ a, _, _ = load_data(label_names[0], 'first_queue', True)
  if a is None:
- InputError.message = f'Invalid label data "{os.path.basename(label_names[k])}"'
+ InputError.message = f'Invalid label data "{os.path.basename(label_names[0])}"'
  raise InputError()
+ elif type(label_in) is list:
+ a = label_in[0]
  else:
- a = label_in[k]
+ a = label_in
  a = a.astype(np.uint8)
  a = set_labels_to_zero(a, labels_to_compute, labels_to_remove)
- next_label_z = np.any(a,axis=(1,2))
- next_label_y = np.any(a,axis=(0,2))
- next_label_x = np.any(a,axis=(0,1))
- label = np.append(label,next_label_z,axis=0)
- label = np.append(label,next_label_y,axis=0)
- label = np.append(label,next_label_x,axis=0)
-
- # append image
+ label_z = np.any(a,axis=(1,2))
+ label_y = np.any(a,axis=(0,2))
+ label_x = np.any(a,axis=(0,1))
+ label = np.append(label_z,label_y,axis=0)
+ label = np.append(label,label_x,axis=0)
+
+ # load first img
  if any(img_list):
- a, _ = load_data(img_names[k], 'first_queue')
- if a is None:
- InputError.message = f'Invalid image data "{os.path.basename(img_names[k])}"'
+ img, _ = load_data(img_names[0], 'first_queue')
+ if img is None:
+ InputError.message = f'Invalid image data "{os.path.basename(img_names[0])}"'
  raise InputError()
+ elif type(img_in) is list:
+ img = img_in[0]
  else:
- a = img_in[k]
- if len(a.shape)==3:
- z_shape, y_shape, x_shape = a.shape
- a = a.reshape(z_shape, y_shape, x_shape, 1)
- if a.shape[3] != channels:
- InputError.message = f'Number of channels must be {channels} for "{os.path.basename(img_names[k])}"'
+ img = img_in
+ # handle all images having channels >=1
+ if len(img.shape)==3:
+ z_shape, y_shape, x_shape = img.shape
+ img = img.reshape(z_shape, y_shape, x_shape, 1)
+ if channels is None:
+ channels = img.shape[3]
+ if img.shape[3] != channels:
+ InputError.message = f'Number of channels must be {channels} for "{os.path.basename(img_names[0])}"'
  raise InputError()
- a = a.astype(np.float32)
- img_z = img_resize(a, a.shape[0], y_scale, x_scale)
- img_y = np.swapaxes(img_resize(a, z_scale, a.shape[1], x_scale),0,1)
- img_x = np.swapaxes(img_resize(a, z_scale, y_scale, a.shape[2]),0,2)
- next_img = np.append(img_z,img_y,axis=0)
- next_img = np.append(next_img,img_x,axis=0)
+ img = img.astype(np.float32)
+ img_z = img_resize(img, a.shape[0], y_scale, x_scale)
+ img_y = np.swapaxes(img_resize(img, z_scale, a.shape[1], x_scale),0,1)
+ img_x = np.swapaxes(img_resize(img, z_scale, y_scale, a.shape[2]),0,2)
+ img = np.append(img_z,img_y,axis=0)
+ img = np.append(img,img_x,axis=0)
+
+ # normalize image data
  for c in range(channels):
- next_img[:,:,:,c] -= np.amin(next_img[:,:,:,c])
- next_img[:,:,:,c] /= np.amax(next_img[:,:,:,c])
- if normalize:
- mean, std = np.mean(next_img[:,:,:,c]), np.std(next_img[:,:,:,c])
- next_img[:,:,:,c] = (next_img[:,:,:,c] - mean) / std
- next_img[:,:,:,c] = next_img[:,:,:,c] * normalization_parameters[1,c] + normalization_parameters[0,c]
- img = np.append(img, next_img, axis=0)
-
- # remove extracted data
- if any(img_list):
- remove_extracted_data(img_names, label_names)
+ img[:,:,:,c] -= np.amin(img[:,:,:,c])
+ img[:,:,:,c] /= np.amax(img[:,:,:,c])
+ if normalization_parameters is None:
+ normalization_parameters = np.zeros((2,channels))
+ normalization_parameters[0,c] = np.mean(img[:,:,:,c])
+ normalization_parameters[1,c] = np.std(img[:,:,:,c])
+ elif normalize:
+ mean, std = np.mean(img[:,:,:,c]), np.std(img[:,:,:,c])
+ img[:,:,:,c] = (img[:,:,:,c] - mean) / std
+ img[:,:,:,c] = img[:,:,:,c] * normalization_parameters[1,c] + normalization_parameters[0,c]
+
+ # loop over list of images
+ if any(img_list) or type(img_in) is list:
+ number_of_images = len(img_names) if any(img_list) else len(img_in)
+
+ for k in range(1, number_of_images):
+
+ # append label
+ if any(label_list):
+ a, _ = load_data(label_names[k], 'first_queue')
+ if a is None:
+ InputError.message = f'Invalid label data "{os.path.basename(label_names[k])}"'
+ raise InputError()
+ else:
+ a = label_in[k]
+ a = a.astype(np.uint8)
+ a = set_labels_to_zero(a, labels_to_compute, labels_to_remove)
+ next_label_z = np.any(a,axis=(1,2))
+ next_label_y = np.any(a,axis=(0,2))
+ next_label_x = np.any(a,axis=(0,1))
+ label = np.append(label,next_label_z,axis=0)
+ label = np.append(label,next_label_y,axis=0)
+ label = np.append(label,next_label_x,axis=0)
+
+ # append image
+ if any(img_list):
+ a, _ = load_data(img_names[k], 'first_queue')
+ if a is None:
+ InputError.message = f'Invalid image data "{os.path.basename(img_names[k])}"'
+ raise InputError()
+ else:
+ a = img_in[k]
+ if len(a.shape)==3:
+ z_shape, y_shape, x_shape = a.shape
+ a = a.reshape(z_shape, y_shape, x_shape, 1)
+ if a.shape[3] != channels:
+ InputError.message = f'Number of channels must be {channels} for "{os.path.basename(img_names[k])}"'
+ raise InputError()
+ a = a.astype(np.float32)
+ img_z = img_resize(a, a.shape[0], y_scale, x_scale)
+ img_y = np.swapaxes(img_resize(a, z_scale, a.shape[1], x_scale),0,1)
+ img_x = np.swapaxes(img_resize(a, z_scale, y_scale, a.shape[2]),0,2)
+ next_img = np.append(img_z,img_y,axis=0)
+ next_img = np.append(next_img,img_x,axis=0)
+ for c in range(channels):
+ next_img[:,:,:,c] -= np.amin(next_img[:,:,:,c])
+ next_img[:,:,:,c] /= np.amax(next_img[:,:,:,c])
+ if normalize:
+ mean, std = np.mean(next_img[:,:,:,c]), np.std(next_img[:,:,:,c])
+ next_img[:,:,:,c] = (next_img[:,:,:,c] - mean) / std
+ next_img[:,:,:,c] = next_img[:,:,:,c] * normalization_parameters[1,c] + normalization_parameters[0,c]
+ img = np.append(img, next_img, axis=0)

  # limit intensity range
  img[img<0] = 0
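
load_cropping_training_data now opens two nested TemporaryDirectory contexts, one for extracted images and one for extracted labels, and passes both to read_img_list, whose signature gained the corresponding arguments; the rest of the body is re-indented under those contexts rather than rewritten. The per-channel normalization that the function performs maps every additional volume onto the mean and standard deviation recorded from the first one: scale to [0, 1], standardize, then re-apply the reference statistics. A sketch of just that normalization step; normalize_to_reference is illustrative, and the real code additionally skips the re-mapping when the normalize flag is off:

import numpy as np

def normalize_to_reference(img, ref_params=None):
    # img has shape (z, y, x, channels). For the first volume the reference
    # mean/std per channel are recorded; later volumes are standardized and
    # mapped onto those reference statistics.
    img = img.astype(np.float32)
    channels = img.shape[-1]
    first = ref_params is None
    if first:
        ref_params = np.zeros((2, channels))
    for c in range(channels):
        img[..., c] -= img[..., c].min()
        img[..., c] /= img[..., c].max()
        if first:
            ref_params[0, c] = img[..., c].mean()
            ref_params[1, c] = img[..., c].std()
        else:
            mean, std = img[..., c].mean(), img[..., c].std()
            img[..., c] = (img[..., c] - mean) / std
            img[..., c] = img[..., c] * ref_params[1, c] + ref_params[0, c]
    return img, ref_params
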
@@ -359,8 +356,6 @@ def load_data_to_crop(path_to_img, channels, x_scale, y_scale, z_scale,
  # read image data
  if img is None:
  img, _, _ = load_data(path_to_img, 'first_queue', return_extension=True)
- InputError.img_names = [path_to_img]
- InputError.label_names = []
  img_data = np.copy(img, order='C')
  if img is None:
  InputError.message = "Invalid image data %s." %(os.path.basename(path_to_img))