biomedisa-24.8.5-py3-none-any.whl → biomedisa-24.8.6-py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as they appear in their public registry. It is provided for informational purposes only.
biomedisa/features/DataGenerator.py

@@ -129,39 +129,25 @@ class DataGenerator(tf.keras.utils.Sequence):
     def __len__(self):
         'Denotes the number of batches per epoch'
         if len(self.list_IDs_bg) > 0:
-            len_IDs = 2 * max(len(self.list_IDs_fg), len(self.list_IDs_bg))
+            len_IDs = max(len(self.list_IDs_fg), len(self.list_IDs_bg))
+            n_batches = len_IDs // (self.batch_size // 2)
         else:
             len_IDs = len(self.list_IDs_fg)
-        n_batches = int(np.floor(len_IDs / self.batch_size))
+            n_batches = len_IDs // self.batch_size
         return n_batches

     def __getitem__(self, index):
         'Generate one batch of data'
-
         if len(self.list_IDs_bg) > 0:
-
-            # len IDs
-            len_IDs = max(len(self.list_IDs_fg), len(self.list_IDs_bg))
-
-            # upsample lists of indexes to the same size
-            repetitions = int(np.floor(len_IDs / len(self.list_IDs_fg))) + 1
-            upsampled_indexes_fg = np.tile(self.indexes_fg, repetitions)
-            upsampled_indexes_fg = upsampled_indexes_fg[:len_IDs]
-
-            repetitions = int(np.floor(len_IDs / len(self.list_IDs_bg))) + 1
-            upsampled_indexes_bg = np.tile(self.indexes_bg, repetitions)
-            upsampled_indexes_bg = upsampled_indexes_bg[:len_IDs]
-
             # Generate indexes of the batch
-            tmp_batch_size = int(self.batch_size / 2)
-            indexes_fg = upsampled_indexes_fg[index*tmp_batch_size:(index+1)*tmp_batch_size]
-            indexes_bg = upsampled_indexes_bg[index*tmp_batch_size:(index+1)*tmp_batch_size]
+            half_batch_size = self.batch_size // 2
+            indexes_fg = self.indexes_fg[index*half_batch_size:(index+1)*half_batch_size]
+            indexes_bg = self.indexes_bg[index*half_batch_size:(index+1)*half_batch_size]

             # Find list of IDs
             list_IDs_temp = [self.list_IDs_fg[k] for k in indexes_fg] + [self.list_IDs_bg[k] for k in indexes_bg]

         else:
-
             # Generate indexes of the batch
             indexes_fg = self.indexes_fg[index*self.batch_size:(index+1)*self.batch_size]

@@ -175,11 +161,22 @@ class DataGenerator(tf.keras.utils.Sequence):

     def on_epoch_end(self):
         'Updates indexes after each epoch'
-        self.indexes_fg = np.arange(len(self.list_IDs_fg))
-        self.indexes_bg = np.arange(len(self.list_IDs_bg))
+        if len(self.list_IDs_bg) > 0:
+            # upsample lists of indexes
+            indexes_fg = np.arange(len(self.list_IDs_fg))
+            indexes_bg = np.arange(len(self.list_IDs_bg))
+            len_IDs = max(len(self.list_IDs_fg), len(self.list_IDs_bg))
+            repetitions = len_IDs // len(self.list_IDs_fg) + 1
+            self.indexes_fg = np.tile(indexes_fg, repetitions)
+            repetitions = len_IDs // len(self.list_IDs_bg) + 1
+            self.indexes_bg = np.tile(indexes_bg, repetitions)
+        else:
+            self.indexes_fg = np.arange(len(self.list_IDs_fg))
+        # shuffle indexes
         if self.shuffle == True:
             np.random.shuffle(self.indexes_fg)
-            np.random.shuffle(self.indexes_bg)
+            if len(self.list_IDs_bg) > 0:
+                np.random.shuffle(self.indexes_bg)

     def __data_generation(self, list_IDs_temp):
         'Generates data containing batch_size samples' # X : (n_samples, *dim, n_channels)
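
Taken together, these two hunks move the foreground/background upsampling out of `__getitem__` (where it was recomputed for every batch) into `on_epoch_end` (computed once per epoch), and `__len__` now counts batches of `batch_size//2` foreground plus `batch_size//2` background samples. A minimal standalone sketch of the scheme, with made-up toy sizes rather than Biomedisa's real inputs:

```python
import numpy as np

# toy imbalanced ID lists and batch size
list_IDs_fg, list_IDs_bg = list(range(10)), list(range(3))
batch_size = 4

# on_epoch_end: tile each index list until it covers max(len_fg, len_bg)
len_IDs = max(len(list_IDs_fg), len(list_IDs_bg))
indexes_fg = np.tile(np.arange(len(list_IDs_fg)), len_IDs // len(list_IDs_fg) + 1)
indexes_bg = np.tile(np.arange(len(list_IDs_bg)), len_IDs // len(list_IDs_bg) + 1)

# __len__: each batch draws batch_size//2 samples from each list
n_batches = len_IDs // (batch_size // 2)

# __getitem__(0): both halves of the first batch are now plain slices
half = batch_size // 2
batch = ([list_IDs_fg[k] for k in indexes_fg[:half]] +
         [list_IDs_bg[k] for k in indexes_bg[:half]])
print(n_batches, batch)  # 5 [0, 1, 0, 1]
```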
biomedisa/features/DataGeneratorCrop.py

@@ -28,22 +28,25 @@

 import numpy as np
 import tensorflow as tf
-from scipy.ndimage import gaussian_filter, map_coordinates
+from scipy.ndimage import gaussian_filter, map_coordinates, rotate
+import random

 def elastic_transform(image, alpha=100, sigma=20):
-    zsh, ysh, xsh = image.shape
+    ysh, xsh, csh = image.shape
     dx = gaussian_filter((np.random.rand(ysh, xsh) * 2 - 1) * alpha, sigma)
     dy = gaussian_filter((np.random.rand(ysh, xsh) * 2 - 1) * alpha, sigma)
     y, x = np.meshgrid(np.arange(ysh), np.arange(xsh), indexing='ij')
     indices = np.reshape(y+dy, (-1, 1)), np.reshape(x+dx, (-1, 1))
-    for k in range(zsh):
-        image[k] = map_coordinates(image[k], indices, order=1, mode='reflect').reshape(ysh, xsh)
+    for k in range(csh):
+        image[:,:,k] = map_coordinates(image[:,:,k], indices, order=0, mode='reflect').reshape(ysh, xsh)
     return image

 class DataGeneratorCrop(tf.keras.utils.Sequence):
     'Generates data for Keras'
-    def __init__(self, img, label, list_IDs_fg, list_IDs_bg, batch_size=32, dim=(32,32,32),
-                 n_channels=3, n_classes=2, shuffle=True):
+    def __init__(self, img, label, list_IDs_fg, list_IDs_bg, batch_size=32,
+                 dim=(32,32,32), n_channels=3, n_classes=2, shuffle=True,
+                 augment=(False,False,False,0), train=True):
+
         'Initialization'
         self.dim = dim
         self.list_IDs_fg = list_IDs_fg
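
The rewritten `elastic_transform` operates on a single 2D multi-channel image of shape `(height, width, channels)` instead of a stack of 2D slices, and applies one shared displacement field to every channel with nearest-neighbour interpolation (`order=0` instead of `order=1`). A usage sketch with a made-up input array (the function warps its argument in place and also returns it):

```python
import numpy as np

rgb = np.random.randint(0, 256, (256, 256, 3), dtype=np.uint8)
warped = elastic_transform(rgb, alpha=100, sigma=20)  # modifies rgb in place
assert warped.shape == (256, 256, 3)
```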
@@ -54,6 +57,8 @@ class DataGeneratorCrop(tf.keras.utils.Sequence):
         self.n_channels = n_channels
         self.n_classes = n_classes
         self.shuffle = shuffle
+        self.augment = augment
+        self.train = train
         self.on_epoch_end()

     def __len__(self):
@@ -108,14 +113,37 @@ class DataGeneratorCrop(tf.keras.utils.Sequence):
     def __data_generation(self, list_IDs_temp):
         'Generates data containing batch_size samples' # X : (n_samples, *dim, n_channels)

+        # get augmentation parameter
+        flip_x, flip_y, flip_z, rotation = self.augment
+        elastic = False
+
         # Initialization
         X = np.empty((self.batch_size, *self.dim, self.n_channels), dtype=np.uint8)
         y = np.empty((self.batch_size,), dtype=np.int32)

         # Generate data
         for i, ID in enumerate(list_IDs_temp):
-            X[i,...] = self.img[ID,...]
-            y[i] = self.label[ID]
+            tmp_X = self.img[ID,...].copy()
+
+            # augmentation
+            if self.train and (any(self.augment) or elastic):
+                if flip_x and np.random.randint(2) and abs(self.label[ID])!=3:
+                    tmp_X = np.flip(tmp_X, 1)
+                if flip_y and np.random.randint(2):
+                    if abs(self.label[ID])==1:
+                        tmp_X = np.flip(tmp_X, 0)
+                    elif abs(self.label[ID])==3:
+                        tmp_X = np.flip(tmp_X, 1)
+                if flip_z and np.random.randint(2) and abs(self.label[ID])!=1:
+                    tmp_X = np.flip(tmp_X, 0)
+                if rotation:
+                    angle = random.uniform(-rotation, rotation)
+                    tmp_X = rotate(tmp_X, angle, order=0, mode='reflect', reshape=False)
+                if elastic:
+                    tmp_X = elastic_transform(tmp_X)
+
+            X[i,...] = tmp_X
+            y[i] = 0 if self.label[ID] < 0 else 1

         return X, y

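The new target `y[i] = 0 if self.label[ID] < 0 else 1` relies on the signed slice labels produced in crop_helper.py (see the `load_cropping_training_data` hunks below): the magnitude encodes the slice axis, the sign the old foreground/background boolean. The axis also gates the flips, because a 2D slice only contains two of the three volume axes; a decoding sketch consistent with the flip logic above:

```python
# |label| -> slice orientation; sign -> binary class.
# The in-plane (row, col) axes decide which of flip_x/flip_y/flip_z
# can be realised on that slice.
in_plane = {1: ('y', 'x'),   # z-slice
            2: ('z', 'x'),   # y-slice
            3: ('z', 'y')}   # x-slice
for label in (1, -1, 2, -2, 3, -3):
    rows, cols = in_plane[abs(label)]
    target = 0 if label < 0 else 1
    print(f'label {label:+d}: ({rows},{cols}) slice, class {target}')
```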
biomedisa/features/crop_helper.py

@@ -44,6 +44,7 @@ import h5py
 import tarfile
 import matplotlib.pyplot as plt
 import tempfile
+import copy

 class InputError(Exception):
     def __init__(self, message=None):
@@ -117,9 +118,12 @@ def load_cropping_training_data(normalize, img_list, label_list, x_scale, y_scal
     a = label_in
     a = a.astype(np.uint8)
     a = set_labels_to_zero(a, labels_to_compute, labels_to_remove)
-    label_z = np.any(a,axis=(1,2))
-    label_y = np.any(a,axis=(0,2))
-    label_x = np.any(a,axis=(0,1))
+    label_z = np.any(a,axis=(1,2)).astype(np.int8) * 1
+    label_y = np.any(a,axis=(0,2)).astype(np.int8) * 2
+    label_x = np.any(a,axis=(0,1)).astype(np.int8) * 3
+    label_z[label_z==0] = -1
+    label_y[label_y==0] = -2
+    label_x[label_x==0] = -3
     label = np.append(label_z,label_y,axis=0)
     label = np.append(label,label_x,axis=0)

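Previously each slice carried a plain boolean (structure present or absent), and the slice orientation was only implicit in its position within the concatenated label vector. The new encoding makes both explicit: ±1 for z-slices, ±2 for y-slices, ±3 for x-slices, with the sign carrying the old boolean. A worked toy example for the z axis:

```python
import numpy as np

# 4x4x4 toy volume with foreground only in z-slices 1 and 2
a = np.zeros((4, 4, 4), dtype=np.uint8)
a[1:3, 1:3, 1:3] = 1

label_z = np.any(a, axis=(1, 2)).astype(np.int8) * 1
label_z[label_z == 0] = -1
print(label_z)  # [-1  1  1 -1]
```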
@@ -178,12 +182,15 @@ def load_cropping_training_data(normalize, img_list, label_list, x_scale, y_scal
             a = label_in[k]
             a = a.astype(np.uint8)
             a = set_labels_to_zero(a, labels_to_compute, labels_to_remove)
-            next_label_z = np.any(a,axis=(1,2))
-            next_label_y = np.any(a,axis=(0,2))
-            next_label_x = np.any(a,axis=(0,1))
-            label = np.append(label,next_label_z,axis=0)
-            label = np.append(label,next_label_y,axis=0)
-            label = np.append(label,next_label_x,axis=0)
+            label_z = np.any(a,axis=(1,2)).astype(np.int8) * 1
+            label_y = np.any(a,axis=(0,2)).astype(np.int8) * 2
+            label_x = np.any(a,axis=(0,1)).astype(np.int8) * 3
+            label_z[label_z==0] = -1
+            label_y[label_y==0] = -2
+            label_x[label_x==0] = -3
+            label = np.append(label,label_z,axis=0)
+            label = np.append(label,label_y,axis=0)
+            label = np.append(label,label_x,axis=0)

             # append image
             if any(img_list):
@@ -228,20 +235,20 @@ def load_cropping_training_data(normalize, img_list, label_list, x_scale, y_scal
     return img_rgb, label, normalization_parameters, channels

 def train_cropping(img, label, path_to_model, epochs, batch_size,
-                   validation_split, flip_x, flip_y, flip_z, rotate,
-                   img_val, label_val):
+        validation_split, flip_x, flip_y, flip_z, rotate,
+        img_val, label_val):

     # img shape
     zsh, ysh, xsh, channels = img.shape

     # list of IDs
-    list_IDs_fg = list(np.where(label)[0])
-    list_IDs_bg = list(np.where(label==False)[0])
+    list_IDs_fg = list(np.where(label>0)[0])
+    list_IDs_bg = list(np.where(label<0)[0])

     # validation data
     if np.any(img_val):
-        list_IDs_val_fg = list(np.where(label_val)[0])
-        list_IDs_val_bg = list(np.where(label_val==False)[0])
+        list_IDs_val_fg = list(np.where(label_val>0)[0])
+        list_IDs_val_bg = list(np.where(label_val<0)[0])
     elif validation_split:
         split_fg = int(len(list_IDs_fg) * validation_split)
         split_bg = int(len(list_IDs_bg) * validation_split)
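
With signed labels, the former truth tests become sign tests, e.g.:

```python
import numpy as np

label = np.array([-1, 1, 1, -2, 2, -3, 3], dtype=np.int8)
list_IDs_fg = list(np.where(label > 0)[0])  # [1, 2, 4, 6]
list_IDs_bg = list(np.where(label < 0)[0])  # [0, 3, 5]
```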
@@ -287,14 +294,13 @@ def train_cropping(img, label, path_to_model, epochs, batch_size,
               'batch_size': batch_size,
               'n_classes': 2,
               'n_channels': channels,
-              'shuffle': True}
+              'shuffle': True,
+              'augment': (flip_x, flip_y, flip_z, rotate),
+              'train': True}

     # validation parameters
-    params_val = {'dim': (ysh, xsh),
-                  'batch_size': batch_size,
-                  'n_classes': 2,
-                  'n_channels': channels,
-                  'shuffle': False}
+    params_val = copy.deepcopy(params)
+    params_val['train'] = False

     # data generator
     training_generator = DataGeneratorCrop(img, label, list_IDs_fg, list_IDs_bg, **params)
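
Deriving `params_val` from `params` keeps the two dicts from drifting apart as keys are added; only the `train` flag differs, which switches augmentation off in the generator. The pattern in isolation (values are illustrative):

```python
import copy

params = {'dim': (256, 256), 'batch_size': 24, 'shuffle': True,
          'augment': (True, False, False, 10), 'train': True}
params_val = copy.deepcopy(params)
params_val['train'] = False  # validation batches are never augmented
```

Note that the copy also carries over `'shuffle': True`, whereas the old hand-written validation dict used `'shuffle': False`.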
@@ -497,7 +503,7 @@ def crop_volume(img, path_to_model, path_to_final, z_shape, y_shape, x_shape, ba
 # main functions
 #=====================

-def load_and_train(bm, x_scale=256, y_scale=256, z_scale=256):
+def load_and_train(bm, x_scale=256, y_scale=256, z_scale=256, batch_size=24):

     # load training data
     img, label, normalization_parameters, channels = load_cropping_training_data(bm.normalize,
@@ -512,8 +518,8 @@ def load_and_train(bm, x_scale=256, y_scale=256, z_scale=256):
         bm.only, bm.ignore, bm.val_img_data, bm.val_label_data, normalization_parameters, channels)

     # train cropping
-    train_cropping(img, label, bm.path_to_model, bm.cropping_epochs,
-                   bm.batch_size, bm.validation_split,
+    train_cropping(img, label, bm.path_to_model,
+                   bm.cropping_epochs, batch_size, bm.validation_split,
                    bm.flip_x, bm.flip_y, bm.flip_z, bm.rotate,
                    img_val, label_val)

@@ -531,7 +537,7 @@ def load_and_train(bm, x_scale=256, y_scale=256, z_scale=256):

     return cropping_weights, cropping_config, normalization_parameters

-def crop_data(bm):
+def crop_data(bm, batch_size=32):

     # get meta data
     hf = h5py.File(bm.path_to_model, 'r')
@@ -554,7 +560,7 @@ def crop_data(bm):

     # make prediction
     z_lower, z_upper, y_lower, y_upper, x_lower, x_upper, cropped_volume = crop_volume(img, bm.path_to_model,
-        bm.path_to_cropped_image, z_shape, y_shape, x_shape, bm.batch_size, bm.debug_cropping, bm.save_cropped, bm.img_data)
+        bm.path_to_cropped_image, z_shape, y_shape, x_shape, batch_size, bm.debug_cropping, bm.save_cropped, bm.img_data)

     # region of interest
     region_of_interest = np.array([z_lower, z_upper, y_lower, y_upper, x_lower, x_upper])
biomedisa/features/keras_helper.py

@@ -400,6 +400,12 @@ def load_training_data(bm, img_list, label_list, channels, img_in=None, label_in
         label = label_in
         label_names = ['label_1']
     label_dim = label.shape
+    # without scaling, a list of images is padded to a common size and padded
+    # areas are encoded as -1, so the label dtype must be signed
+    label_dtype = label.dtype
+    if label_dtype==np.uint8:
+        label_dtype = np.int16
+    elif label_dtype in [np.uint16, np.uint32]:
+        label_dtype = np.int32
     label = set_labels_to_zero(label, bm.only, bm.ignore)
     if any([bm.x_range, bm.y_range, bm.z_range]):
         if len(label_names)>1:
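
The promotion is needed because unsigned label volumes cannot hold the -1 padding marker introduced further down; int16 still covers the full uint8 value range (int32 likewise covers uint16, though uint32 labels above 2**31-1 would overflow, presumably out of scope here). A quick illustration:

```python
import numpy as np

labels = np.array([0, 1, 255], dtype=np.uint8)   # full uint8 range
promoted = labels.astype(np.int16)               # values preserved
padded = np.append(promoted, np.int16(-1))       # -1 now representable
print(padded)  # [  0   1 255  -1]
```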
@@ -475,6 +481,11 @@ def load_training_data(bm, img_list, label_list, channels, img_in=None, label_in
             img[:,:,:,c] = (img[:,:,:,c] - mean) / std
             img[:,:,:,c] = img[:,:,:,c] * normalization_parameters[1,c] + normalization_parameters[0,c]

+    # pad data
+    if not bm.scaling:
+        img_data_list = [img]
+        label_data_list = [label]
+
     # loop over list of images
     if any(img_list) or type(img_in) is list:
         number_of_images = len(img_names) if any(img_list) else len(img_in)
@@ -498,7 +509,9 @@ def load_training_data(bm, img_list, label_list, channels, img_in=None, label_in
                 label_values, counts = np.unique(a, return_counts=True)
                 print(f'{os.path.basename(label_names[k])}:', 'Labels:', label_values[1:], 'Sizes:', counts[1:])
                 a = img_resize(a, bm.z_scale, bm.y_scale, bm.x_scale, labels=True)
-            label = np.append(label, a, axis=0)
+                label = np.append(label, a, axis=0)
+            else:
+                label_data_list.append(a)

             # append image
             if any(img_list):
@@ -529,11 +542,33 @@ def load_training_data(bm, img_list, label_list, channels, img_in=None, label_in
                     mean, std = np.mean(a[:,:,:,c]), np.std(a[:,:,:,c])
                     a[:,:,:,c] = (a[:,:,:,c] - mean) / std
                     a[:,:,:,c] = a[:,:,:,c] * normalization_parameters[1,c] + normalization_parameters[0,c]
-            img = np.append(img, a, axis=0)
+            if bm.scaling:
+                img = np.append(img, a, axis=0)
+            else:
+                img_data_list.append(a)
+
+    # pad and append data to a single volume
+    if not bm.scaling and len(img_data_list)>1:
+        target_y, target_x = 0, 0
+        for img in img_data_list:
+            target_y = max(target_y, img.shape[1])
+            target_x = max(target_x, img.shape[2])
+        img = np.empty((0, target_y, target_x, channels), dtype=np.float32)
+        label = np.empty((0, target_y, target_x), dtype=label_dtype)
+        for k in range(len(img_data_list)):
+            pad_y = target_y - img_data_list[k].shape[1]
+            pad_x = target_x - img_data_list[k].shape[2]
+            pad_width = [(0, 0), (0, pad_y), (0, pad_x), (0, 0)]
+            tmp = np.pad(img_data_list[k], pad_width, mode='constant', constant_values=0)
+            img = np.append(img, tmp, axis=0)
+            pad_width = [(0, 0), (0, pad_y), (0, pad_x)]
+            tmp = np.pad(label_data_list[k].astype(label_dtype), pad_width, mode='constant', constant_values=-1)
+            label = np.append(label, tmp, axis=0)

     # limit intensity range
-    img[img<0] = 0
-    img[img>1] = 1
+    if bm.normalize:
+        img[img<0] = 0
+        img[img>1] = 1

     if bm.separation:
         allLabels = np.array([0,1])
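
A toy version of the pad-and-stack step: volumes with different in-plane sizes are zero-padded (images) or -1-padded (labels) to a shared footprint, so the padded voxels can be recognised and excluded later:

```python
import numpy as np

labels = [np.ones((2, 3, 3), dtype=np.int16), np.ones((2, 5, 4), dtype=np.int16)]
target_y = max(l.shape[1] for l in labels)   # 5
target_x = max(l.shape[2] for l in labels)   # 4
stacked = np.concatenate([
    np.pad(l, [(0, 0), (0, target_y - l.shape[1]), (0, target_x - l.shape[2])],
           mode='constant', constant_values=-1)
    for l in labels])
print(stacked.shape)          # (4, 5, 4)
print((stacked == -1).sum())  # 22 padded voxels, all from the smaller volume
```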
@@ -541,6 +576,8 @@ def load_training_data(bm, img_list, label_list, channels, img_in=None, label_in
     # get labels
     if allLabels is None:
         allLabels = np.unique(label)
+        index = np.argwhere(allLabels<0)
+        allLabels = np.delete(allLabels, index)

     # labels must be in ascending order
     for k, l in enumerate(allLabels):
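
Since -1 now appears in the label volume purely as a padding marker, it must not become a class; `np.unique` would otherwise list it first:

```python
import numpy as np

allLabels = np.unique(np.array([-1, 0, 1, 2, 5]))
allLabels = np.delete(allLabels, np.argwhere(allLabels < 0))
print(allLabels)  # [0 1 2 5]
```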
@@ -833,25 +870,30 @@ def train_segmentation(bm):
     list_IDs_fg, list_IDs_bg = [], []

     # get IDs of patches
-    if bm.balance:
-        for k in range(0, zsh-bm.z_patch+1, bm.stride_size):
-            for l in range(0, ysh-bm.y_patch+1, bm.stride_size):
-                for m in range(0, xsh-bm.x_patch+1, bm.stride_size):
-                    if np.any(bm.label_data[k:k+bm.z_patch, l:l+bm.y_patch, m:m+bm.x_patch]):
-                        list_IDs_fg.append(k*ysh*xsh+l*xsh+m)
-                    else:
-                        list_IDs_bg.append(k*ysh*xsh+l*xsh+m)
-    else:
-        for k in range(0, zsh-bm.z_patch+1, bm.stride_size):
-            for l in range(0, ysh-bm.y_patch+1, bm.stride_size):
-                for m in range(0, xsh-bm.x_patch+1, bm.stride_size):
-                    if bm.separation:
-                        centerLabel = bm.label_data[k+bm.z_patch//2,l+bm.y_patch//2,m+bm.x_patch//2]
-                        patch = bm.label_data[k:k+bm.z_patch, l:l+bm.y_patch, m:m+bm.x_patch]
-                        if centerLabel>0 and np.any(patch!=centerLabel):
-                            list_IDs_fg.append(k*ysh*xsh+l*xsh+m)
+    for k in range(0, zsh-bm.z_patch+1, bm.stride_size):
+        for l in range(0, ysh-bm.y_patch+1, bm.stride_size):
+            for m in range(0, xsh-bm.x_patch+1, bm.stride_size):
+                patch = bm.label_data[k:k+bm.z_patch, l:l+bm.y_patch, m:m+bm.x_patch]
+                index = k*ysh*xsh+l*xsh+m
+                if not np.any(patch==-1): # ignore padded areas
+                    if bm.balance:
+                        if bm.separation:
+                            centerLabel = bm.label_data[k+bm.z_patch//2,l+bm.y_patch//2,m+bm.x_patch//2]
+                            if centerLabel>0 and np.any(np.logical_and(patch!=centerLabel, patch>0)):
+                                list_IDs_fg.append(index)
+                            elif centerLabel>0 and np.any(patch!=centerLabel):
+                                list_IDs_bg.append(index)
+                        elif np.any(patch>0):
+                            list_IDs_fg.append(index)
+                        else:
+                            list_IDs_bg.append(index)
                     else:
-                        list_IDs_fg.append(k*ysh*xsh+l*xsh+m)
+                        if bm.separation:
+                            centerLabel = bm.label_data[k+bm.z_patch//2,l+bm.y_patch//2,m+bm.x_patch//2]
+                            if centerLabel>0 and np.any(patch!=centerLabel):
+                                list_IDs_fg.append(index)
+                        else:
+                            list_IDs_fg.append(index)

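Each patch is identified by the flat index of its origin voxel, `index = k*ysh*xsh + l*xsh + m`. Decoding is the usual mixed-radix division, sketched below (the data generator presumably performs the equivalent decode when extracting patches):

```python
ysh, xsh = 100, 120          # made-up volume dimensions
k, l, m = 4, 17, 33          # patch origin
index = k*ysh*xsh + l*xsh + m

k2, rest = divmod(index, ysh*xsh)
l2, m2 = divmod(rest, xsh)
assert (k2, l2, m2) == (k, l, m)
```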
@@ -862,25 +904,35 @@ def train_segmentation(bm):
         list_IDs_val_fg, list_IDs_val_bg = [], []

         # get validation IDs of patches
-        if bm.balance and not bm.val_dice:
-            for k in range(0, zsh_val-bm.z_patch+1, bm.validation_stride_size):
-                for l in range(0, ysh_val-bm.y_patch+1, bm.validation_stride_size):
-                    for m in range(0, xsh_val-bm.x_patch+1, bm.validation_stride_size):
-                        if np.any(bm.val_label_data[k:k+bm.z_patch, l:l+bm.y_patch, m:m+bm.x_patch]):
-                            list_IDs_val_fg.append(k*ysh_val*xsh_val+l*xsh_val+m)
-                        else:
-                            list_IDs_val_bg.append(k*ysh_val*xsh_val+l*xsh_val+m)
-        else:
-            for k in range(0, zsh_val-bm.z_patch+1, bm.validation_stride_size):
-                for l in range(0, ysh_val-bm.y_patch+1, bm.validation_stride_size):
-                    for m in range(0, xsh_val-bm.x_patch+1, bm.validation_stride_size):
-                        if bm.separation:
-                            centerLabel = bm.val_label_data[k+bm.z_patch//2,l+bm.y_patch//2,m+bm.x_patch//2]
-                            patch = bm.val_label_data[k:k+bm.z_patch, l:l+bm.y_patch, m:m+bm.x_patch]
-                            if centerLabel>0 and np.any(patch!=centerLabel):
-                                list_IDs_val_fg.append(k*ysh_val*xsh_val+l*xsh_val+m)
+        for k in range(0, zsh_val-bm.z_patch+1, bm.validation_stride_size):
+            for l in range(0, ysh_val-bm.y_patch+1, bm.validation_stride_size):
+                for m in range(0, xsh_val-bm.x_patch+1, bm.validation_stride_size):
+                    patch = bm.val_label_data[k:k+bm.z_patch, l:l+bm.y_patch, m:m+bm.x_patch]
+                    index = k*ysh_val*xsh_val+l*xsh_val+m
+                    if not np.any(patch==-1): # ignore padded areas
+                        if bm.balance and not bm.val_dice:
+                            if bm.separation:
+                                centerLabel = bm.val_label_data[k+bm.z_patch//2,l+bm.y_patch//2,m+bm.x_patch//2]
+                                if centerLabel>0 and np.any(np.logical_and(patch!=centerLabel, patch>0)):
+                                    list_IDs_val_fg.append(index)
+                                elif centerLabel>0 and np.any(patch!=centerLabel):
+                                    list_IDs_val_bg.append(index)
+                            elif np.any(patch>0):
+                                list_IDs_val_fg.append(index)
+                            else:
+                                list_IDs_val_bg.append(index)
                         else:
-                            list_IDs_val_fg.append(k*ysh_val*xsh_val+l*xsh_val+m)
+                            if bm.separation:
+                                centerLabel = bm.val_label_data[k+bm.z_patch//2,l+bm.y_patch//2,m+bm.x_patch//2]
+                                if centerLabel>0 and np.any(patch!=centerLabel):
+                                    list_IDs_val_fg.append(index)
+                            else:
+                                list_IDs_val_fg.append(index)
+
+    # remove padding label
+    bm.label_data[bm.label_data<0]=0
+    if bm.val_img_data is not None:
+        bm.val_label_data[bm.val_label_data<0]=0

     # number of labels
     nb_labels = len(allLabels)
biomedisa-24.8.6.dist-info/METADATA

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: biomedisa
-Version: 24.8.5
+Version: 24.8.6
 Summary: Segmentation of 3D volumetric image data
 Author: Philipp Lösel
 Author-email: philipp.loesel@anu.edu.au
@@ -41,12 +41,12 @@ Biomedisa (https://biomedisa.info) is a free and easy-to-use open-source applica
 ## Installation (command-line based)
 + [Ubuntu 22.04 + Smart Interpolation](https://github.com/biomedisa/biomedisa/blob/master/README/ubuntu2204_interpolation_cli.md)
 + [Ubuntu 22.04 + Smart Interpolation + Deep Learning](https://github.com/biomedisa/biomedisa/blob/master/README/ubuntu2204_cuda11.8_gpu_cli.md)
-+ [Windows 10 + Smart Interpolation + Deep Learning](https://github.com/biomedisa/biomedisa/blob/master/README/windows10_cuda_gpu_cli.md)
++ [Windows 10/11 + Smart Interpolation + Deep Learning](https://github.com/biomedisa/biomedisa/blob/master/README/windows10_cuda_gpu_cli.md)
 + [Windows (WSL) + Smart Interpolation + Deep Learning](https://github.com/biomedisa/biomedisa/blob/master/README/windows_wsl.md)

 ## Installation (3D Slicer extension)
 + [Ubuntu 22.04 + Smart Interpolation + Deep Learning](https://github.com/biomedisa/biomedisa/blob/master/README/ubuntu2204_cuda11.8_gpu_slicer.md)
-+ [Windows 10 + Smart Interpolation](https://github.com/biomedisa/biomedisa/blob/master/README/windows10_cuda_gpu_slicer.md)
++ [Windows 10/11 + Smart Interpolation](https://github.com/biomedisa/biomedisa/blob/master/README/windows10_cuda_gpu_slicer.md)

 ## Installation (browser based)
 + [Ubuntu 22.04](https://github.com/biomedisa/biomedisa/blob/master/README/ubuntu2204_cuda11.8.md)
biomedisa-24.8.6.dist-info/RECORD

@@ -3,8 +3,8 @@ biomedisa/__main__.py,sha256=a1--8vhtztWEloHVtbM43FZLCfrFo4BELgdsgtWE8ls,536
 biomedisa/deeplearning.py,sha256=UD4IeaxITLLqzoaQ1Ul5HI_bN8wINouOGxf14yZ7SWQ,28008
 biomedisa/interpolation.py,sha256=i10aqwEl-wsVU_nQ-zyubhAs27NSKF4ial7LyhaBLv0,17273
 biomedisa/mesh.py,sha256=8-iuVsrfW5JovaMrAez7qSxv1LCU3eiqOdik0s0DV1w,16062
-biomedisa/features/DataGenerator.py,sha256=MINc7emhELPxACOYdnLFHgh-RHUKODLH0DH7zz6u6mc,13116
-biomedisa/features/DataGeneratorCrop.py,sha256=23R4Z-8tB1CsjYTYfhHGovlJpAny_q9OV9hq8kc2GJg,5454
+biomedisa/features/DataGenerator.py,sha256=m7vsKkLhRsVF1BE3Y8YGVx-xx0DWjbBw_inIdZBq6pQ,13111
+biomedisa/features/DataGeneratorCrop.py,sha256=KtGqNadghOd59wIU9hATM_5YgSks95rS1kJ2lsSSX7w,6612
 biomedisa/features/PredictDataGenerator.py,sha256=JH8SPGQm-Y7_Drec2fw3jBUupvpIkQ1FvkDXP7mUjDY,4074
 biomedisa/features/PredictDataGeneratorCrop.py,sha256=HF5tJbGtlJMHr7lMT9IiIdLG2CTjXstbKoOjlZJ93Is,3431
 biomedisa/features/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -12,10 +12,10 @@ biomedisa/features/active_contour.py,sha256=FnPuvYck_KL4xYRFqwzm4Grdm288EdlcLFt8
 biomedisa/features/assd.py,sha256=q9NUQXEoA4Pi3d8b5fmys615CWu06Sm0N9-OGwJOFnw,6537
 biomedisa/features/biomedisa_helper.py,sha256=oIsOmIJ8xUQVwgbzMgw65dXcZmzwePpFQoIdbgLTnF8,32727
 biomedisa/features/create_slices.py,sha256=uSDH1OcEYc5BFPZHSy3UpS4P2DuoVnxOZ-l7wmyT_Po,13108
-biomedisa/features/crop_helper.py,sha256=R0_uZ8rxg1l0-HxP1fYBWWCSUjvSvTBSgW47Nq8eK4s,23789
+biomedisa/features/crop_helper.py,sha256=wNVbb1c6qNmJFbDJ2jrjjvqJw-EMLkJt1HLjEolMwAA,24089
 biomedisa/features/curvop_numba.py,sha256=AjKQJcUBoURTB8pq1HmugQYpBwBELthhcEu51_r_xPI,7049
 biomedisa/features/django_env.py,sha256=LNrZ6rBHZ5I0FaWa5xN8K-ASPgq0r5dGDEUI56HzJxE,8615
-biomedisa/features/keras_helper.py,sha256=Emf-Fx9rB3jECL7XgK8DYMRwO27ynijt5YdnwMlDMXY,62747
+biomedisa/features/keras_helper.py,sha256=twNuZtgfE5Bcw790ZLv4Rw8xFgXaQQb4ciV9qGdMt9w,65209
 biomedisa/features/nc_reader.py,sha256=RoRMwu3ELSNfoV3qZtaT2OWACnXb2EhNFu_DAF1T93o,7406
 biomedisa/features/pid.py,sha256=Jmn1VIp0fBlgBrqZ-yUIQVVb5-NAxNBdibXALVr2PPI,2545
 biomedisa/features/process_image.py,sha256=VtS3fGDvglqJiiJLPK1toe76J58j914NJ8XQKg3CRwo,11091
@@ -37,8 +37,8 @@ biomedisa/features/random_walk/pyopencl_large.py,sha256=q79AxG3p3qFjxfiAZfUK9I5B
 biomedisa/features/random_walk/pyopencl_small.py,sha256=opNlS-qzOa9qWafBNJdvf6r1aRAFf7_JXf6ISDnkdXE,17068
 biomedisa/features/random_walk/rw_large.py,sha256=ZnITvk00Y11ZZlGuBRaJO1EwU0wYBdEwdpj9vvXCqF4,19805
 biomedisa/features/random_walk/rw_small.py,sha256=RPzZe24YrEwYelJukDjvqaoD_SyhgdriEi7uV3kZGXI,14881
-biomedisa-24.8.5.dist-info/LICENSE,sha256=sehayP6UhydNnmstfL4yFR3genMRdpuUh6uZVWJN1H0,14152
-biomedisa-24.8.5.dist-info/METADATA,sha256=IMCcEHNKZKWZqsMN8vu4h6S812_vkYyKj0NNAiebrrY,10569
-biomedisa-24.8.5.dist-info/WHEEL,sha256=OVMc5UfuAQiSplgO0_WdW7vXVGAt9Hdd6qtN4HotdyA,91
-biomedisa-24.8.5.dist-info/top_level.txt,sha256=opsf1Eb4vCguPSxev4HHSeiUKCccT_C_RcUCdAYbHWQ,10
-biomedisa-24.8.5.dist-info/RECORD,,
+biomedisa-24.8.6.dist-info/LICENSE,sha256=sehayP6UhydNnmstfL4yFR3genMRdpuUh6uZVWJN1H0,14152
+biomedisa-24.8.6.dist-info/METADATA,sha256=OKGPd1oxdzvG17a0ITHB7j4e51NjHpIel2tK7rYkU74,10575
+biomedisa-24.8.6.dist-info/WHEEL,sha256=PZUExdf71Ui_so67QXpySuHtCi3-J3wvF4ORK6k_S8U,91
+biomedisa-24.8.6.dist-info/top_level.txt,sha256=opsf1Eb4vCguPSxev4HHSeiUKCccT_C_RcUCdAYbHWQ,10
+biomedisa-24.8.6.dist-info/RECORD,,
biomedisa-24.8.6.dist-info/WHEEL

@@ -1,5 +1,5 @@
 Wheel-Version: 1.0
-Generator: setuptools (75.2.0)
+Generator: setuptools (75.6.0)
 Root-Is-Purelib: true
 Tag: py3-none-any