biomedisa-24.8.1.tar.gz → biomedisa-24.8.2.tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {biomedisa-24.8.1 → biomedisa-24.8.2}/PKG-INFO +1 -1
- {biomedisa-24.8.1 → biomedisa-24.8.2}/pyproject.toml +1 -1
- {biomedisa-24.8.1 → biomedisa-24.8.2}/src/biomedisa/deeplearning.py +8 -1
- {biomedisa-24.8.1 → biomedisa-24.8.2}/src/biomedisa/features/DataGenerator.py +9 -1
- {biomedisa-24.8.1 → biomedisa-24.8.2}/src/biomedisa/features/keras_helper.py +126 -45
- {biomedisa-24.8.1 → biomedisa-24.8.2}/src/biomedisa.egg-info/PKG-INFO +1 -1
- {biomedisa-24.8.1 → biomedisa-24.8.2}/LICENSE +0 -0
- {biomedisa-24.8.1 → biomedisa-24.8.2}/README.md +0 -0
- {biomedisa-24.8.1 → biomedisa-24.8.2}/setup.cfg +0 -0
- {biomedisa-24.8.1 → biomedisa-24.8.2}/src/biomedisa/__init__.py +0 -0
- {biomedisa-24.8.1 → biomedisa-24.8.2}/src/biomedisa/__main__.py +0 -0
- {biomedisa-24.8.1 → biomedisa-24.8.2}/src/biomedisa/features/DataGeneratorCrop.py +0 -0
- {biomedisa-24.8.1 → biomedisa-24.8.2}/src/biomedisa/features/PredictDataGenerator.py +0 -0
- {biomedisa-24.8.1 → biomedisa-24.8.2}/src/biomedisa/features/PredictDataGeneratorCrop.py +0 -0
- {biomedisa-24.8.1 → biomedisa-24.8.2}/src/biomedisa/features/__init__.py +0 -0
- {biomedisa-24.8.1 → biomedisa-24.8.2}/src/biomedisa/features/active_contour.py +0 -0
- {biomedisa-24.8.1 → biomedisa-24.8.2}/src/biomedisa/features/amira_to_np/__init__.py +0 -0
- {biomedisa-24.8.1 → biomedisa-24.8.2}/src/biomedisa/features/amira_to_np/amira_data_stream.py +0 -0
- {biomedisa-24.8.1 → biomedisa-24.8.2}/src/biomedisa/features/amira_to_np/amira_grammar.py +0 -0
- {biomedisa-24.8.1 → biomedisa-24.8.2}/src/biomedisa/features/amira_to_np/amira_header.py +0 -0
- {biomedisa-24.8.1 → biomedisa-24.8.2}/src/biomedisa/features/amira_to_np/amira_helper.py +0 -0
- {biomedisa-24.8.1 → biomedisa-24.8.2}/src/biomedisa/features/assd.py +0 -0
- {biomedisa-24.8.1 → biomedisa-24.8.2}/src/biomedisa/features/biomedisa_helper.py +0 -0
- {biomedisa-24.8.1 → biomedisa-24.8.2}/src/biomedisa/features/create_slices.py +0 -0
- {biomedisa-24.8.1 → biomedisa-24.8.2}/src/biomedisa/features/crop_helper.py +0 -0
- {biomedisa-24.8.1 → biomedisa-24.8.2}/src/biomedisa/features/curvop_numba.py +0 -0
- {biomedisa-24.8.1 → biomedisa-24.8.2}/src/biomedisa/features/django_env.py +0 -0
- {biomedisa-24.8.1 → biomedisa-24.8.2}/src/biomedisa/features/nc_reader.py +0 -0
- {biomedisa-24.8.1 → biomedisa-24.8.2}/src/biomedisa/features/pid.py +0 -0
- {biomedisa-24.8.1 → biomedisa-24.8.2}/src/biomedisa/features/process_image.py +0 -0
- {biomedisa-24.8.1 → biomedisa-24.8.2}/src/biomedisa/features/pycuda_test.py +0 -0
- {biomedisa-24.8.1 → biomedisa-24.8.2}/src/biomedisa/features/random_walk/__init__.py +0 -0
- {biomedisa-24.8.1 → biomedisa-24.8.2}/src/biomedisa/features/random_walk/gpu_kernels.py +0 -0
- {biomedisa-24.8.1 → biomedisa-24.8.2}/src/biomedisa/features/random_walk/pycuda_large.py +0 -0
- {biomedisa-24.8.1 → biomedisa-24.8.2}/src/biomedisa/features/random_walk/pycuda_large_allx.py +0 -0
- {biomedisa-24.8.1 → biomedisa-24.8.2}/src/biomedisa/features/random_walk/pycuda_small.py +0 -0
- {biomedisa-24.8.1 → biomedisa-24.8.2}/src/biomedisa/features/random_walk/pycuda_small_allx.py +0 -0
- {biomedisa-24.8.1 → biomedisa-24.8.2}/src/biomedisa/features/random_walk/pyopencl_large.py +0 -0
- {biomedisa-24.8.1 → biomedisa-24.8.2}/src/biomedisa/features/random_walk/pyopencl_small.py +0 -0
- {biomedisa-24.8.1 → biomedisa-24.8.2}/src/biomedisa/features/random_walk/rw_large.py +0 -0
- {biomedisa-24.8.1 → biomedisa-24.8.2}/src/biomedisa/features/random_walk/rw_small.py +0 -0
- {biomedisa-24.8.1 → biomedisa-24.8.2}/src/biomedisa/features/remove_outlier.py +0 -0
- {biomedisa-24.8.1 → biomedisa-24.8.2}/src/biomedisa/features/split_volume.py +0 -0
- {biomedisa-24.8.1 → biomedisa-24.8.2}/src/biomedisa/interpolation.py +0 -0
- {biomedisa-24.8.1 → biomedisa-24.8.2}/src/biomedisa/mesh.py +0 -0
- {biomedisa-24.8.1 → biomedisa-24.8.2}/src/biomedisa.egg-info/SOURCES.txt +0 -0
- {biomedisa-24.8.1 → biomedisa-24.8.2}/src/biomedisa.egg-info/dependency_links.txt +0 -0
- {biomedisa-24.8.1 → biomedisa-24.8.2}/src/biomedisa.egg-info/top_level.txt +0 -0
src/biomedisa/deeplearning.py

@@ -76,7 +76,8 @@ def deep_learning(img_data, label_data=None, val_img_data=None, val_label_data=N
         path=None, success=True, return_probs=False, patch_normalization=False,
         z_patch=64, y_patch=64, x_patch=64, path_to_logfile=None, img_id=None, label_id=None,
         remote=False, queue=0, username=None, shortfilename=None, dice_loss=False,
-        acwe=False, acwe_alpha=1.0, acwe_smooth=1, acwe_steps=3, clean=None, fill=None):
+        acwe=False, acwe_alpha=1.0, acwe_smooth=1, acwe_steps=3, clean=None, fill=None,
+        separation=False, mask=None, refinement=False):

     # create biomedisa
     bm = Biomedisa()
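Taken together, the three new keyword arguments switch the pipeline into instance mode: separation trains and predicts per-object boundaries, mask supplies a prior segmentation that steers patch selection, and refinement re-predicts only patches on the mask surface. A hedged usage sketch (file paths are placeholders; the path_to_model and predict keywords are assumed from the existing API, not part of this diff):

    from biomedisa.deeplearning import deep_learning

    # instance separation during prediction, guided by an existing semantic mask
    # (paths and the path_to_model/predict keywords are illustrative assumptions)
    results = deep_learning('volume.tif', path_to_model='network.h5', predict=True,
                            separation=True, mask='mask.tif')

    # refine a coarse segmentation on the full-size data instead
    results = deep_learning('volume.tif', path_to_model='network.h5', predict=True,
                            refinement=True, mask='mask.tif')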
@@ -479,6 +480,12 @@ if __name__ == '__main__':
                         help='Processing queue when using a remote server')
     parser.add_argument('-hf','--header_file', type=str, metavar='PATH', default=None,
                         help='Location of header file')
+    parser.add_argument('-s','--separation', action='store_true', default=False,
+                        help='Instance segmentation of objects such as cells or rock particles')
+    parser.add_argument('-m','--mask', type=str, metavar='PATH', default=None,
+                        help='Location of mask')
+    parser.add_argument('-rf','--refinement', action='store_true', default=False,
+                        help='Refine segmentation on full size data')
     parser.add_argument('-ext','--extension', type=str, default='.tif',
                         help='Save data for example as NRRD file using --extension=".nrrd"')
     bm = parser.parse_args()
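On the command line the new switches ride along with an ordinary prediction call; a sketch (only -s, -m and -rf are introduced by this diff — the positional image/network arguments and the -p/--predict flag follow the existing CLI and are assumptions here):

    python -m biomedisa.deeplearning volume.tif network.h5 -p -s -m mask.tif
    python -m biomedisa.deeplearning volume.tif network.h5 -p -rf -m mask.tif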
src/biomedisa/features/DataGenerator.py

@@ -107,7 +107,7 @@ def random_rotation_3d(image, max_angle=180):
 class DataGenerator(tf.keras.utils.Sequence):
     'Generates data for Keras'
     def __init__(self, img, label, list_IDs_fg, list_IDs_bg, shuffle, train, classification, batch_size=32, dim=(32,32,32),
-                 dim_img=(32,32,32), n_classes=10, n_channels=1, augment=(False,False,False,False,0), patch_normalization=False):
+                 dim_img=(32,32,32), n_classes=10, n_channels=1, augment=(False,False,False,False,0), patch_normalization=False, separation=False):
         'Initialization'
         self.dim = dim
         self.dim_img = dim_img
@@ -124,6 +124,7 @@ class DataGenerator(tf.keras.utils.Sequence):
         self.classification = classification
         self.on_epoch_end()
         self.patch_normalization = patch_normalization
+        self.separation = separation

     def __len__(self):
         'Denotes the number of batches per epoch'
@@ -247,6 +248,13 @@ class DataGenerator(tf.keras.utils.Sequence):
                 tmp_X = self.img[k:k+self.dim[0],l:l+self.dim[1],m:m+self.dim[2]]
                 tmp_y = self.label[k:k+self.dim[0],l:l+self.dim[1],m:m+self.dim[2]]

+                # center label gets value 1
+                if self.separation:
+                    centerLabel = tmp_y[self.dim[0]//2,self.dim[1]//2,self.dim[2]//2]
+                    tmp_y = tmp_y.copy()
+                    tmp_y[tmp_y!=centerLabel]=0
+                    tmp_y[tmp_y==centerLabel]=1
+
                 # augmentation
                 if self.train:

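The new branch binarizes each training patch around whatever instance occupies its centre voxel: that instance becomes class 1, all other instances and the background become 0, so the network always learns "centre object vs. rest". A self-contained toy reproduction (NumPy only, not Biomedisa code):

    import numpy as np

    # toy 4x4x4 patch with two touching instances labelled 1 and 2
    tmp_y = np.ones((4, 4, 4), dtype=np.uint8)
    tmp_y[:, 2:, :] = 2

    dim = tmp_y.shape
    centerLabel = tmp_y[dim[0]//2, dim[1]//2, dim[2]//2]  # instance at the centre (here: 2)
    tmp_y = tmp_y.copy()                # work on a copy, the full label volume stays intact
    tmp_y[tmp_y != centerLabel] = 0     # competing instances and background -> 0
    tmp_y[tmp_y == centerLabel] = 1     # centre instance -> 1

    assert set(np.unique(tmp_y)) == {0, 1}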
src/biomedisa/features/keras_helper.py

@@ -355,7 +355,7 @@ def read_img_list(img_list, label_list, temp_img_dir, temp_label_dir):
         label_names.append(label_name)
     return img_names, label_names

-def load_training_data(normalize, img_list, label_list, channels, x_scale, y_scale, z_scale, scaling,
+def load_training_data(bm, normalize, img_list, label_list, channels, x_scale, y_scale, z_scale, scaling,
              crop_data, labels_to_compute, labels_to_remove, img_in=None, label_in=None,
              normalization_parameters=None, allLabels=None, header=None, extension='.tif',
              x_puffer=25, y_puffer=25, z_puffer=25):
@@ -382,8 +382,9 @@ def load_training_data(normalize, img_list, label_list, channels, x_scale, y_sca
         label_names = ['label_1']
     label_dim = label.shape
     label = set_labels_to_zero(label, labels_to_compute, labels_to_remove)
-    label_values, counts = np.unique(label, return_counts=True)
-    print(f'{os.path.basename(label_names[0])}:', 'Labels:', label_values[1:], 'Sizes:', counts[1:])
+    if scaling:
+        label_values, counts = np.unique(label, return_counts=True)
+        print(f'{os.path.basename(label_names[0])}:', 'Labels:', label_values[1:], 'Sizes:', counts[1:])
     if crop_data:
         argmin_z,argmax_z,argmin_y,argmax_y,argmin_x,argmax_x = predict_blocksize(label, x_puffer, y_puffer, z_puffer)
         label = np.copy(label[argmin_z:argmax_z,argmin_y:argmax_y,argmin_x:argmax_x], order='C')
@@ -465,8 +466,9 @@ def load_training_data(normalize, img_list, label_list, channels, x_scale, y_sca
         a = label_in[k]
         label_dim = a.shape
         a = set_labels_to_zero(a, labels_to_compute, labels_to_remove)
-        label_values, counts = np.unique(a, return_counts=True)
-        print(f'{os.path.basename(label_names[k])}:', 'Labels:', label_values[1:], 'Sizes:', counts[1:])
+        if scaling:
+            label_values, counts = np.unique(a, return_counts=True)
+            print(f'{os.path.basename(label_names[k])}:', 'Labels:', label_values[1:], 'Sizes:', counts[1:])
         if crop_data:
             argmin_z,argmax_z,argmin_y,argmax_y,argmin_x,argmax_x = predict_blocksize(a, x_puffer, y_puffer, z_puffer)
             a = np.copy(a[argmin_z:argmax_z,argmin_y:argmax_y,argmin_x:argmax_x], order='C')
@@ -509,13 +511,16 @@ def load_training_data(normalize, img_list, label_list, channels, x_scale, y_sca
     img[img<0] = 0
     img[img>1] = 1

-    # get labels
-    if allLabels is None:
-        allLabels = np.unique(label)
+    if bm.separation:
+        allLabels = np.array([0,1])
+    else:
+        # get labels
+        if allLabels is None:
+            allLabels = np.unique(label)

-    # labels must be in ascending order
-    for k, l in enumerate(allLabels):
-        label[label==l] = k
+        # labels must be in ascending order
+        for k, l in enumerate(allLabels):
+            label[label==l] = k

     return img, label, allLabels, normalization_parameters, header, extension, channels

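In separation mode the class set is fixed to {0,1}, matching the binarized patches produced by the DataGenerator; otherwise the class values are compacted to the consecutive range 0..n-1 that one-hot encoding expects. The remapping is safe to do in place because np.unique returns sorted values, so each value is only ever lowered to its index. A standalone illustration:

    import numpy as np

    label = np.array([0, 3, 7, 3, 0, 7])   # arbitrary, non-consecutive class values
    allLabels = np.unique(label)            # sorted: [0 3 7]

    for k, l in enumerate(allLabels):       # rewrite each value to its index
        label[label == l] = k

    print(label)                            # [0 1 2 1 0 2]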
@@ -775,7 +780,7 @@ def train_semantic_segmentation(bm,
                   header=None, extension='.tif'):

     # training data
-    img, label, allLabels, normalization_parameters, header, extension, bm.channels = load_training_data(bm.normalize,
+    img, label, allLabels, normalization_parameters, header, extension, bm.channels = load_training_data(bm, bm.normalize,
         img_list, label_list, None, bm.x_scale, bm.y_scale, bm.z_scale, bm.scaling, bm.crop_data,
         bm.only, bm.ignore, img, label, None, None, header, extension)

@@ -789,7 +794,7 @@ def train_semantic_segmentation(bm,

     # validation data
     if any(val_img_list) or img_val is not None:
-        img_val, label_val, _, _, _, _, _ = load_training_data(bm.normalize,
+        img_val, label_val, _, _, _, _, _ = load_training_data(bm, bm.normalize,
             val_img_list, val_label_list, bm.channels, bm.x_scale, bm.y_scale, bm.z_scale, bm.scaling, bm.crop_data,
             bm.only, bm.ignore, img_val, label_val, normalization_parameters, allLabels)

@@ -817,7 +822,13 @@ def train_semantic_segmentation(bm,
     for k in range(0, zsh-bm.z_patch+1, bm.stride_size):
         for l in range(0, ysh-bm.y_patch+1, bm.stride_size):
             for m in range(0, xsh-bm.x_patch+1, bm.stride_size):
-                list_IDs_fg.append(k*ysh*xsh+l*xsh+m)
+                if bm.separation:
+                    centerLabel = label[k+bm.z_patch//2,l+bm.y_patch//2,m+bm.x_patch//2]
+                    patch = label[k:k+bm.z_patch, l:l+bm.y_patch, m:m+bm.x_patch]
+                    if centerLabel>0 and np.any(patch!=centerLabel):
+                        list_IDs_fg.append(k*ysh*xsh+l*xsh+m)
+                else:
+                    list_IDs_fg.append(k*ysh*xsh+l*xsh+m)

     if img_val is not None:

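So in separation mode a location only enters the training list when its centre voxel lies inside an instance and the surrounding patch also contains something else (another instance or background), i.e. the patch straddles a boundary worth learning; plain training keeps every position as before. A toy version of the predicate:

    import numpy as np

    def is_separation_patch(label, k, l, m, p=4):
        # mirrors the new selection rule on a toy scale
        centerLabel = label[k + p//2, l + p//2, m + p//2]
        patch = label[k:k+p, l:l+p, m:m+p]
        return bool(centerLabel > 0 and np.any(patch != centerLabel))

    label = np.ones((8, 8, 8), dtype=np.uint8)   # instance 1 ...
    label[:, 4:, :] = 2                          # ... touching instance 2

    print(is_separation_patch(label, 0, 2, 0))   # True: centre in instance 2, patch sees instance 1
    print(is_separation_patch(label, 0, 0, 0))   # False: patch lies entirely inside instance 1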
@@ -840,7 +851,13 @@ def train_semantic_segmentation(bm,
        for k in range(0, zsh_val-bm.z_patch+1, bm.validation_stride_size):
            for l in range(0, ysh_val-bm.y_patch+1, bm.validation_stride_size):
                for m in range(0, xsh_val-bm.x_patch+1, bm.validation_stride_size):
-                   list_IDs_val_fg.append(k*ysh_val*xsh_val+l*xsh_val+m)
+                   if bm.separation:
+                       centerLabel = label_val[k+bm.z_patch//2,l+bm.y_patch//2,m+bm.x_patch//2]
+                       patch = label_val[k:k+bm.z_patch, l:l+bm.y_patch, m:m+bm.x_patch]
+                       if centerLabel>0 and np.any(patch!=centerLabel):
+                           list_IDs_val_fg.append(k*ysh_val*xsh_val+l*xsh_val+m)
+                   else:
+                       list_IDs_val_fg.append(k*ysh_val*xsh_val+l*xsh_val+m)

     # number of labels
     nb_labels = len(allLabels)
@@ -855,7 +872,8 @@ def train_semantic_segmentation(bm,
               'n_classes': nb_labels,
               'n_channels': bm.channels,
               'augment': (bm.flip_x, bm.flip_y, bm.flip_z, bm.swapaxes, bm.rotate),
-              'patch_normalization': bm.patch_normalization}
+              'patch_normalization': bm.patch_normalization,
+              'separation': bm.separation}

     # data generator
     validation_generator = None
@@ -1037,6 +1055,23 @@ def append_ghost_areas(bm, img):
         img = np.append(img, img[:,:,-x_rest:][:,:,::-1], axis=2)
     return img, z_rest, y_rest, x_rest

+def gradient(volData):
+    grad = np.zeros(volData.shape, dtype=np.uint8)
+    tmp = np.abs(volData[:-1] - volData[1:])
+    tmp[tmp>0]=1
+    grad[:-1] += tmp
+    grad[1:] += tmp
+    tmp = np.abs(volData[:,:-1] - volData[:,1:])
+    tmp[tmp>0]=1
+    grad[:,:-1] += tmp
+    grad[:,1:] += tmp
+    tmp = np.abs(volData[:,:,:-1] - volData[:,:,1:])
+    tmp[tmp>0]=1
+    grad[:,:,:-1] += tmp
+    grad[:,:,1:] += tmp
+    grad[grad>0]=1
+    return grad
+
 def predict_semantic_segmentation(bm,
     header, img_header,
     region_of_interest, extension, img_data,
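gradient() produces a one-voxel-thick boundary map: a voxel is set to 1 exactly when it differs from a face neighbour along z, y or x. The uint8 subtraction in the code wraps around, but the tmp>0 test still reduces to plain inequality, so an axis-generic rewrite with != gives the same result; a self-contained check (toy input, same rule):

    import numpy as np

    def boundary_map(vol):
        # 1 wherever a voxel differs from a face neighbour along any axis
        grad = np.zeros(vol.shape, dtype=np.uint8)
        for axis in range(vol.ndim):
            lo = [slice(None)] * vol.ndim; lo[axis] = slice(None, -1)
            hi = [slice(None)] * vol.ndim; hi[axis] = slice(1, None)
            tmp = (vol[tuple(lo)] != vol[tuple(hi)]).astype(np.uint8)
            grad[tuple(lo)] += tmp
            grad[tuple(hi)] += tmp
        grad[grad > 0] = 1
        return grad

    vol = np.zeros((4, 4, 4), dtype=np.uint8)
    vol[:, :2, :] = 1                      # two half-spaces meeting between y=1 and y=2
    edges = boundary_map(vol)
    print(edges[:, 1:3, :].all())          # True: both sides of the interface are marked
    print(edges[:, 0, :].any())            # False: interior voxels stay 0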
@@ -1072,6 +1107,13 @@ def predict_semantic_segmentation(bm,
         zsh = len(tif.pages)
         ysh, xsh = tif.pages[0].shape

+    # load mask
+    if bm.separation or bm.refinement:
+        mask, _ = load_data(bm.mask)
+        mask = mask.reshape(zsh, ysh, xsh, 1)
+        mask, _, _, _ = append_ghost_areas(bm, mask)
+        mask = mask.reshape(mask.shape[:-1])
+
     # determine new image size after appending ghost areas to make image dimensions divisible by patch size
     z_rest = bm.z_patch - (zsh % bm.z_patch)
     if z_rest == bm.z_patch:
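The mask is pushed through the same append_ghost_areas mirror padding as the image, so patch offsets computed on the padded grid address the same voxels in both volumes. A minimal sketch of the padding idea for one axis (my own reduction, assuming the mirrored-append behaviour visible in the context above):

    import numpy as np

    def mirror_pad_z(img, z_patch):
        # append a reflected slab so the z-extent becomes divisible by z_patch
        z_rest = z_patch - (img.shape[0] % z_patch)
        if z_rest == z_patch:
            return img, 0
        return np.append(img, img[-z_rest:][::-1], axis=0), z_rest

    vol = np.random.rand(10, 4, 4)
    padded, z_rest = mirror_pad_z(vol, z_patch=4)
    print(padded.shape, z_rest)            # (12, 4, 4) 2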
@@ -1094,9 +1136,20 @@ def predict_semantic_segmentation(bm,
         for k in range(0, zsh-bm.z_patch+1, bm.stride_size):
             for l in range(0, ysh-bm.y_patch+1, bm.stride_size):
                 for m in range(0, xsh-bm.x_patch+1, bm.stride_size):
-                    list_IDs.append(k*ysh*xsh+l*xsh+m)
+                    if bm.separation:
+                        centerLabel = mask[k+bm.z_patch//2,l+bm.y_patch//2,m+bm.x_patch//2]
+                        patch = mask[k:k+bm.z_patch, l:l+bm.y_patch, m:m+bm.x_patch]
+                        if centerLabel>0 and np.any(patch!=centerLabel):
+                            list_IDs.append(k*ysh*xsh+l*xsh+m)
+                    elif bm.refinement:
+                        patch = mask[k:k+bm.z_patch, l:l+bm.y_patch, m:m+bm.x_patch]
+                        if np.any(patch==0) and np.any(patch!=0):
+                            list_IDs.append(k*ysh*xsh+l*xsh+m)
+                    else:
+                        list_IDs.append(k*ysh*xsh+l*xsh+m)

         # make length of list divisible by batch size
+        max_i = len(list_IDs)
         rest = bm.batch_size - (len(list_IDs) % bm.batch_size)
         list_IDs = list_IDs + list_IDs[:rest]

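Prediction now mirrors the training-side selection: separation keeps patches whose centre sits in a mask instance that does not fill the whole patch, refinement keeps patches where the mask holds both background and foreground (the segmentation surface), and the default keeps everything. max_i records the true list length so the duplicates appended for batch-size padding can be ignored later. A toy check of the refinement predicate:

    import numpy as np

    def is_boundary_patch(mask, k, l, m, p=4):
        patch = mask[k:k+p, l:l+p, m:m+p]
        return bool(np.any(patch == 0) and np.any(patch != 0))

    mask = np.zeros((8, 8, 8), dtype=np.uint8)
    mask[:, :, :4] = 1                       # foreground half-space

    print(is_boundary_patch(mask, 0, 0, 2))  # True: straddles the surface between x=3 and x=4
    print(is_boundary_patch(mask, 0, 0, 0))  # False: pure foreground
    print(is_boundary_patch(mask, 0, 0, 4))  # False: pure background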
@@ -1184,27 +1237,37 @@ def predict_semantic_segmentation(bm,
                 img, _, _, _ = append_ghost_areas(bm, img)

                 # list of IDs
-                list_IDs = []
+                list_IDs_block = []

                 # get Ids of patches
                 for l in range(0, ysh-bm.y_patch+1, bm.stride_size):
                     for m in range(0, xsh-bm.x_patch+1, bm.stride_size):
-                        list_IDs.append(z*ysh*xsh+l*xsh+m)
+                        if bm.separation:
+                            centerLabel = mask[z+bm.z_patch//2,l+bm.y_patch//2,m+bm.x_patch//2]
+                            patch = mask[z:z+bm.z_patch, l:l+bm.y_patch, m:m+bm.x_patch]
+                            if centerLabel>0 and np.any(patch!=centerLabel):
+                                list_IDs_block.append(z*ysh*xsh+l*xsh+m)
+                        elif bm.refinement:
+                            patch = mask[z:z+bm.z_patch, l:l+bm.y_patch, m:m+bm.x_patch]
+                            if np.any(patch==0) and np.any(patch!=0):
+                                list_IDs_block.append(z*ysh*xsh+l*xsh+m)
+                        else:
+                            list_IDs_block.append(z*ysh*xsh+l*xsh+m)

                 # make length of list divisible by batch size
-                max_i = len(list_IDs)
-                rest = bm.batch_size - (len(list_IDs) % bm.batch_size)
-                list_IDs = list_IDs + list_IDs[:rest]
+                max_i_block = len(list_IDs_block)
+                rest = bm.batch_size - (len(list_IDs_block) % bm.batch_size)
+                list_IDs_block = list_IDs_block + list_IDs_block[:rest]

                 # number of patches
-                nb_patches = len(list_IDs)
+                nb_patches = len(list_IDs_block)

                 # allocate tmp probabilities array
                 probs = np.zeros((bm.z_patch, ysh, xsh, nb_labels), dtype=np.float32)

             # get one batch of image patches
             for step in range(nb_patches//bm.batch_size):
-                for i, ID in enumerate(list_IDs[step*bm.batch_size:(step+1)*bm.batch_size]):
+                for i, ID in enumerate(list_IDs_block[step*bm.batch_size:(step+1)*bm.batch_size]):

                     # get patch indices
                     k=0 if load_blockwise else ID // (ysh*xsh)
@@ -1225,29 +1288,46 @@ def predict_semantic_segmentation(bm,
             Y = model.predict(X, verbose=0, steps=None, batch_size=bm.batch_size)

             # loop over result patches
-            for i, ID in enumerate(list_IDs[step*bm.batch_size:(step+1)*bm.batch_size]):
+            for i, ID in enumerate(list_IDs_block[step*bm.batch_size:(step+1)*bm.batch_size]):
                 rest = ID % (ysh*xsh)
                 l = rest // xsh
                 m = rest % xsh
-                if step*bm.batch_size+i < max_i:
-                    probs[:,l:l+bm.y_patch,m:m+bm.x_patch] += Y[i]
-
-            # overlap in z direction
-            if bm.stride_size < bm.z_patch:
-                if j>0:
-                    probs[:bm.stride_size] += overlap
-                overlap = probs[bm.stride_size:].copy()
-
-            # calculate result
-            if z==z_indices[-1]:
-                label[z:z+bm.z_patch] = np.argmax(probs, axis=-1).astype(np.uint8)
-                if bm.return_probs:
-                    final[z:z+bm.z_patch] = probs
-            else:
-                block_zsh = min(bm.stride_size, bm.z_patch)
-                label[z:z+block_zsh] = np.argmax(probs[:block_zsh], axis=-1).astype(np.uint8)
-                if bm.return_probs:
-                    final[z:z+block_zsh] = probs[:block_zsh]
+                if step*bm.batch_size+i < max_i_block:
+                    if bm.separation:
+                        patch = np.argmax(Y[i], axis=-1).astype(np.uint8)
+                        label[z:z+bm.z_patch,l:l+bm.y_patch,m:m+bm.x_patch] += gradient(patch)
+                    else:
+                        probs[:,l:l+bm.y_patch,m:m+bm.x_patch] += Y[i]
+
+            if not bm.separation:
+                # overlap in z direction
+                if bm.stride_size < bm.z_patch:
+                    if j>0:
+                        probs[:bm.stride_size] += overlap
+                    overlap = probs[bm.stride_size:].copy()
+
+                # calculate result
+                if z==z_indices[-1]:
+                    label[z:z+bm.z_patch] = np.argmax(probs, axis=-1).astype(np.uint8)
+                    if bm.return_probs:
+                        final[z:z+bm.z_patch] = probs
+                else:
+                    block_zsh = min(bm.stride_size, bm.z_patch)
+                    label[z:z+block_zsh] = np.argmax(probs[:block_zsh], axis=-1).astype(np.uint8)
+                    if bm.return_probs:
+                        final[z:z+block_zsh] = probs[:block_zsh]
+
+        # refine mask data with result
+        if bm.refinement:
+            # loop over boundary patches
+            for i, ID in enumerate(list_IDs):
+                if i < max_i:
+                    k = ID // (ysh*xsh)
+                    rest = ID % (ysh*xsh)
+                    l = rest // xsh
+                    m = rest % xsh
+                    mask[k:k+bm.z_patch, l:l+bm.y_patch, m:m+bm.x_patch] = label[k:k+bm.z_patch, l:l+bm.y_patch, m:m+bm.x_patch]
+            label = mask

        # remove appendix
        if bm.return_probs:
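Two new result paths emerge here. In separation mode each patch prediction is collapsed to its boundary map with gradient() and summed into label, so the output is an instance-boundary image rather than remapped class indices. In refinement mode the re-predicted boundary patches are written back into the prior mask, and the patched mask becomes the result; a toy sketch of that write-back (patch corners are illustrative, not Biomedisa code):

    import numpy as np

    p = 4
    mask = np.zeros((8, 8, 8), dtype=np.uint8)   # coarse prior segmentation
    mask[:, :, :4] = 1
    label = np.zeros_like(mask)                  # fresh network output on the full grid
    label[:, :, :3] = 1                          # refined surface sits one voxel further in

    # copy the new result back only inside the queued boundary patches
    for k, l, m in [(0, 0, 2), (4, 4, 2)]:
        mask[k:k+p, l:l+p, m:m+p] = label[k:k+p, l:l+p, m:m+p]
    label = mask                                 # the refined mask becomes the result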
@@ -1287,7 +1367,8 @@ def predict_semantic_segmentation(bm,
         label = np.copy(tmp, order='C')

     # get result
-    label = get_labels(label, bm.allLabels)
+    if not bm.separation:
+        label = get_labels(label, bm.allLabels)
     results['regular'] = label

     # load header from file