biomedisa 24.8.10__py3-none-any.whl → 25.6.1__py3-none-any.whl
This diff compares the contents of two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
- biomedisa/deeplearning.py +35 -9
- biomedisa/features/DataGenerator.py +192 -123
- biomedisa/features/PredictDataGenerator.py +7 -5
- biomedisa/features/biomedisa_helper.py +59 -14
- biomedisa/features/crop_helper.py +7 -7
- biomedisa/features/keras_helper.py +281 -157
- biomedisa/features/random_walk/rw_large.py +6 -2
- biomedisa/features/random_walk/rw_small.py +7 -3
- biomedisa/features/remove_outlier.py +3 -3
- biomedisa/features/split_volume.py +12 -11
- biomedisa/interpolation.py +6 -9
- biomedisa/mesh.py +2 -2
- {biomedisa-24.8.10.dist-info → biomedisa-25.6.1.dist-info}/METADATA +3 -2
- {biomedisa-24.8.10.dist-info → biomedisa-25.6.1.dist-info}/RECORD +17 -17
- {biomedisa-24.8.10.dist-info → biomedisa-25.6.1.dist-info}/WHEEL +1 -1
- {biomedisa-24.8.10.dist-info → biomedisa-25.6.1.dist-info/licenses}/LICENSE +0 -0
- {biomedisa-24.8.10.dist-info → biomedisa-25.6.1.dist-info}/top_level.txt +0 -0
```diff
@@ -1,6 +1,6 @@
 ##########################################################################
 ## ##
-## Copyright (c) 2019-
+## Copyright (c) 2019-2025 Philipp Lösel. All rights reserved. ##
 ## ##
 ## This file is part of the open source project biomedisa. ##
 ## ##
```
```diff
@@ -52,6 +52,39 @@ def silent_remove(filename):
     except OSError:
         pass
 
+# calculate mean and standard deviation using Welford's algorithm
+@numba.jit(nopython=True)
+def welford_mean_std(arr):
+    mean, m2, count = 0.0, 0.0, 0
+    for x in arr.ravel():
+        count += 1
+        delta = x - mean
+        mean += delta / count
+        delta2 = x - mean
+        m2 += delta * delta2  # Running sum of squared differences
+    std_dev = np.sqrt(m2 / count) if count > 1 else 0.0  # Population std deviation
+    return mean, std_dev
+
+# determine all values and their count from an array
+def unique(arr, return_counts=False):
+    try:
+        arr = arr.ravel()
+        counts = np.zeros(np.amax(arr)+1, dtype=int)
+        @numba.jit(nopython=True)
+        def __unique__(arr, size, counts):
+            for k in range(size):
+                counts[arr[k]] += 1
+            return counts
+        counts = __unique__(arr, arr.size, counts)
+        labels = np.where(counts)[0]
+        if return_counts:
+            return labels, counts[labels]
+        else:
+            return labels
+    except Exception as e:
+        print(f"Error: {e}")
+        return None
+
 # create a unique filename
 def unique_file_path(path, dir_path=biomedisa.BASE_DIR+'/private_storage/'):
 
```
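The two new helpers compute statistics in a single pass: Welford's algorithm returns the population mean and standard deviation without a second sweep over the volume, and the counting-based unique() avoids sorting. A minimal usage sketch, assuming biomedisa 25.6.1 (with NumPy and numba) is installed and using a made-up label volume:

```python
import numpy as np
from biomedisa.features.biomedisa_helper import welford_mean_std, unique

# small made-up volume; real inputs are 3D image or label stacks
labels = np.random.randint(0, 5, size=(4, 8, 8)).astype(np.uint8)

# single-pass mean/std should agree with NumPy's two-pass population statistics
mean, std = welford_mean_std(labels.astype(np.float32))
assert np.isclose(mean, labels.mean()) and np.isclose(std, labels.std())

# the counting-based unique() matches np.unique for non-negative integer labels
vals, counts = unique(labels, return_counts=True)
ref_vals, ref_counts = np.unique(labels, return_counts=True)
assert np.array_equal(vals, ref_vals) and np.array_equal(counts, ref_counts)
```

Note that welford_mean_std is numba-jitted, so its first call includes compilation time; subsequent calls run on compiled code.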
```diff
@@ -105,7 +138,7 @@ def unique_file_path(path, dir_path=biomedisa.BASE_DIR+'/private_storage/'):
 def Dice_score(ground_truth, result, average_dice=False):
     if average_dice:
         dice = 0
-        allLabels =
+        allLabels = unique(ground_truth)
         for l in allLabels[1:]:
             dice += 2 * np.logical_and(ground_truth==l, result==l).sum() / float((ground_truth==l).sum() + (result==l).sum())
         dice /= float(len(allLabels)-1)
```
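For reference, a tiny worked example of the averaged Dice score computed by this loop; the toy arrays are invented and the import assumes the updated biomedisa_helper:

```python
import numpy as np
from biomedisa.features.biomedisa_helper import Dice_score

# background 0 plus two foreground labels
ground_truth = np.array([0, 1, 1, 2, 2, 2])
result       = np.array([0, 1, 2, 2, 2, 0])

# per-label Dice = 2*|A ∩ B| / (|A| + |B|):
# label 1: 2*1/(2+1), label 2: 2*2/(3+3) -> average 2/3
print(Dice_score(ground_truth, result, average_dice=True))  # ~0.667
```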
```diff
@@ -120,7 +153,7 @@ def ASSD(ground_truth, result):
     number_of_elements = 0
     distances = 0
     hausdorff = 0
-    for label in
+    for label in unique(ground_truth)[1:]:
         d, n, h = ASSD_one_label(ground_truth, result, label)
         number_of_elements += n
         distances += d
```
```diff
@@ -157,7 +190,7 @@ def img_resize(a, z_shape, y_shape, x_shape, interpolation=None, labels=False):
 
     if labels:
         data = np.zeros((z_shape, y_shape, x_shape), dtype=a.dtype)
-        for k in
+        for k in unique(a):
             if k!=0:
                 tmp = np.zeros(a.shape, dtype=np.uint8)
                 tmp[a==k] = 1
```
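The hunk only shows the start of the label branch, but the idea is that label images are resized one label at a time as binary masks, so interpolation cannot blend neighbouring label values into spurious new ones. A rough standalone sketch of that idea, using scipy.ndimage.zoom in place of biomedisa's own resampling and an assumed 0.5 threshold:

```python
import numpy as np
from scipy.ndimage import zoom  # stand-in for biomedisa's internal resampling

def resize_label_volume(a, z_shape, y_shape, x_shape):
    factors = (z_shape / a.shape[0], y_shape / a.shape[1], x_shape / a.shape[2])
    data = np.zeros((z_shape, y_shape, x_shape), dtype=a.dtype)
    for k in np.unique(a):
        if k != 0:
            # resize the binary mask of label k, then threshold it back to a hard label
            mask = zoom((a == k).astype(np.float32), factors, order=1)
            data[mask > 0.5] = k
    return data
```

Resizing the integer label volume directly with a linear interpolator would, for example, produce label 2 voxels on a boundary between labels 1 and 3.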
```diff
@@ -193,7 +226,7 @@ def set_labels_to_zero(label, labels_to_compute, labels_to_remove):
     # compute only specific labels (set rest to zero)
     labels_to_compute = labels_to_compute.split(',')
    if not any([x in ['all', 'All', 'ALL'] for x in labels_to_compute]):
-        allLabels =
+        allLabels = unique(label)
         labels_to_del = [k for k in allLabels if str(k) not in labels_to_compute and k > 0]
         for k in labels_to_del:
             label[label == k] = 0
```
```diff
@@ -388,14 +421,21 @@ def load_data(path_to_data, process='None', return_extension=False):
     else:
         return data, header
 
-def _error_(bm, message):
+def _error_(bm, message, level=1):
     if bm.django_env:
         from biomedisa.features.django_env import create_error_object
         create_error_object(message, bm.remote, bm.queue, bm.img_id)
         with open(bm.path_to_logfile, 'a') as logfile:
             print('%s %s %s %s' %(time.ctime(), bm.username, bm.shortfilename, message), file=logfile)
-
-
+    elif bm.slicer and bm.path_to_data:
+        error_path = os.path.dirname(bm.path_to_data) + '/biomedisa-error.txt'
+        with open(error_path, 'w') as file:
+            file.write(message)
+    if level==0:
+        print('Warning:', message)
+    elif level==1:
+        print('Error:', message)
+        bm.success = False
     return bm
 
 def pre_processing(bm):
```
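_error_ now distinguishes warnings (level=0) from errors (level=1) and, outside the Django environment, can drop a biomedisa-error.txt next to the input file for the 3D Slicer extension. A usage sketch with a minimal stand-in for biomedisa's run-state object bm; only the attributes touched by the hunk above are provided, and the messages are invented:

```python
from types import SimpleNamespace
from biomedisa.features.biomedisa_helper import _error_

bm = SimpleNamespace(django_env=False, slicer=True,
                     path_to_data='/tmp/volume.tif', success=True)

bm = _error_(bm, 'No labels found in label data.')      # level=1: prints "Error: ...", sets bm.success = False
bm = _error_(bm, 'Less than 3 axial slices.', level=0)  # level=0: prints "Warning: ..."
# both calls also write /tmp/biomedisa-error.txt because bm.slicer is set
```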
```diff
@@ -430,7 +470,7 @@ def pre_processing(bm):
         bm.labelData = bm.labelData.astype(np.int32)
 
     # get labels
-    bm.allLabels =
+    bm.allLabels = unique(bm.labelData)
     index = np.argwhere(bm.allLabels<0)
     bm.allLabels = np.delete(bm.allLabels, index)
 
```
```diff
@@ -553,7 +593,7 @@ def color_to_gray(labelData):
     return labelData
 
 def delbackground(labels):
-    allLabels, labelcounts =
+    allLabels, labelcounts = unique(labels, return_counts=True)
     index = np.argmax(labelcounts)
     labels[labels==allLabels[index]] = 0
     return labels
```
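delbackground treats the most frequent label value as background and sets it to zero. A toy call, with invented array values and assuming the updated biomedisa_helper:

```python
import numpy as np
from biomedisa.features.biomedisa_helper import delbackground

labels = np.array([3, 3, 3, 3, 1, 1, 2])
print(delbackground(labels))  # label 3 is the most frequent -> becomes 0: [0 0 0 0 1 1 2]
```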
```diff
@@ -593,7 +633,10 @@ def _get_platform(bm):
     if cl and bm.platform is None:
         for vendor in ['NVIDIA', 'Intel', 'AMD', 'Apple']:
             for dev, device_type in [('GPU',cl.device_type.GPU),('CPU',cl.device_type.CPU)]:
-
+                try:
+                    all_platforms = cl.get_platforms()
+                except:
+                    all_platforms = []
                 my_devices = []
                 for p in all_platforms:
                     if p.get_devices(device_type=device_type) and vendor in p.name:
```
```diff
@@ -614,7 +657,10 @@ def _get_platform(bm):
     elif cl and len(bm.platform.split('_')) == 3:
         plat, vendor, dev = bm.platform.split('_')
         device_type=cl.device_type.GPU if dev=='GPU' else cl.device_type.CPU
-
+        try:
+            all_platforms = cl.get_platforms()
+        except:
+            all_platforms = []
         my_devices = []
         for p in all_platforms:
             if p.get_devices(device_type=device_type) and vendor in p.name:
```
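Both platform-selection paths now tolerate a broken or missing OpenCL installation: if cl.get_platforms() raises, the device list simply stays empty instead of aborting. A self-contained sketch of the same defensive pattern, assuming pyopencl is installed (the helper name is ours):

```python
import pyopencl as cl

def find_devices(vendor='NVIDIA', device_type=cl.device_type.GPU):
    try:
        all_platforms = cl.get_platforms()
    except Exception:
        # e.g. no ICD configured: fall back to "no OpenCL devices" instead of crashing
        all_platforms = []
    my_devices = []
    for p in all_platforms:
        devices = p.get_devices(device_type=device_type)
        if devices and vendor in p.name:
            my_devices = devices
    return my_devices

print(find_devices() or 'no matching OpenCL device found')
```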
```diff
@@ -844,9 +890,8 @@ def _split_indices(indices, ngpus):
     return parts
 
 def get_labels(pre_final, labels):
-    numos = np.unique(pre_final)
     final = np.zeros_like(pre_final)
-    for k in
+    for k in unique(pre_final)[1:]:
         final[pre_final == k] = labels[k]
     return final
 
```
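get_labels remaps every non-zero value k in the result volume to labels[k], now using the counting-based unique() instead of a separate pass. A toy call with invented numbers:

```python
import numpy as np
from biomedisa.features.biomedisa_helper import get_labels

pre_final = np.array([[0, 1, 1],
                      [2, 2, 0]])        # internal indices 0..2
labels = np.array([0, 4, 9])             # index 1 -> original label 4, index 2 -> 9
print(get_labels(pre_final, labels))     # [[0 4 4]
                                         #  [9 9 0]]
```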
```diff
@@ -1,6 +1,6 @@
 ##########################################################################
 ## ##
-## Copyright (c) 2019-
+## Copyright (c) 2019-2025 Philipp Lösel. All rights reserved. ##
 ## ##
 ## This file is part of the open source project biomedisa. ##
 ## ##
```
```diff
@@ -28,7 +28,8 @@
 
 import os
 from biomedisa.features.keras_helper import read_img_list
-from biomedisa.features.biomedisa_helper import img_resize,
+from biomedisa.features.biomedisa_helper import (img_resize,
+    load_data, save_data, set_labels_to_zero, welford_mean_std)
 from tensorflow.python.framework.errors_impl import ResourceExhaustedError
 from tensorflow.keras.applications import DenseNet121, densenet
 from tensorflow.keras.optimizers import Adam
```
```diff
@@ -161,7 +162,7 @@ def load_cropping_training_data(normalize, img_list, label_list, x_scale, y_scal
     # normalize first validation image
     if normalize and np.any(normalization_parameters):
         for c in range(channels):
-            mean, std =
+            mean, std = welford_mean_std(img[:,:,:,c])
             img[:,:,:,c] = (img[:,:,:,c] - mean) / std
             img[:,:,:,c] = img[:,:,:,c] * normalization_parameters[1,c] + normalization_parameters[0,c]
 
```
```diff
@@ -170,8 +171,7 @@ def load_cropping_training_data(normalize, img_list, label_list, x_scale, y_scal
     normalization_parameters = np.zeros((2,channels))
     if normalize:
         for c in range(channels):
-            normalization_parameters[
-            normalization_parameters[1,c] = np.std(img[:,:,:,c])
+            normalization_parameters[:,c] = welford_mean_std(img[:,:,:,c])
 
     # loop over list of images
     if any(img_list) or type(img_in) is list:
```
```diff
@@ -223,7 +223,7 @@ def load_cropping_training_data(normalize, img_list, label_list, x_scale, y_scal
             next_img[:,:,:,c] -= np.amin(next_img[:,:,:,c])
             next_img[:,:,:,c] /= np.amax(next_img[:,:,:,c])
             if normalize:
-                mean, std =
+                mean, std = welford_mean_std(next_img[:,:,:,c])
                 next_img[:,:,:,c] = (next_img[:,:,:,c] - mean) / std
                 next_img[:,:,:,c] = next_img[:,:,:,c] * normalization_parameters[1,c] + normalization_parameters[0,c]
         img = np.append(img, next_img, axis=0)
```
```diff
@@ -391,7 +391,7 @@ def load_data_to_crop(path_to_img, channels, x_scale, y_scale, z_scale,
         img[:,:,:,c] -= np.amin(img[:,:,:,c])
         img[:,:,:,c] /= np.amax(img[:,:,:,c])
         if normalize:
-            mean, std =
+            mean, std = welford_mean_std(img[:,:,:,c])
             img[:,:,:,c] = (img[:,:,:,c] - mean) / std
             img[:,:,:,c] = img[:,:,:,c] * normalization_parameters[1,c] + normalization_parameters[0,c]
     img[img<0] = 0
```
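All of these call sites follow the same pattern: each channel is z-scored with its own Welford mean/std and then rescaled to the reference statistics stored in normalization_parameters (row 0: means, row 1: standard deviations), so every input shares the distribution of the first training image. A condensed sketch of that pattern with made-up single-channel volumes, assuming the updated biomedisa_helper:

```python
import numpy as np
from biomedisa.features.biomedisa_helper import welford_mean_std

reference = np.random.rand(8, 32, 32).astype(np.float32)             # first training image (one channel)
image = (5.0 * np.random.rand(8, 32, 32) + 2.0).astype(np.float32)   # another image with different statistics

ref_mean, ref_std = welford_mean_std(reference)   # stored as normalization_parameters[0,c] and [1,c]
mean, std = welford_mean_std(image)
image = (image - mean) / std                      # z-score with the image's own statistics
image = image * ref_std + ref_mean                # match the reference distribution

print(welford_mean_std(image))                    # ~ (ref_mean, ref_std)
```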