nettracer3d 0.7.3__py3-none-any.whl → 0.7.4__py3-none-any.whl
This diff compares the contents of two publicly released versions of the package as published to their registry. It is provided for informational purposes only and reflects the packages exactly as they appear in the public registry.
- nettracer3d/nettracer.py +24 -5
- nettracer3d/nettracer_gui.py +99 -67
- nettracer3d/segmenter - Copy.py +2097 -0
- nettracer3d/segmenter.py +75 -668
- nettracer3d/segmenter_GPU.py +611 -0
- {nettracer3d-0.7.3.dist-info → nettracer3d-0.7.4.dist-info}/METADATA +5 -6
- {nettracer3d-0.7.3.dist-info → nettracer3d-0.7.4.dist-info}/RECORD +11 -9
- {nettracer3d-0.7.3.dist-info → nettracer3d-0.7.4.dist-info}/WHEEL +1 -1
- {nettracer3d-0.7.3.dist-info → nettracer3d-0.7.4.dist-info}/entry_points.txt +0 -0
- {nettracer3d-0.7.3.dist-info → nettracer3d-0.7.4.dist-info}/licenses/LICENSE +0 -0
- {nettracer3d-0.7.3.dist-info → nettracer3d-0.7.4.dist-info}/top_level.txt +0 -0
nettracer3d/segmenter.py
CHANGED
@@ -12,35 +12,13 @@ class InteractiveSegmenter:
         self.image_3d = image_3d
         self.patterns = []

-
-            self.use_gpu = use_gpu and cp.cuda.is_available()
-        except:
-            self.use_gpu = False
-        if self.use_gpu:
-            try:
-                print(f"Using GPU: {torch.cuda.get_device_name()}")
-            except:
-                pass
-            self.image_gpu = cp.asarray(image_3d)
-            try:
-                self.model = cuRandomForestClassifier(
-                    n_estimators=100,
-                    max_depth=None
-                )
-            except:
-                self.model = RandomForestClassifier(
-                    n_estimators=100,
-                    n_jobs=-1,
-                    max_depth=None
-                )
+        self.use_gpu = False

-
-
-
-
-
-                max_depth=None
-            )
+        self.model = RandomForestClassifier(
+            n_estimators=100,
+            n_jobs=-1,
+            max_depth=None
+        )

         self.feature_cache = None
         self.lock = threading.Lock()
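Net effect of this hunk: the constructor no longer probes CuPy/cuML for a GPU and always builds a CPU random forest. A minimal sketch of what the 0.7.4 initialization reduces to (the scikit-learn import is an assumption; it sits outside the hunk):

    # Sketch only -- the sklearn import is assumed, not shown in this hunk.
    from sklearn.ensemble import RandomForestClassifier

    use_gpu = False  # GPU detection (cp.cuda.is_available / cuRandomForestClassifier) is gone
    model = RandomForestClassifier(
        n_estimators=100,  # same forest size as before
        n_jobs=-1,         # use all CPU cores
        max_depth=None,    # grow trees until leaves are pure
    )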
@@ -202,73 +180,7 @@ class InteractiveSegmenter:
         if self._currently_processing == slice_z:
             self._currently_processing = None

-
-        """Compute feature maps using CPU"""
-        features = []
-        if image_3d is None:
-            image_3d = self.image_3d
-        original_shape = image_3d.shape
-
-        # Gaussian and DoG using scipy
-        #print("Obtaining gaussians")
-        for sigma in self.alphas:
-            smooth = ndimage.gaussian_filter(image_3d, sigma)
-            features.append(smooth)
-
-        # Difference of Gaussians
-        for (s1, s2) in self.dogs:
-            g1 = ndimage.gaussian_filter(image_3d, s1)
-            g2 = ndimage.gaussian_filter(image_3d, s2)
-            dog = g1 - g2
-            features.append(dog)
-
-        #print("Computing local statistics")
-        # Local statistics using scipy's convolve
-        window_size = self.windows
-        kernel = np.ones((window_size, window_size, window_size)) / (window_size**3)
-
-        # Local mean
-        local_mean = ndimage.convolve(image_3d, kernel, mode='reflect')
-        features.append(local_mean)
-
-        # Local variance
-        mean = np.mean(image_3d)
-        local_var = ndimage.convolve((image_3d - mean)**2, kernel, mode='reflect')
-        features.append(local_var)
-
-        #print("Computing sobel and gradients")
-        # Gradient computations using scipy
-        gx = ndimage.sobel(image_3d, axis=2, mode='reflect')
-        gy = ndimage.sobel(image_3d, axis=1, mode='reflect')
-        gz = ndimage.sobel(image_3d, axis=0, mode='reflect')
-
-        # Gradient magnitude
-        gradient_magnitude = np.sqrt(gx**2 + gy**2 + gz**2)
-        features.append(gradient_magnitude)
-
-        #print("Computing second-order features")
-        # Second-order gradients
-        gxx = ndimage.sobel(gx, axis=2, mode='reflect')
-        gyy = ndimage.sobel(gy, axis=1, mode='reflect')
-        gzz = ndimage.sobel(gz, axis=0, mode='reflect')
-
-        # Laplacian (sum of second derivatives)
-        laplacian = gxx + gyy + gzz
-        features.append(laplacian)
-
-        # Hessian determinant
-        hessian_det = gxx * gyy * gzz
-        features.append(hessian_det)
-
-        #print("Verifying shapes")
-        for i, feat in enumerate(features):
-            if feat.shape != original_shape:
-                feat_adjusted = np.expand_dims(feat, axis=0)
-                if feat_adjusted.shape != original_shape:
-                    raise ValueError(f"Feature {i} has shape {feat.shape}, expected {original_shape}")
-                features[i] = feat_adjusted
-
-        return np.stack(features, axis=-1)
+

     def compute_deep_feature_maps_cpu_parallel(self, image_3d=None):
         """Compute deep feature maps using CPU with thread-based parallelism"""
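Both the removed method and the surviving compute_deep_feature_maps_cpu_parallel end in np.stack(features, axis=-1), so each filter output becomes one channel of a per-voxel feature vector. A small illustrative sketch of that layout (array names and sizes here are made up):

    import numpy as np
    image = np.random.rand(4, 32, 32)                      # hypothetical (Z, Y, X) volume
    features = [image, image * 2.0, image - image.mean()]  # stand-ins for Gaussian/DoG/etc. outputs
    feature_map = np.stack(features, axis=-1)               # shape (4, 32, 32, 3): one channel per filter
    voxel_descriptor = feature_map[2, 10, 10]                # the 3-element feature vector of one voxel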
@@ -439,66 +351,6 @@ class InteractiveSegmenter:

         return np.stack(features, axis=-1)

-    def compute_deep_feature_maps_cpu_2d(self, z = None):
-        """Compute 2D feature maps using CPU"""
-        features = []
-
-        image_2d = self.image_3d[z, :, :]
-        original_shape = image_2d.shape
-
-        # Gaussian using scipy
-        for sigma in [0.5, 1.0, 2.0, 4.0]:
-            smooth = ndimage.gaussian_filter(image_2d, sigma)
-            features.append(smooth)
-
-        # Local statistics using scipy's convolve - adjusted for 2D
-        window_size = 5
-        kernel = np.ones((window_size, window_size)) / (window_size**2)
-
-        # Local mean
-        local_mean = ndimage.convolve(image_2d, kernel, mode='reflect')
-        features.append(local_mean)
-
-        # Local variance
-        mean = np.mean(image_2d)
-        local_var = ndimage.convolve((image_2d - mean)**2, kernel, mode='reflect')
-        features.append(local_var)
-
-        # Gradient computations using scipy - adjusted axes for 2D
-        gx = ndimage.sobel(image_2d, axis=1, mode='reflect') # x direction
-        gy = ndimage.sobel(image_2d, axis=0, mode='reflect') # y direction
-
-        # Gradient magnitude (2D version)
-        gradient_magnitude = np.sqrt(gx**2 + gy**2)
-        features.append(gradient_magnitude)
-
-        # Second-order gradients
-        gxx = ndimage.sobel(gx, axis=1, mode='reflect')
-        gyy = ndimage.sobel(gy, axis=0, mode='reflect')
-
-        # Laplacian (sum of second derivatives) - 2D version
-        laplacian = gxx + gyy
-        features.append(laplacian)
-
-        # Hessian determinant - 2D version
-        hessian_det = gxx * gyy - ndimage.sobel(gx, axis=0, mode='reflect') * ndimage.sobel(gy, axis=1, mode='reflect')
-        features.append(hessian_det)
-
-        for i, feat in enumerate(features):
-            if feat.shape != original_shape:
-                # Check dimensionality and expand if needed
-                if len(feat.shape) < len(original_shape):
-                    feat_adjusted = feat
-                    missing_dims = len(original_shape) - len(feat.shape)
-                    for _ in range(missing_dims):
-                        feat_adjusted = np.expand_dims(feat_adjusted, axis=0)
-
-                if feat_adjusted.shape != original_shape:
-                    raise ValueError(f"Feature {i} has shape {feat.shape}, expected {original_shape}")
-
-                features[i] = feat_adjusted
-
-        return np.stack(features, axis=-1)

     def compute_deep_feature_maps_cpu_2d_parallel(self, z=None):
         """Compute 2D feature maps using CPU with thread-based parallelism"""
@@ -673,195 +525,6 @@ class InteractiveSegmenter:

         return np.stack(features, axis=-1)

-    def compute_feature_maps(self):
-        """Compute all feature maps using GPU acceleration"""
-        #if not self.use_gpu:
-            #return super().compute_feature_maps()
-
-        features = []
-        image = self.image_gpu
-        image_3d = self.image_3d
-        original_shape = self.image_3d.shape
-
-
-
-        # Gaussian smoothing at different scales
-        print("Obtaining gaussians")
-        for sigma in [0.5, 1.0, 2.0, 4.0]:
-            smooth = cp.asnumpy(self.gaussian_filter_gpu(image, sigma))
-            features.append(smooth)
-
-        print("Obtaining dif of gaussians")
-
-        # Difference of Gaussians
-        for (s1, s2) in [(1, 2), (2, 4)]:
-            g1 = self.gaussian_filter_gpu(image, s1)
-            g2 = self.gaussian_filter_gpu(image, s2)
-            dog = cp.asnumpy(g1 - g2)
-            features.append(dog)
-
-        # Convert image to PyTorch tensor for gradient operations
-        image_torch = torch.from_numpy(image_3d).cuda()
-        image_torch = image_torch.float().unsqueeze(0).unsqueeze(0)
-
-        # Calculate required padding
-        kernel_size = 3
-        padding = kernel_size // 2
-
-        # Create a single padded version with same padding
-        pad = torch.nn.functional.pad(image_torch, (padding, padding, padding, padding, padding, padding), mode='replicate')
-
-        print("Computing sobel kernels")
-
-        # Create sobel kernels
-        sobel_x = torch.tensor([-1, 0, 1], device='cuda').float().view(1,1,1,1,3)
-        sobel_y = torch.tensor([-1, 0, 1], device='cuda').float().view(1,1,1,3,1)
-        sobel_z = torch.tensor([-1, 0, 1], device='cuda').float().view(1,1,3,1,1)
-
-        # Compute gradients
-        print("Computing gradiants")
-
-        gx = torch.nn.functional.conv3d(pad, sobel_x, padding=0)[:,:,:original_shape[0],:original_shape[1],:original_shape[2]]
-        gy = torch.nn.functional.conv3d(pad, sobel_y, padding=0)[:,:,:original_shape[0],:original_shape[1],:original_shape[2]]
-        gz = torch.nn.functional.conv3d(pad, sobel_z, padding=0)[:,:,:original_shape[0],:original_shape[1],:original_shape[2]]
-
-        # Compute gradient magnitude
-        print("Computing gradiant mags")
-
-        gradient_magnitude = torch.sqrt(gx**2 + gy**2 + gz**2)
-        gradient_feature = gradient_magnitude.cpu().numpy().squeeze()
-
-        features.append(gradient_feature)
-
-        print(features.shape)
-
-        # Verify shapes
-        for i, feat in enumerate(features):
-            if feat.shape != original_shape:
-                # Create a copy of the feature to modify
-                feat_adjusted = np.expand_dims(feat, axis=0)
-                if feat_adjusted.shape != original_shape:
-                    raise ValueError(f"Feature {i} has shape {feat.shape}, expected {original_shape}")
-                # Important: Update the original features list with the expanded version
-                features[i] = feat_adjusted
-
-        return np.stack(features, axis=-1)
-
-    def compute_feature_maps_2d(self, z=None):
-        """Compute all feature maps for 2D images using GPU acceleration"""
-
-        features = []
-
-        image = self.image_gpu[z, :, :]
-        image_2d = self.image_3d[z, :, :]
-        original_shape = image_2d.shape
-
-        # Gaussian smoothing at different scales
-        print("Obtaining gaussians")
-        for sigma in [0.5, 1.0, 2.0, 4.0]:
-            smooth = cp.asnumpy(self.gaussian_filter_gpu(image, sigma))
-            features.append(smooth)
-
-        print("Obtaining diff of gaussians")
-        # Difference of Gaussians
-        for (s1, s2) in [(1, 2), (2, 4)]:
-            g1 = self.gaussian_filter_gpu(image, s1)
-            g2 = self.gaussian_filter_gpu(image, s2)
-            dog = cp.asnumpy(g1 - g2)
-            features.append(dog)
-
-        # Convert image to PyTorch tensor for gradient operations
-        image_torch = torch.from_numpy(image_2d).cuda()
-        image_torch = image_torch.float().unsqueeze(0).unsqueeze(0)
-
-        # Calculate required padding
-        kernel_size = 3
-        padding = kernel_size // 2
-
-        # Create a single padded version with same padding
-        pad = torch.nn.functional.pad(image_torch, (padding, padding, padding, padding), mode='replicate')
-
-        print("Computing sobel kernels")
-        # Create 2D sobel kernels
-        sobel_x = torch.tensor([-1, 0, 1], device='cuda').float().view(1, 1, 1, 3)
-        sobel_y = torch.tensor([-1, 0, 1], device='cuda').float().view(1, 1, 3, 1)
-
-        # Compute gradients
-        print("Computing gradients")
-        gx = torch.nn.functional.conv2d(pad, sobel_x, padding=0)[:, :, :original_shape[0], :original_shape[1]]
-        gy = torch.nn.functional.conv2d(pad, sobel_y, padding=0)[:, :, :original_shape[0], :original_shape[1]]
-
-        # Compute gradient magnitude (no z component in 2D)
-        print("Computing gradient mags")
-        gradient_magnitude = torch.sqrt(gx**2 + gy**2)
-        gradient_feature = gradient_magnitude.cpu().numpy().squeeze()
-
-        features.append(gradient_feature)
-
-        # Verify shapes
-        for i, feat in enumerate(features):
-            if feat.shape != original_shape:
-                # Create a copy of the feature to modify
-                feat_adjusted = feat
-                # Check dimensionality and expand if needed
-                if len(feat.shape) < len(original_shape):
-                    missing_dims = len(original_shape) - len(feat.shape)
-                    for _ in range(missing_dims):
-                        feat_adjusted = np.expand_dims(feat_adjusted, axis=0)
-
-                if feat_adjusted.shape != original_shape:
-                    raise ValueError(f"Feature {i} has shape {feat.shape}, expected {original_shape}")
-
-                # Update the original features list with the adjusted version
-                features[i] = feat_adjusted
-
-        return np.stack(features, axis=-1)
-
-    def compute_feature_maps_cpu_2d(self, z = None):
-        """Compute feature maps for 2D images using CPU"""
-
-
-        features = []
-
-        image_2d = self.image_3d[z, :, :]
-        original_shape = image_2d.shape
-
-        # Gaussian smoothing at different scales
-        for sigma in [0.5, 1.0, 2.0, 4.0]:
-            smooth = ndimage.gaussian_filter(image_2d, sigma)
-            features.append(smooth)
-
-        # Difference of Gaussians
-        for (s1, s2) in [(1, 2), (2, 4)]:
-            g1 = ndimage.gaussian_filter(image_2d, s1)
-            g2 = ndimage.gaussian_filter(image_2d, s2)
-            dog = g1 - g2
-            features.append(dog)
-
-        # Gradient computations using scipy - note axis changes for 2D
-        gx = ndimage.sobel(image_2d, axis=1, mode='reflect') # x direction
-        gy = ndimage.sobel(image_2d, axis=0, mode='reflect') # y direction
-
-        # Gradient magnitude (no z component in 2D)
-        gradient_magnitude = np.sqrt(gx**2 + gy**2)
-        features.append(gradient_magnitude)
-
-        # Verify shapes
-        for i, feat in enumerate(features):
-            if feat.shape != original_shape:
-                # Check dimensionality and expand if needed
-                if len(feat.shape) < len(original_shape):
-                    feat_adjusted = feat
-                    missing_dims = len(original_shape) - len(feat.shape)
-                    for _ in range(missing_dims):
-                        feat_adjusted = np.expand_dims(feat_adjusted, axis=0)
-
-                if feat_adjusted.shape != original_shape:
-                    raise ValueError(f"Feature {i} has shape {feat.shape}, expected {original_shape}")
-
-                features[i] = feat_adjusted
-
-        return np.stack(features, axis=-1)

     def compute_feature_maps_cpu_2d_parallel(self, z=None):
         """Compute feature maps for 2D images using CPU with thread-based parallelism"""
@@ -949,50 +612,6 @@ class InteractiveSegmenter:

         return np.stack(features, axis=-1)

-    def compute_feature_maps_cpu(self, image_3d = None):
-        """Compute feature maps using CPU"""
-        features = []
-        if image_3d is None:
-            image_3d = self.image_3d
-
-        original_shape = image_3d.shape
-
-
-        # Gaussian smoothing at different scales
-        #print("Obtaining gaussians")
-        for sigma in self.alphas:
-            smooth = ndimage.gaussian_filter(image_3d, sigma)
-            features.append(smooth)
-
-        #print("Obtaining dif of gaussians")
-        # Difference of Gaussians
-        for (s1, s2) in self.dogs:
-            g1 = ndimage.gaussian_filter(image_3d, s1)
-            g2 = ndimage.gaussian_filter(image_3d, s2)
-            dog = g1 - g2
-            features.append(dog)
-
-        #print("Computing sobel and gradients")
-        # Gradient computations using scipy
-        gx = ndimage.sobel(image_3d, axis=2, mode='reflect') # x direction
-        gy = ndimage.sobel(image_3d, axis=1, mode='reflect') # y direction
-        gz = ndimage.sobel(image_3d, axis=0, mode='reflect') # z direction
-
-        # Gradient magnitude
-        #print("Computing gradient magnitude")
-        gradient_magnitude = np.sqrt(gx**2 + gy**2 + gz**2)
-        features.append(gradient_magnitude)
-
-        # Verify shapes
-        #print("Verifying shapes")
-        for i, feat in enumerate(features):
-            if feat.shape != original_shape:
-                feat_adjusted = np.expand_dims(feat, axis=0)
-                if feat_adjusted.shape != original_shape:
-                    raise ValueError(f"Feature {i} has shape {feat.shape}, expected {original_shape}")
-                features[i] = feat_adjusted
-
-        return np.stack(features, axis=-1)

     def compute_feature_maps_cpu_parallel(self, image_3d=None):
         """Use ThreadPoolExecutor
@@ -1052,156 +671,6 @@ class InteractiveSegmenter:

         return np.stack(features, axis=-1)

-    def compute_deep_feature_maps(self):
-        """Compute all feature maps using GPU acceleration"""
-        #if not self.use_gpu:
-            #return super().compute_feature_maps()
-
-        features = []
-        image = self.image_gpu
-        original_shape = self.image_3d.shape
-
-        # Original features (Gaussians and DoG)
-        print("Obtaining gaussians")
-        for sigma in [0.5, 1.0, 2.0, 4.0]:
-            smooth = cp.asnumpy(self.gaussian_filter_gpu(image, sigma))
-            features.append(smooth)
-
-        print("Computing local statistics")
-        image_torch = torch.from_numpy(self.image_3d).cuda()
-        image_torch = image_torch.float().unsqueeze(0).unsqueeze(1) # [1, 1, 1, 512, 384]
-
-        # Create kernel
-        window_size = 5
-        pad = window_size // 2
-
-        if image_torch.shape[2] == 1: # Single slice case
-            # Squeeze out the z dimension for 2D operations
-            image_2d = image_torch.squeeze(2) # Now [1, 1, 512, 384]
-            kernel_2d = torch.ones((1, 1, window_size, window_size), device='cuda')
-            kernel_2d = kernel_2d / (window_size**2)
-
-            # 2D padding and convolution
-            padded = torch.nn.functional.pad(image_2d,
-                                             (pad, pad, # x dimension
-                                              pad, pad), # y dimension
-                                             mode='reflect')
-
-            local_mean = torch.nn.functional.conv2d(padded, kernel_2d)
-            local_mean = local_mean.unsqueeze(2) # Add z dimension back
-            features.append(local_mean.cpu().numpy().squeeze())
-
-            # Local variance
-            mean = torch.mean(image_2d)
-            padded_sq = torch.nn.functional.pad((image_2d - mean)**2,
-                                                (pad, pad, pad, pad),
-                                                mode='reflect')
-            local_var = torch.nn.functional.conv2d(padded_sq, kernel_2d)
-            local_var = local_var.unsqueeze(2) # Add z dimension back
-            features.append(local_var.cpu().numpy().squeeze())
-        else:
-            # Original 3D operations for multi-slice case
-            kernel = torch.ones((1, 1, window_size, window_size, window_size), device='cuda')
-            kernel = kernel / (window_size**3)
-
-            padded = torch.nn.functional.pad(image_torch,
-                                             (pad, pad, # x dimension
-                                              pad, pad, # y dimension
-                                              pad, pad), # z dimension
-                                             mode='reflect')
-            local_mean = torch.nn.functional.conv3d(padded, kernel)
-            features.append(local_mean.cpu().numpy().squeeze())
-
-            mean = torch.mean(image_torch)
-            padded_sq = torch.nn.functional.pad((image_torch - mean)**2,
-                                                (pad, pad, pad, pad, pad, pad),
-                                                mode='reflect')
-            local_var = torch.nn.functional.conv3d(padded_sq, kernel)
-            features.append(local_var.cpu().numpy().squeeze())
-
-        # Original gradient computations
-        print("Computing sobel and gradients")
-        kernel_size = 3
-        padding = kernel_size // 2
-        pad = torch.nn.functional.pad(image_torch, (padding,)*6, mode='replicate')
-
-        sobel_x = torch.tensor([-1, 0, 1], device='cuda').float().view(1,1,1,1,3)
-        sobel_y = torch.tensor([-1, 0, 1], device='cuda').float().view(1,1,1,3,1)
-        sobel_z = torch.tensor([-1, 0, 1], device='cuda').float().view(1,1,3,1,1)
-
-        gx = torch.nn.functional.conv3d(pad, sobel_x, padding=0)[:,:,:original_shape[0],:original_shape[1],:original_shape[2]]
-        gy = torch.nn.functional.conv3d(pad, sobel_y, padding=0)[:,:,:original_shape[0],:original_shape[1],:original_shape[2]]
-        gz = torch.nn.functional.conv3d(pad, sobel_z, padding=0)[:,:,:original_shape[0],:original_shape[1],:original_shape[2]]
-
-        gradient_magnitude = torch.sqrt(gx**2 + gy**2 + gz**2)
-        features.append(gradient_magnitude.cpu().numpy().squeeze())
-
-        # Second-order gradients
-        print("Computing second-order features")
-        gxx = torch.nn.functional.conv3d(gx, sobel_x, padding=padding)
-        gyy = torch.nn.functional.conv3d(gy, sobel_y, padding=padding)
-        gzz = torch.nn.functional.conv3d(gz, sobel_z, padding=padding)
-
-        # Get minimum size in each dimension
-        min_size_0 = min(gxx.size(2), gyy.size(2), gzz.size(2))
-        min_size_1 = min(gxx.size(3), gyy.size(3), gzz.size(3))
-        min_size_2 = min(gxx.size(4), gyy.size(4), gzz.size(4))
-
-        # Crop to smallest common size
-        gxx = gxx[:, :, :min_size_0, :min_size_1, :min_size_2]
-        gyy = gyy[:, :, :min_size_0, :min_size_1, :min_size_2]
-        gzz = gzz[:, :, :min_size_0, :min_size_1, :min_size_2]
-
-        laplacian = gxx + gyy + gzz # Second derivatives in each direction
-        features.append(laplacian.cpu().numpy().squeeze())
-
-        # Now they should have matching dimensions for multiplication
-        hessian_det = gxx * gyy * gzz
-        features.append(hessian_det.cpu().numpy().squeeze())
-
-        print("Verifying shapes")
-        for i, feat in enumerate(features):
-            if feat.shape != original_shape:
-                feat_adjusted = np.expand_dims(feat, axis=0)
-                if feat_adjusted.shape != original_shape:
-                    raise ValueError(f"Feature {i} has shape {feat.shape}, expected {original_shape}")
-                features[i] = feat_adjusted
-
-        return np.stack(features, axis=-1)
-
-    def gaussian_filter_gpu(self, image, sigma):
-        """GPU-accelerated Gaussian filter"""
-        # Create Gaussian kernel
-        result = cpx.gaussian_filter(image, sigma=sigma)
-
-        return result
-
-    def process_chunk_GPU(self, chunk_coords):
-        """Process a chunk of coordinates using GPU acceleration"""
-        coords = np.array(chunk_coords)
-        z, y, x = coords.T
-
-        # Extract features
-        features = self.feature_cache[z, y, x]
-
-        if self.use_gpu:
-            # Move to GPU
-            features_gpu = cp.array(features)
-
-            # Predict on GPU
-            predictions = self.model.predict(features_gpu)
-            predictions = cp.asnumpy(predictions)
-        else:
-            predictions = self.model.predict(features)
-
-        # Split results
-        foreground_mask = predictions == 1
-        background_mask = ~foreground_mask
-
-        foreground = set(map(tuple, coords[foreground_mask]))
-        background = set(map(tuple, coords[background_mask]))
-
-        return foreground, background

     def organize_by_z(self, coordinates):
         """
@@ -1240,58 +709,53 @@ class InteractiveSegmenter:
         background = set()

         if self.previewing or not self.use_two:
-            if self.mem_lock:
-                # For mem_lock, we need to extract a subarray and compute features
-
-                if self.realtimechunks is None: #Presuming we're segmenting all
-                    z_min, z_max = chunk_coords[0], chunk_coords[1]
-                    y_min, y_max = chunk_coords[2], chunk_coords[3]
-                    x_min, x_max = chunk_coords[4], chunk_coords[5]
-
-                    # Consider moving this to process chunk ??
-                    chunk_coords = np.stack(np.meshgrid(
-                        np.arange(z_min, z_max),
-                        np.arange(y_min, y_max),
-                        np.arange(x_min, x_max),
-                        indexing='ij'
-                    )).reshape(3, -1).T
-
-                    chunk_coords = (list(map(tuple, chunk_coords)))
-                else: #Presumes we're not segmenting all
-                    # Find min/max bounds of the coordinates to get the smallest containing subarray
-                    z_coords = [z for z, y, x in chunk_coords]
-                    y_coords = [y for z, y, x in chunk_coords]
-                    x_coords = [x for z, y, x in chunk_coords]
-
-                    z_min, z_max = min(z_coords), max(z_coords)
-                    y_min, y_max = min(y_coords), max(y_coords)
-                    x_min, x_max = min(x_coords), max(x_coords)

+            if self.realtimechunks is None: #Presuming we're segmenting all
+                z_min, z_max = chunk_coords[0], chunk_coords[1]
+                y_min, y_max = chunk_coords[2], chunk_coords[3]
+                x_min, x_max = chunk_coords[4], chunk_coords[5]
+
+                # Consider moving this to process chunk ??
+                chunk_coords = np.stack(np.meshgrid(
+                    np.arange(z_min, z_max),
+                    np.arange(y_min, y_max),
+                    np.arange(x_min, x_max),
+                    indexing='ij'
+                )).reshape(3, -1).T

-
-
+                chunk_coords = (list(map(tuple, chunk_coords)))
+            else: #Presumes we're not segmenting all
+                # Find min/max bounds of the coordinates to get the smallest containing subarray
+                z_coords = [z for z, y, x in chunk_coords]
+                y_coords = [y for z, y, x in chunk_coords]
+                x_coords = [x for z, y, x in chunk_coords]

-
-
-
-
-
+                z_min, z_max = min(z_coords), max(z_coords)
+                y_min, y_max = min(y_coords), max(y_coords)
+                x_min, x_max = min(x_coords), max(x_coords)
+
+
+            # Extract the subarray
+            subarray = self.image_3d[z_min:z_max+1, y_min:y_max+1, x_min:x_max+1]
+
+            # Compute features for this subarray
+            if self.speed:
+                feature_map = self.compute_feature_maps_cpu_parallel(subarray) #If the interactive segmenter is slow
+            else: #Due to the parallel, consider singleton implementation for it specifically
+                feature_map = self.compute_deep_feature_maps_cpu_parallel(subarray)
+
+            # Extract features for each coordinate, adjusting for subarray offset
+            features = []
+            for z, y, x in chunk_coords:
+                # Transform global coordinates to local subarray coordinates
+                local_z = z - z_min
+                local_y = y - y_min
+                local_x = x - x_min

-                    #
-
-
-                    # Transform global coordinates to local subarray coordinates
-                    local_z = z - z_min
-                    local_y = y - y_min
-                    local_x = x - x_min
-
-                    # Get feature at this position
-                    feature = feature_map[local_z, local_y, local_x]
-                    features.append(feature)
+                # Get feature at this position
+                feature = feature_map[local_z, local_y, local_x]
+                features.append(feature)

-            else:
-                # For non-mem_lock, simply use the feature cache
-                features = [self.feature_cache[z, y, x] for z, y, x in chunk_coords]

             # Make predictions
             predictions = self.model.predict(features)
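The rewritten process_chunk path computes features only for the bounding subarray of a chunk, then reads them back with chunk-local coordinates. A runnable sketch of that offset arithmetic (shapes and values are illustrative; only the variable names follow the hunk):

    import numpy as np
    z_min, y_min, x_min = 10, 0, 0                      # bounds of the extracted subarray
    feature_map = np.zeros((3, 8, 8, 5))                # features computed for the subarray only: (dz, dy, dx, F)
    chunk_coords = [(10, 5, 7), (12, 3, 1)]             # global voxel coordinates inside those bounds
    features = [feature_map[z - z_min, y - y_min, x - x_min] for z, y, x in chunk_coords]
    # each entry is one voxel's F-dimensional descriptor, ready for self.model.predict(features)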
@@ -1305,8 +769,7 @@ class InteractiveSegmenter:

         else:

-
-            chunk_coords = self.twodim_coords(chunk_coords[0], chunk_coords[1], chunk_coords[2], chunk_coords[3], chunk_coords[4])
+            chunk_coords = self.twodim_coords(chunk_coords[0], chunk_coords[1], chunk_coords[2], chunk_coords[3], chunk_coords[4])

             chunk_coords = self.organize_by_z(chunk_coords)

@@ -1397,15 +860,14 @@ class InteractiveSegmenter:



-    def segment_volume(self, chunk_size=None, gpu=False):
+    def segment_volume(self, array, chunk_size=None, gpu=False):
         """Segment volume using parallel processing of chunks with vectorized chunk creation"""
         #Change the above chunk size to None to have it auto-compute largest chunks (not sure which is faster, 64 seems reasonable in test cases)

         self.realtimechunks = None # Presumably no longer need this.
         self.map_slice = None

-
-        chunk_size = self.master_chunk #memory efficient chunk
+        chunk_size = self.master_chunk #memory efficient chunk


         def create_2d_chunks():
@@ -1418,8 +880,7 @@ class InteractiveSegmenter:
            List of chunks, where each chunk contains the coordinates for one z-slice or subchunk
            """
            MAX_CHUNK_SIZE = 262144
-
-           MAX_CHUNK_SIZE = 10000000000000000000000000 #unlimited i guess
+
            chunks = []

            for z in range(self.image_3d.shape[0]):
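With the override gone, the 262144-pixel cap applies again; 262144 = 512 × 512, so a z-slice up to 512 × 512 pixels stays a single chunk, while larger slices are split along their largest dimension in the hunks below.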
@@ -1431,13 +892,7 @@ class InteractiveSegmenter:
                # If the slice is small enough, do not subchunk
                if total_pixels <= MAX_CHUNK_SIZE:

-
-                    if not self.mem_lock:
-                        chunks.append(self.twodim_coords(y_dim, x_dim, z, total_pixels))
-                    else:
-                        chunks.append([y_dim, x_dim, z, total_pixels, None])
-
-
+                    chunks.append([y_dim, x_dim, z, total_pixels, None])

                else:
                    # Determine which dimension to divide (the largest one)
@@ -1453,10 +908,8 @@ class InteractiveSegmenter:
                        for i in range(0, y_dim, div_size):
                            end_i = min(i + div_size, y_dim)

-
-
-                            else:
-                                chunks.append([y_dim, x_dim, z, None, ['y', i, end_i]])
+
+                            chunks.append([y_dim, x_dim, z, None, ['y', i, end_i]])

                    else: # largest_dim == 'x'
                        div_size = int(np.ceil(x_dim / num_divisions))
@@ -1464,24 +917,10 @@ class InteractiveSegmenter:
                        for i in range(0, x_dim, div_size):
                            end_i = min(i + div_size, x_dim)

-
-                                chunks.append(self.twodim_coords(y_dim, x_dim, z, None, ['x', i, end_i]))
-                            else:
-                                chunks.append([y_dim, x_dim, z, None, ['x', i, end_i]])
+                            chunks.append([y_dim, x_dim, z, None, ['x', i, end_i]])

            return chunks

-        #try:
-            #from cuml.ensemble import RandomForestClassifier as cuRandomForestClassifier
-        #except:
-            #print("Cannot find cuML, using CPU to segment instead...")
-            #gpu = False
-
-        if self.feature_cache is None and not self.mem_lock and not self.use_two:
-            with self.lock:
-                if self.feature_cache is None:
-                    self.feature_cache = self.compute_feature_maps()
-
         print("Chunking data...")

         if not self.use_two:
@@ -1522,22 +961,8 @@ class InteractiveSegmenter:
                    y_end = min(y_start + chunk_size, self.image_3d.shape[1])
                    x_end = min(x_start + chunk_size, self.image_3d.shape[2])

-
-
-                        coords = [z_start, z_end, y_start, y_end, x_start, x_end]
-                        chunks.append(coords)
-
-                    else:
-                        # Consider moving this to process chunk ??
-                        coords = np.stack(np.meshgrid(
-                            np.arange(z_start, z_end),
-                            np.arange(y_start, y_end),
-                            np.arange(x_start, x_end),
-                            indexing='ij'
-                        )).reshape(3, -1).T
-
-                        chunks.append(list(map(tuple, coords)))
-
+                    coords = [z_start, z_end, y_start, y_end, x_start, x_end]
+                    chunks.append(coords)


        else:
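After this hunk a 3D chunk is stored only as its six bounds; the per-voxel coordinate list is built later, inside process_chunk (the np.meshgrid added in the earlier hunk). A sketch of that deferred expansion with illustrative bounds:

    import numpy as np
    coords = [0, 64, 0, 64, 0, 64]                      # [z_start, z_end, y_start, y_end, x_start, x_end]
    z0, z1, y0, y1, x0, x1 = coords
    expanded = np.stack(np.meshgrid(np.arange(z0, z1),
                                    np.arange(y0, y1),
                                    np.arange(x0, x1),
                                    indexing='ij')).reshape(3, -1).T
    print(expanded.shape)                               # (262144, 3): built per chunk instead of up front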
@@ -1549,34 +974,23 @@ class InteractiveSegmenter:

        print("Segmenting chunks...")

-
-
-
-
-
-
-
-
-
-
-
-
-
-
-                    background_coords.update(back)
-                    print(f"Processed {i}/{len(chunks)} chunks")
-        else: #Prioritize RAM
-            for i, chunk in enumerate(chunks):
-                fore, back = self.process_chunk(chunk)
-                foreground_coords.update(fore)
-                background_coords.update(back)
-                try:
-                    chunk[i] = None #Help garbage collection
-                except:
-                    pass
-                print(f"Processed {i}/{len(chunks)} chunks")
+        for i, chunk in enumerate(chunks):
+            fore, _ = self.process_chunk(chunk)
+            fg_array = np.array(list(fore))
+            del fore
+            if len(fg_array) > 0: # Check if we have any foreground coordinates
+                # Unpack into separate coordinate arrays
+                z_coords, y_coords, x_coords = fg_array[:, 0], fg_array[:, 1], fg_array[:, 2]
+                # Assign values in a single vectorized operation
+                array[z_coords, y_coords, x_coords] = 255
+            try:
+                chunk[i] = None #Help garbage collection
+            except:
+                pass
+            print(f"Processed {i}/{len(chunks)} chunks")

-
+        #Ok so this should be returned one chunk at a time I presume.
+        return array

     def update_position(self, z=None, x=None, y=None):
         """Update current position for chunk prioritization with safeguards"""
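Combined with the new array parameter from the earlier def segment_volume(self, array, ...) hunk, segmentation now writes foreground voxels as 255 into a caller-supplied volume and returns it, rather than accumulating coordinate sets. A hedged usage sketch (the constructor arguments and the calling convention are assumptions; the real call site is in nettracer_gui.py, which is not shown here):

    import numpy as np
    image_3d = np.random.rand(8, 64, 64).astype(np.float32)
    seg = InteractiveSegmenter(image_3d)              # constructor arguments assumed
    # assumes the forest has already been fit from user-labelled voxels elsewhere in the class
    out = np.zeros(image_3d.shape, dtype=np.uint8)    # caller provides the output volume
    out = seg.segment_volume(out)                     # foreground voxels set to 255, array returned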
@@ -1666,13 +1080,6 @@ class InteractiveSegmenter:

     def segment_volume_realtime(self, gpu = False):

-        try:
-            from cuml.ensemble import RandomForestClassifier as cuRandomForestClassifier
-        except:
-            print("Cannot find cuML, using CPU to segment instead...")
-            gpu = False
-
-

         if self.realtimechunks is None:
             self.get_realtime_chunks()