nettracer3d 0.5.9__py3-none-any.whl → 0.6.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
nettracer3d/segmenter.py CHANGED
@@ -20,7 +20,6 @@ from scipy import ndimage
20
20
  import multiprocessing
21
21
  from collections import defaultdict
22
22
 
23
-
24
23
  class InteractiveSegmenter:
25
24
  def __init__(self, image_3d, use_gpu=True):
26
25
  self.image_3d = image_3d
@@ -81,6 +80,7 @@ class InteractiveSegmenter:
81
80
  self._currently_processing = False
82
81
  self._skip_next_update = False
83
82
  self._last_processed_slice = None
83
+ self.mem_lock = False
84
84
 
85
85
  def segment_slice_chunked(self, slice_z, block_size=64):
86
86
  """
@@ -203,42 +203,44 @@ class InteractiveSegmenter:
203
203
  if self._currently_processing == slice_z:
204
204
  self._currently_processing = None
205
205
 
206
- def compute_deep_feature_maps_cpu(self):
206
+ def compute_deep_feature_maps_cpu(self, image_3d = None):
207
207
  """Compute feature maps using CPU"""
208
208
  features = []
209
- original_shape = self.image_3d.shape
209
+ if image_3d is None:
210
+ image_3d = self.image_3d
211
+ original_shape = image_3d.shape
210
212
 
211
213
  # Gaussian and DoG using scipy
212
- print("Obtaining gaussians")
214
+ #print("Obtaining gaussians")
213
215
  for sigma in [0.5, 1.0, 2.0, 4.0]:
214
- smooth = ndimage.gaussian_filter(self.image_3d, sigma)
216
+ smooth = ndimage.gaussian_filter(image_3d, sigma)
215
217
  features.append(smooth)
216
218
 
217
- print("Computing local statistics")
219
+ #print("Computing local statistics")
218
220
  # Local statistics using scipy's convolve
219
221
  window_size = 5
220
222
  kernel = np.ones((window_size, window_size, window_size)) / (window_size**3)
221
223
 
222
224
  # Local mean
223
- local_mean = ndimage.convolve(self.image_3d, kernel, mode='reflect')
225
+ local_mean = ndimage.convolve(image_3d, kernel, mode='reflect')
224
226
  features.append(local_mean)
225
227
 
226
228
  # Local variance
227
- mean = np.mean(self.image_3d)
228
- local_var = ndimage.convolve((self.image_3d - mean)**2, kernel, mode='reflect')
229
+ mean = np.mean(image_3d)
230
+ local_var = ndimage.convolve((image_3d - mean)**2, kernel, mode='reflect')
229
231
  features.append(local_var)
230
232
 
231
- print("Computing sobel and gradients")
233
+ #print("Computing sobel and gradients")
232
234
  # Gradient computations using scipy
233
- gx = ndimage.sobel(self.image_3d, axis=2, mode='reflect')
234
- gy = ndimage.sobel(self.image_3d, axis=1, mode='reflect')
235
- gz = ndimage.sobel(self.image_3d, axis=0, mode='reflect')
235
+ gx = ndimage.sobel(image_3d, axis=2, mode='reflect')
236
+ gy = ndimage.sobel(image_3d, axis=1, mode='reflect')
237
+ gz = ndimage.sobel(image_3d, axis=0, mode='reflect')
236
238
 
237
239
  # Gradient magnitude
238
240
  gradient_magnitude = np.sqrt(gx**2 + gy**2 + gz**2)
239
241
  features.append(gradient_magnitude)
240
242
 
241
- print("Computing second-order features")
243
+ #print("Computing second-order features")
242
244
  # Second-order gradients
243
245
  gxx = ndimage.sobel(gx, axis=2, mode='reflect')
244
246
  gyy = ndimage.sobel(gy, axis=1, mode='reflect')
@@ -252,7 +254,162 @@ class InteractiveSegmenter:
252
254
  hessian_det = gxx * gyy * gzz
253
255
  features.append(hessian_det)
254
256
 
255
- print("Verifying shapes")
257
+ #print("Verifying shapes")
258
+ for i, feat in enumerate(features):
259
+ if feat.shape != original_shape:
260
+ feat_adjusted = np.expand_dims(feat, axis=0)
261
+ if feat_adjusted.shape != original_shape:
262
+ raise ValueError(f"Feature {i} has shape {feat.shape}, expected {original_shape}")
263
+ features[i] = feat_adjusted
264
+
265
+ return np.stack(features, axis=-1)
266
+
267
def compute_deep_feature_maps_cpu_parallel(self, image_3d=None):
    """Compute the deep 3D feature stack on the CPU with a thread pool.

    Threads overlap the scipy/numpy filter calls, which release the GIL
    inside their C routines. Work is organised in three stages so a
    filter is only submitted once the arrays it depends on exist.

    Parameters
    ----------
    image_3d : ndarray, optional
        3D volume to featurize; defaults to ``self.image_3d``.

    Returns
    -------
    ndarray
        ``image_3d.shape + (9,)`` stack; channels in order: Gaussian
        blurs (sigma 0.5/1/2/4), local mean, local variance, gradient
        magnitude, Laplacian, "Hessian determinant" (see note below).

    Raises
    ------
    RuntimeError
        If any worker task fails.
    """
    if image_3d is None:
        image_3d = self.image_3d

    original_shape = image_3d.shape

    # Use ThreadPoolExecutor for parallelization (capped at 7 workers).
    with ThreadPoolExecutor(max_workers=min(7, multiprocessing.cpu_count())) as executor:
        # Stage 1: independent computations that can run concurrently.
        futures = []

        # Gaussian blurs at increasing scales.
        def compute_gaussian(sigma):
            return ndimage.gaussian_filter(image_3d, sigma)

        for sigma in [0.5, 1.0, 2.0, 4.0]:
            future = executor.submit(compute_gaussian, sigma)
            futures.append(('gaussian', sigma, future))

        # Local mean over a 5x5x5 window via box-kernel convolution.
        def compute_local_mean():
            window_size = 5
            kernel = np.ones((window_size, window_size, window_size)) / (window_size**3)
            return ndimage.convolve(image_3d, kernel, mode='reflect')

        future = executor.submit(compute_local_mean)
        futures.append(('local_mean', None, future))

        # Local "variance": squared deviation from the *global* mean,
        # box-averaged over the window (not the textbook local variance).
        def compute_local_variance():
            window_size = 5
            kernel = np.ones((window_size, window_size, window_size)) / (window_size**3)
            mean = np.mean(image_3d)
            return ndimage.convolve((image_3d - mean)**2, kernel, mode='reflect')

        future = executor.submit(compute_local_variance)
        futures.append(('local_var', None, future))

        # First-order Sobel derivatives along x (axis 2), y (axis 1), z (axis 0).
        def compute_gradients():
            gx = ndimage.sobel(image_3d, axis=2, mode='reflect')
            gy = ndimage.sobel(image_3d, axis=1, mode='reflect')
            gz = ndimage.sobel(image_3d, axis=0, mode='reflect')
            return gx, gy, gz

        future = executor.submit(compute_gradients)
        futures.append(('gradients', None, future))

        # Join stage 1, keyed so later stages can look results up by name.
        results = {}
        for task_type, params, future in futures:
            try:
                result = future.result()
                if task_type == 'gradients':
                    # Store the gradient components separately
                    gx, gy, gz = result
                    results['gx'] = gx
                    results['gy'] = gy
                    results['gz'] = gz
                else:
                    results[f"{task_type}_{params}" if params is not None else task_type] = result
            except Exception as e:
                raise RuntimeError(f"Error in task {task_type}: {str(e)}")

        # Stage 2: computations that need the stage-1 gradients.
        futures = []

        # Gradient magnitude (depends on gradients).
        def compute_gradient_magnitude(gx, gy, gz):
            return np.sqrt(gx**2 + gy**2 + gz**2)

        future = executor.submit(compute_gradient_magnitude,
                                 results['gx'], results['gy'], results['gz'])
        futures.append(('gradient_magnitude', None, future))

        # Second-order (diagonal) derivatives: d2/dx2, d2/dy2, d2/dz2.
        def compute_second_derivatives(gx, gy, gz):
            gxx = ndimage.sobel(gx, axis=2, mode='reflect')
            gyy = ndimage.sobel(gy, axis=1, mode='reflect')
            gzz = ndimage.sobel(gz, axis=0, mode='reflect')
            return gxx, gyy, gzz

        future = executor.submit(compute_second_derivatives,
                                 results['gx'], results['gy'], results['gz'])
        futures.append(('second_derivatives', None, future))

        # Join stage 2.
        for task_type, params, future in futures:
            try:
                result = future.result()
                if task_type == 'second_derivatives':
                    # Store the second derivative components separately
                    gxx, gyy, gzz = result
                    results['gxx'] = gxx
                    results['gyy'] = gyy
                    results['gzz'] = gzz
                else:
                    results[task_type] = result
            except Exception as e:
                raise RuntimeError(f"Error in task {task_type}: {str(e)}")

        # Stage 3: features derived from the second derivatives.
        futures = []

        def compute_laplacian(gxx, gyy, gzz):
            return gxx + gyy + gzz

        future = executor.submit(compute_laplacian,
                                 results['gxx'], results['gyy'], results['gzz'])
        futures.append(('laplacian', None, future))

        # NOTE(review): gxx * gyy * gzz is the determinant only of a
        # *diagonal* Hessian (cross terms gxy/gxz/gyz are ignored). Kept
        # as-is to match the sequential compute_deep_feature_maps_cpu.
        def compute_hessian_det(gxx, gyy, gzz):
            return gxx * gyy * gzz

        future = executor.submit(compute_hessian_det,
                                 results['gxx'], results['gyy'], results['gzz'])
        futures.append(('hessian_det', None, future))

        # Join stage 3.
        for task_type, params, future in futures:
            try:
                result = future.result()
                results[task_type] = result
            except Exception as e:
                raise RuntimeError(f"Error in task {task_type}: {str(e)}")

        # Assemble the channels in the fixed order downstream code expects.
        features = []

        # Add Gaussian features
        for sigma in [0.5, 1.0, 2.0, 4.0]:
            features.append(results[f'gaussian_{sigma}'])

        # Add local statistics
        features.append(results['local_mean'])
        features.append(results['local_var'])

        # Add gradient magnitude
        features.append(results['gradient_magnitude'])

        # Add Laplacian and Hessian determinant
        features.append(results['laplacian'])
        features.append(results['hessian_det'])

    # Verify shapes: a feature that lost a leading singleton axis is
    # re-expanded before stacking; anything else is a hard error.
    for i, feat in enumerate(features):
        if feat.shape != original_shape:
            feat_adjusted = np.expand_dims(feat, axis=0)
            if feat_adjusted.shape != original_shape:
                raise ValueError(f"Feature {i} has shape {feat.shape}, expected {original_shape}")
            features[i] = feat_adjusted

    return np.stack(features, axis=-1)
325
482
 
483
+ def compute_deep_feature_maps_cpu_2d_parallel(self, z=None):
484
+ """Compute 2D feature maps using CPU with thread-based parallelism"""
485
+ image_2d = self.image_3d[z, :, :]
486
+ original_shape = image_2d.shape
487
+
488
+ # Use ThreadPoolExecutor for parallelization
489
+ with ThreadPoolExecutor(max_workers=min(7, multiprocessing.cpu_count())) as executor:
490
+ # Stage 1: Independent computations that can be parallelized
491
+ futures = []
492
+
493
+ # Gaussian smoothing
494
+ def compute_gaussian(sigma):
495
+ return ndimage.gaussian_filter(image_2d, sigma)
496
+
497
+ for sigma in [0.5, 1.0, 2.0, 4.0]:
498
+ future = executor.submit(compute_gaussian, sigma)
499
+ futures.append(('gaussian', sigma, future))
500
+
501
+ # Local statistics computation
502
+ def compute_local_mean():
503
+ window_size = 5
504
+ kernel = np.ones((window_size, window_size)) / (window_size**2)
505
+ return ndimage.convolve(image_2d, kernel, mode='reflect')
506
+
507
+ future = executor.submit(compute_local_mean)
508
+ futures.append(('local_mean', None, future))
509
+
510
+ def compute_local_variance():
511
+ window_size = 5
512
+ kernel = np.ones((window_size, window_size)) / (window_size**2)
513
+ mean = np.mean(image_2d)
514
+ return ndimage.convolve((image_2d - mean)**2, kernel, mode='reflect')
515
+
516
+ future = executor.submit(compute_local_variance)
517
+ futures.append(('local_var', None, future))
518
+
519
+ # Gradient computation
520
+ def compute_gradients():
521
+ gx = ndimage.sobel(image_2d, axis=1, mode='reflect') # x direction
522
+ gy = ndimage.sobel(image_2d, axis=0, mode='reflect') # y direction
523
+ return gx, gy
524
+
525
+ future = executor.submit(compute_gradients)
526
+ futures.append(('gradients', None, future))
527
+
528
+ # Collect results for the independent computations
529
+ results = {}
530
+ for task_type, params, future in futures:
531
+ try:
532
+ result = future.result()
533
+ if task_type == 'gradients':
534
+ # Store the gradient components separately
535
+ gx, gy = result
536
+ results['gx'] = gx
537
+ results['gy'] = gy
538
+ else:
539
+ results[f"{task_type}_{params}" if params is not None else task_type] = result
540
+ except Exception as e:
541
+ raise RuntimeError(f"Error in task {task_type}: {str(e)}")
542
+
543
+ # Stage 2: Dependent computations that need results from Stage 1
544
+ futures = []
545
+
546
+ # Gradient magnitude (depends on gradients)
547
+ def compute_gradient_magnitude(gx, gy):
548
+ return np.sqrt(gx**2 + gy**2)
549
+
550
+ future = executor.submit(compute_gradient_magnitude, results['gx'], results['gy'])
551
+ futures.append(('gradient_magnitude', None, future))
552
+
553
+ # Second-order gradients (depend on first gradients)
554
+ def compute_second_derivatives(gx, gy):
555
+ gxx = ndimage.sobel(gx, axis=1, mode='reflect')
556
+ gyy = ndimage.sobel(gy, axis=0, mode='reflect')
557
+ # Cross derivatives for Hessian determinant
558
+ gxy = ndimage.sobel(gx, axis=0, mode='reflect')
559
+ gyx = ndimage.sobel(gy, axis=1, mode='reflect')
560
+ return gxx, gyy, gxy, gyx
561
+
562
+ future = executor.submit(compute_second_derivatives, results['gx'], results['gy'])
563
+ futures.append(('second_derivatives', None, future))
564
+
565
+ # Collect results for the dependent computations
566
+ for task_type, params, future in futures:
567
+ try:
568
+ result = future.result()
569
+ if task_type == 'second_derivatives':
570
+ # Store the second derivative components separately
571
+ gxx, gyy, gxy, gyx = result
572
+ results['gxx'] = gxx
573
+ results['gyy'] = gyy
574
+ results['gxy'] = gxy
575
+ results['gyx'] = gyx
576
+ else:
577
+ results[task_type] = result
578
+ except Exception as e:
579
+ raise RuntimeError(f"Error in task {task_type}: {str(e)}")
580
+
581
+ # Stage 3: Final computations that depend on Stage 2 results
582
+ futures = []
583
+
584
+ # Laplacian and Hessian determinant (depend on second derivatives)
585
+ def compute_laplacian(gxx, gyy):
586
+ return gxx + gyy
587
+
588
+ future = executor.submit(compute_laplacian, results['gxx'], results['gyy'])
589
+ futures.append(('laplacian', None, future))
590
+
591
+ def compute_hessian_det(gxx, gyy, gxy, gyx):
592
+ return gxx * gyy - gxy * gyx
593
+
594
+ future = executor.submit(compute_hessian_det,
595
+ results['gxx'], results['gyy'],
596
+ results['gxy'], results['gyx'])
597
+ futures.append(('hessian_det', None, future))
598
+
599
+ # Collect final results
600
+ for task_type, params, future in futures:
601
+ try:
602
+ result = future.result()
603
+ results[task_type] = result
604
+ except Exception as e:
605
+ raise RuntimeError(f"Error in task {task_type}: {str(e)}")
606
+
607
+ # Organize results in the expected order
608
+ features = []
609
+
610
+ # Add Gaussian features
611
+ for sigma in [0.5, 1.0, 2.0, 4.0]:
612
+ features.append(results[f'gaussian_{sigma}'])
613
+
614
+ # Add local statistics
615
+ features.append(results['local_mean'])
616
+ features.append(results['local_var'])
617
+
618
+ # Add gradient magnitude
619
+ features.append(results['gradient_magnitude'])
620
+
621
+ # Add Laplacian and Hessian determinant
622
+ features.append(results['laplacian'])
623
+ features.append(results['hessian_det'])
624
+
625
+ # Verify shapes
626
+ for i, feat in enumerate(features):
627
+ if feat.shape != original_shape:
628
+ # Check dimensionality and expand if needed
629
+ if len(feat.shape) < len(original_shape):
630
+ feat_adjusted = feat
631
+ missing_dims = len(original_shape) - len(feat.shape)
632
+ for _ in range(missing_dims):
633
+ feat_adjusted = np.expand_dims(feat_adjusted, axis=0)
634
+
635
+ if feat_adjusted.shape != original_shape:
636
+ raise ValueError(f"Feature {i} has shape {feat.shape}, expected {original_shape}")
637
+
638
+ features[i] = feat_adjusted
639
+
640
+ return np.stack(features, axis=-1)
641
+
326
642
  def compute_feature_maps(self):
327
643
  """Compute all feature maps using GPU acceleration"""
328
644
  #if not self.use_gpu:
@@ -513,38 +829,186 @@ class InteractiveSegmenter:
513
829
 
514
830
  return np.stack(features, axis=-1)
515
831
 
516
- def compute_feature_maps_cpu(self):
832
+ def compute_feature_maps_cpu_2d_parallel(self, z=None):
833
+ """Compute feature maps for 2D images using CPU with thread-based parallelism"""
834
+ image_2d = self.image_3d[z, :, :]
835
+ original_shape = image_2d.shape
836
+
837
+ # Use ThreadPoolExecutor for parallelization
838
+ with ThreadPoolExecutor(max_workers=min(7, multiprocessing.cpu_count())) as executor:
839
+ # Submit tasks for independent computations
840
+ futures = []
841
+
842
+ # Gaussian smoothing at different scales
843
+ def compute_gaussian(sigma):
844
+ return ndimage.gaussian_filter(image_2d, sigma)
845
+
846
+ gaussian_sigmas = [0.5, 1.0, 2.0, 4.0]
847
+ for sigma in gaussian_sigmas:
848
+ future = executor.submit(compute_gaussian, sigma)
849
+ futures.append(('gaussian', sigma, future))
850
+
851
+ # Difference of Gaussians
852
+ def compute_dog(s1, s2):
853
+ g1 = ndimage.gaussian_filter(image_2d, s1)
854
+ g2 = ndimage.gaussian_filter(image_2d, s2)
855
+ return g1 - g2
856
+
857
+ dog_pairs = [(1, 2), (2, 4)]
858
+ for (s1, s2) in dog_pairs:
859
+ future = executor.submit(compute_dog, s1, s2)
860
+ futures.append(('dog', (s1, s2), future))
861
+
862
+ # Gradient computation
863
+ def compute_gradient_magnitude():
864
+ gx = ndimage.sobel(image_2d, axis=1, mode='reflect') # x direction
865
+ gy = ndimage.sobel(image_2d, axis=0, mode='reflect') # y direction
866
+ return np.sqrt(gx**2 + gy**2)
867
+
868
+ future = executor.submit(compute_gradient_magnitude)
869
+ futures.append(('gradient_magnitude', None, future))
870
+
871
+ # Collect results
872
+ results = {}
873
+ for task_type, params, future in futures:
874
+ try:
875
+ result = future.result()
876
+ if params is not None:
877
+ if task_type == 'dog':
878
+ s1, s2 = params
879
+ results[f"{task_type}_{s1}_{s2}"] = result
880
+ else:
881
+ results[f"{task_type}_{params}"] = result
882
+ else:
883
+ results[task_type] = result
884
+ except Exception as e:
885
+ raise RuntimeError(f"Error in task {task_type} with params {params}: {str(e)}")
886
+
887
+ # Organize results in the expected order
888
+ features = []
889
+
890
+ # Add Gaussian features
891
+ for sigma in gaussian_sigmas:
892
+ features.append(results[f'gaussian_{sigma}'])
893
+
894
+ # Add Difference of Gaussians features
895
+ for (s1, s2) in dog_pairs:
896
+ features.append(results[f'dog_{s1}_{s2}'])
897
+
898
+ # Add gradient magnitude
899
+ features.append(results['gradient_magnitude'])
900
+
901
+ # Verify shapes
902
+ for i, feat in enumerate(features):
903
+ if feat.shape != original_shape:
904
+ # Check dimensionality and expand if needed
905
+ if len(feat.shape) < len(original_shape):
906
+ feat_adjusted = feat
907
+ missing_dims = len(original_shape) - len(feat.shape)
908
+ for _ in range(missing_dims):
909
+ feat_adjusted = np.expand_dims(feat_adjusted, axis=0)
910
+
911
+ if feat_adjusted.shape != original_shape:
912
+ raise ValueError(f"Feature {i} has shape {feat.shape}, expected {original_shape}")
913
+
914
+ features[i] = feat_adjusted
915
+
916
+ return np.stack(features, axis=-1)
917
+
918
def compute_feature_maps_cpu(self, image_3d = None):
    """Build the quick CPU feature stack for a 3D volume.

    Channels, in order: Gaussian blurs at sigma 0.5/1/2/4, difference of
    Gaussians for the (1, 2) and (2, 4) sigma pairs, and the Sobel
    gradient magnitude — seven maps stacked on a trailing axis. Falls
    back to ``self.image_3d`` when no volume is supplied.
    """
    if image_3d is None:
        image_3d = self.image_3d

    original_shape = image_3d.shape

    # Gaussian blurs at increasing scales; cache them so the DoG pairs
    # below can reuse the sigma 1/2/4 results instead of re-filtering.
    sigmas = (0.5, 1.0, 2.0, 4.0)
    blurred = {s: ndimage.gaussian_filter(image_3d, s) for s in sigmas}
    features = [blurred[s] for s in sigmas]

    # Difference of Gaussians (band-pass) from the cached blurs.
    for narrow, wide in ((1, 2), (2, 4)):
        features.append(blurred[narrow] - blurred[wide])

    # Sobel gradient magnitude over x (axis 2), y (axis 1), z (axis 0).
    derivs = [ndimage.sobel(image_3d, axis=ax, mode='reflect') for ax in (2, 1, 0)]
    features.append(np.sqrt(derivs[0]**2 + derivs[1]**2 + derivs[2]**2))

    # A feature that dropped a leading singleton axis is re-expanded;
    # any other shape mismatch is an error.
    for i, feat in enumerate(features):
        if feat.shape == original_shape:
            continue
        feat_adjusted = np.expand_dims(feat, axis=0)
        if feat_adjusted.shape != original_shape:
            raise ValueError(f"Feature {i} has shape {feat.shape}, expected {original_shape}")
        features[i] = feat_adjusted

    return np.stack(features, axis=-1)
962
+
963
+ def compute_feature_maps_cpu_parallel(self, image_3d=None):
964
+ """Use ThreadPoolExecutor
965
+
966
+ While threads don't give true parallelism for CPU-bound tasks due to the GIL,
967
+ numpy/scipy release the GIL during computation, so this can still be effective.
968
+ """
969
+ if image_3d is None:
970
+ image_3d = self.image_3d
971
+ original_shape = image_3d.shape
972
+
973
+ features = []
974
+
975
+ # Using ThreadPoolExecutor which is more compatible with GUI applications
976
+ with ThreadPoolExecutor(max_workers=min(7, multiprocessing.cpu_count())) as executor:
977
+ # Submit all tasks to the executor
978
+ futures = []
979
+
980
+ # Gaussian smoothing at different scales
981
+ for sigma in [0.5, 1.0, 2.0, 4.0]:
982
+ future = executor.submit(ndimage.gaussian_filter, image_3d, sigma)
983
+ futures.append(future)
984
+
985
+ # Difference of Gaussians
986
+ for (s1, s2) in [(1, 2), (2, 4)]:
987
+ # Need to define a local function for this task
988
+ def compute_dog_local(img, s1, s2):
989
+ g1 = ndimage.gaussian_filter(img, s1)
990
+ g2 = ndimage.gaussian_filter(img, s2)
991
+ return g1 - g2
992
+
993
+ future = executor.submit(compute_dog_local, image_3d, s1, s2)
994
+ futures.append(future)
995
+
996
+ # Gradient magnitude
997
+ def compute_gradient_local(img):
998
+ gx = ndimage.sobel(img, axis=2, mode='reflect')
999
+ gy = ndimage.sobel(img, axis=1, mode='reflect')
1000
+ gz = ndimage.sobel(img, axis=0, mode='reflect')
1001
+ return np.sqrt(gx**2 + gy**2 + gz**2)
1002
+
1003
+ future = executor.submit(compute_gradient_local, image_3d)
1004
+ futures.append(future)
1005
+
1006
+ # Collect results
1007
+ for future in futures:
1008
+ result = future.result()
1009
+ features.append(result)
1010
+
1011
+ # Verify shapes
548
1012
  for i, feat in enumerate(features):
549
1013
  if feat.shape != original_shape:
550
1014
  feat_adjusted = np.expand_dims(feat, axis=0)
@@ -723,17 +1187,66 @@ class InteractiveSegmenter:
723
1187
  return dict(z_dict) # Convert back to regular dict
724
1188
 
725
1189
  def process_chunk(self, chunk_coords):
726
- """Process a chunk of coordinates"""
727
-
1190
+ """
1191
+ Process a chunk of coordinates, handling both mem_lock and non-mem_lock cases.
1192
+ Uses a consistent approach based on coordinates.
1193
+
1194
+ Parameters:
1195
+ -----------
1196
+ chunk_coords : list of tuples
1197
+ List of (z,y,x) coordinate tuples to process
1198
+
1199
+ Returns:
1200
+ --------
1201
+ tuple : (foreground, background)
1202
+ Sets of coordinates classified as foreground or background
1203
+ """
728
1204
  foreground = set()
729
1205
  background = set()
730
-
1206
+
731
1207
  if not self.use_two:
732
-
733
-
734
- features = [self.feature_cache[z, y, x] for z, y, x in chunk_coords]
1208
+ if self.mem_lock:
1209
+ # For mem_lock, we need to extract a subarray and compute features
1210
+
1211
+ # Find min/max bounds of the coordinates to get the smallest containing subarray
1212
+ z_coords = [z for z, y, x in chunk_coords]
1213
+ y_coords = [y for z, y, x in chunk_coords]
1214
+ x_coords = [x for z, y, x in chunk_coords]
1215
+
1216
+ z_min, z_max = min(z_coords), max(z_coords)
1217
+ y_min, y_max = min(y_coords), max(y_coords)
1218
+ x_min, x_max = min(x_coords), max(x_coords)
1219
+
1220
+
1221
+ # Extract the subarray
1222
+ subarray = self.image_3d[z_min:z_max+1, y_min:y_max+1, x_min:x_max+1]
1223
+
1224
+ # Compute features for this subarray
1225
+ if self.speed:
1226
+ feature_map = self.compute_feature_maps_cpu_parallel(subarray) #If the interactive segmenter is slow
1227
+ else: #Due to the parallel, consider singleton implementation for it specifically
1228
+ feature_map = self.compute_deep_feature_maps_cpu_parallel(subarray)
1229
+
1230
+ # Extract features for each coordinate, adjusting for subarray offset
1231
+ features = []
1232
+ for z, y, x in chunk_coords:
1233
+ # Transform global coordinates to local subarray coordinates
1234
+ local_z = z - z_min
1235
+ local_y = y - y_min
1236
+ local_x = x - x_min
1237
+
1238
+ # Get feature at this position
1239
+ feature = feature_map[local_z, local_y, local_x]
1240
+ features.append(feature)
1241
+
1242
+ else:
1243
+ # For non-mem_lock, simply use the feature cache
1244
+ features = [self.feature_cache[z, y, x] for z, y, x in chunk_coords]
1245
+
1246
+ # Make predictions
735
1247
  predictions = self.model.predict(features)
736
1248
 
1249
+ # Assign coordinates based on predictions
737
1250
  for coord, pred in zip(chunk_coords, predictions):
738
1251
  if pred:
739
1252
  foreground.add(coord)
@@ -776,6 +1289,10 @@ class InteractiveSegmenter:
776
1289
  """Segment volume using parallel processing of chunks with vectorized chunk creation"""
777
1290
  #Change the above chunk size to None to have it auto-compute largest chunks (not sure which is faster, 64 seems reasonable in test cases)
778
1291
 
1292
+ if self.mem_lock:
1293
+ chunk_size = 64 #memory efficient chunk
1294
+
1295
+
779
1296
  def create_2d_chunks():
780
1297
  """
781
1298
  Create chunks by z-slices for 2D processing.
@@ -814,7 +1331,7 @@ class InteractiveSegmenter:
814
1331
  print("Cannot find cuML, using CPU to segment instead...")
815
1332
  gpu = False
816
1333
 
817
- if self.feature_cache is None:
1334
+ if self.feature_cache is None and not self.mem_lock and not self.use_two:
818
1335
  with self.lock:
819
1336
  if self.feature_cache is None:
820
1337
  self.feature_cache = self.compute_feature_maps()
@@ -877,23 +1394,28 @@ class InteractiveSegmenter:
877
1394
 
878
1395
  print("Segmenting chunks...")
879
1396
 
880
-
881
- with ThreadPoolExecutor(max_workers=multiprocessing.cpu_count()) as executor:
882
- if gpu:
883
- try:
884
- futures = [executor.submit(self.process_chunk_GPU, chunk) for chunk in chunks]
885
- except:
886
- futures = [executor.submit(self.process_chunk, chunk) for chunk in chunks]
1397
+ if not self.mem_lock:
1398
+ with ThreadPoolExecutor(max_workers=multiprocessing.cpu_count()) as executor:
1399
+ if gpu:
1400
+ try:
1401
+ futures = [executor.submit(self.process_chunk_GPU, chunk) for chunk in chunks]
1402
+ except:
1403
+ futures = [executor.submit(self.process_chunk, chunk) for chunk in chunks]
887
1404
 
888
- else:
889
- futures = [executor.submit(self.process_chunk, chunk) for chunk in chunks]
890
-
891
- for i, future in enumerate(futures):
892
- fore, back = future.result()
1405
+ else:
1406
+ futures = [executor.submit(self.process_chunk, chunk) for chunk in chunks]
1407
+
1408
+ for i, future in enumerate(futures):
1409
+ fore, back = future.result()
1410
+ foreground_coords.update(fore)
1411
+ background_coords.update(back)
1412
+ print(f"Processed {i}/{len(chunks)} chunks")
1413
+ else: #Prioritize RAM
1414
+ for i, chunk in enumerate(chunks):
1415
+ fore, back = self.process_chunk(chunk)
893
1416
  foreground_coords.update(fore)
894
1417
  background_coords.update(back)
895
- if i % 10 == 0:
896
- print(f"Processed {i}/{len(chunks)} chunks")
1418
+ print(f"Processed {i}/{len(chunks)} chunks")
897
1419
 
898
1420
  return foreground_coords, background_coords
899
1421
 
@@ -933,6 +1455,7 @@ class InteractiveSegmenter:
933
1455
  # Update previous z
934
1456
  self.prev_z = z
935
1457
 
1458
+
936
1459
  def get_realtime_chunks(self, chunk_size = 64):
937
1460
  print("Computing some overhead...")
938
1461
 
@@ -1105,11 +1628,17 @@ class InteractiveSegmenter:
1105
1628
  except:
1106
1629
  pass
1107
1630
 
1108
- def train_batch(self, foreground_array, speed = True, use_gpu = False, use_two = False):
1631
+ def train_batch(self, foreground_array, speed = True, use_gpu = False, use_two = False, mem_lock = False):
1109
1632
  """Train directly on foreground and background arrays"""
1110
1633
 
1111
1634
  self.speed = speed
1112
1635
  self.cur_gpu = use_gpu
1636
+ if mem_lock != self.mem_lock:
1637
+ self.realtimechunks = None #dump ram
1638
+ self.feature_cache = None
1639
+
1640
+
1641
+ self.mem_lock = mem_lock
1113
1642
 
1114
1643
  if self.current_speed != speed:
1115
1644
  self.feature_cache = None
@@ -1197,6 +1726,83 @@ class InteractiveSegmenter:
1197
1726
  background_features.append(feature_vector)
1198
1727
 
1199
1728
 
1729
+ elif mem_lock: #Forces ram efficiency
1730
+
1731
+ box_size = 32
1732
+
1733
+ # Memory-efficient approach: compute features only for necessary subarrays
1734
+ foreground_features = []
1735
+ background_features = []
1736
+
1737
+ # Find coordinates of foreground and background scribbles
1738
+ z_fore = np.argwhere(foreground_array == 1)
1739
+ z_back = np.argwhere(foreground_array == 2)
1740
+
1741
+ # If no scribbles, return empty lists
1742
+ if len(z_fore) == 0 and len(z_back) == 0:
1743
+ return foreground_features, background_features
1744
+
1745
+ # Get dimensions of the input array
1746
+ depth, height, width = foreground_array.shape
1747
+
1748
+ # Determine the minimum number of boxes needed to cover all scribbles
1749
+ half_box = box_size // 2
1750
+
1751
+ # Step 1: Find the minimum set of boxes that cover all scribbles
1752
+ # We'll divide the volume into a grid of boxes of size box_size
1753
+
1754
+ # Calculate how many boxes are needed in each dimension
1755
+ z_grid_size = (depth + box_size - 1) // box_size
1756
+ y_grid_size = (height + box_size - 1) // box_size
1757
+ x_grid_size = (width + box_size - 1) // box_size
1758
+
1759
+ # Track which grid cells contain scribbles
1760
+ grid_cells_with_scribbles = set()
1761
+
1762
+ # Map original coordinates to grid cells
1763
+ for z, y, x in np.vstack((z_fore, z_back)) if len(z_back) > 0 else z_fore:
1764
+ grid_z = z // box_size
1765
+ grid_y = y // box_size
1766
+ grid_x = x // box_size
1767
+ grid_cells_with_scribbles.add((grid_z, grid_y, grid_x))
1768
+
1769
+ # Create a mapping from original coordinates to their corresponding subarray and local coordinates
1770
+ coord_mapping = {}
1771
+
1772
+ # Step 2: Process each grid cell that contains scribbles
1773
+ for grid_z, grid_y, grid_x in grid_cells_with_scribbles:
1774
+ # Calculate the boundaries of this grid cell
1775
+ z_min = grid_z * box_size
1776
+ y_min = grid_y * box_size
1777
+ x_min = grid_x * box_size
1778
+
1779
+ z_max = min(z_min + box_size, depth)
1780
+ y_max = min(y_min + box_size, height)
1781
+ x_max = min(x_min + box_size, width)
1782
+
1783
+ # Extract the subarray
1784
+ subarray = self.image_3d[z_min:z_max, y_min:y_max, x_min:x_max]
1785
+ subarray2 = foreground_array[z_min:z_max, y_min:y_max, x_min:x_max]
1786
+
1787
+ # Compute features for this subarray
1788
+ if self.speed:
1789
+ subarray_features = self.compute_feature_maps_cpu_parallel(subarray)
1790
+ else:
1791
+ subarray_features = self.compute_deep_feature_maps_cpu_parallel(subarray)
1792
+
1793
+ # For each foreground point in this grid cell, extract its feature
1794
+ # Extract foreground features using a direct mask comparison
1795
+ local_fore_coords = np.argwhere(subarray2 == 1)
1796
+ for local_z, local_y, local_x in local_fore_coords:
1797
+ feature = subarray_features[local_z, local_y, local_x]
1798
+ foreground_features.append(feature)
1799
+
1800
+ # Extract background features using a direct mask comparison
1801
+ local_back_coords = np.argwhere(subarray2 == 2)
1802
+ for local_z, local_y, local_x in local_back_coords:
1803
+ feature = subarray_features[local_z, local_y, local_x]
1804
+ background_features.append(feature)
1805
+
1200
1806
  else:
1201
1807
 
1202
1808
  self.two_slices = []
@@ -1239,7 +1845,11 @@ class InteractiveSegmenter:
1239
1845
  y = np.hstack([np.ones(len(z_fore)), np.zeros(len(z_back))])
1240
1846
 
1241
1847
  # Train the model
1242
- self.model.fit(X, y)
1848
+ try:
1849
+ self.model.fit(X, y)
1850
+ except:
1851
+ print(X)
1852
+ print(y)
1243
1853
 
1244
1854
  self.current_speed = speed
1245
1855
 
@@ -1253,13 +1863,19 @@ class InteractiveSegmenter:
1253
1863
  if self._currently_segmenting is not None:
1254
1864
  return
1255
1865
 
1256
- with self.lock:
1257
- if speed:
1866
+ #with self.lock <- cant remember why this was here
1867
+ if speed:
1258
1868
 
1869
+ if self.mem_lock:
1870
+ output = self.compute_feature_maps_cpu_2d_parallel(z = z)
1871
+ else:
1259
1872
  output = self.compute_feature_maps_cpu_2d(z = z)
1260
1873
 
1261
- elif not speed:
1874
+ elif not speed:
1262
1875
 
1876
+ if self.mem_lock:
1877
+ output = self.compute_deep_feature_maps_cpu_2d_parallel(z = z)
1878
+ else:
1263
1879
  output = self.compute_deep_feature_maps_cpu_2d(z = z)
1264
1880
 
1265
1881
  return output