nettracer3d 0.6.5__py3-none-any.whl → 0.6.7__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
nettracer3d/segmenter.py CHANGED
@@ -1,18 +1,5 @@
  from sklearn.ensemble import RandomForestClassifier
  import numpy as np
- try:
-     import torch
- except:
-     pass
- try:
-     import cupy as cp
-     import cupyx.scipy.ndimage as cpx
- except:
-     pass
- try:
-     from cuml.ensemble import RandomForestClassifier as cuRandomForestClassifier
- except:
-     pass
  import concurrent.futures
  from concurrent.futures import ThreadPoolExecutor
  import threading
@@ -21,7 +8,7 @@ import multiprocessing
  from collections import defaultdict

  class InteractiveSegmenter:
-     def __init__(self, image_3d, use_gpu=True):
+     def __init__(self, image_3d, use_gpu=False):
          self.image_3d = image_3d
          self.patterns = []

@@ -82,7 +69,13 @@ class InteractiveSegmenter:
          self._last_processed_slice = None
          self.mem_lock = False

-     def segment_slice_chunked(self, slice_z, block_size=64):
+         #Adjustable feature map params:
+         self.alphas = [1,2,4,8]
+         self.windows = 10
+         self.dogs = [(1, 2), (2, 4), (4, 8)]
+         self.master_chunk = 49
+
+     def segment_slice_chunked(self, slice_z, block_size = 49):
          """
          A completely standalone method to segment a single z-slice in chunks
          with improved safeguards.
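These new attributes lift the previously hard-coded feature-map scales into adjustable parameters. A hedged usage sketch, assuming only the constructor and attribute names visible in this diff (the values shown are illustrative, and image_3d stands for any NumPy volume):

    seg = InteractiveSegmenter(image_3d, use_gpu=False)
    seg.alphas = [1, 2, 4, 8, 16]         # Gaussian smoothing sigmas
    seg.dogs = [(1, 2), (2, 4), (4, 8)]   # Difference-of-Gaussian sigma pairs
    seg.windows = 10                      # window size for local mean/variance
    seg.master_chunk = 49                 # chunk edge length used when mem_lock is set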
@@ -212,13 +205,20 @@ class InteractiveSegmenter:

          # Gaussian and DoG using scipy
          #print("Obtaining gaussians")
-         for sigma in [0.5, 1.0, 2.0, 4.0]:
+         for sigma in self.alphas:
              smooth = ndimage.gaussian_filter(image_3d, sigma)
              features.append(smooth)
+
+         # Difference of Gaussians
+         for (s1, s2) in self.dogs:
+             g1 = ndimage.gaussian_filter(image_3d, s1)
+             g2 = ndimage.gaussian_filter(image_3d, s2)
+             dog = g1 - g2
+             features.append(dog)

          #print("Computing local statistics")
          # Local statistics using scipy's convolve
-         window_size = 5
+         window_size = self.windows
          kernel = np.ones((window_size, window_size, window_size)) / (window_size**3)

          # Local mean
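For reference, the Difference-of-Gaussians feature added throughout this release is simply the subtraction of two blurred copies of the volume, which acts as a band-pass filter between the two scales. A self-contained sketch with illustrative data (not the package's own code path):

    import numpy as np
    from scipy import ndimage

    image_3d = np.random.rand(32, 64, 64).astype(np.float32)  # stand-in volume
    features = []
    for s1, s2 in [(1, 2), (2, 4), (4, 8)]:
        g1 = ndimage.gaussian_filter(image_3d, s1)   # fine-scale blur
        g2 = ndimage.gaussian_filter(image_3d, s2)   # coarse-scale blur
        features.append(g1 - g2)                     # band-pass response between the two scales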
@@ -280,13 +280,24 @@ class InteractiveSegmenter:
          def compute_gaussian(sigma):
              return ndimage.gaussian_filter(image_3d, sigma)

-         for sigma in [0.5, 1.0, 2.0, 4.0]:
+         for sigma in self.alphas:
              future = executor.submit(compute_gaussian, sigma)
              futures.append(('gaussian', sigma, future))
+
+         def compute_dog_local(img, s1, s2):
+             g1 = ndimage.gaussian_filter(img, s1)
+             g2 = ndimage.gaussian_filter(img, s2)
+             return g1 - g2
+
+         # Difference of Gaussians
+         for (s1, s2) in self.dogs:
+
+             future = executor.submit(compute_dog_local, image_3d, s1, s2)
+             futures.append(('dog', s1, future))

          # Local statistics computation
          def compute_local_mean():
-             window_size = 5
+             window_size = self.windows
              kernel = np.ones((window_size, window_size, window_size)) / (window_size**3)
              return ndimage.convolve(image_3d, kernel, mode='reflect')

@@ -294,7 +305,7 @@ class InteractiveSegmenter:
          futures.append(('local_mean', None, future))

          def compute_local_variance():
-             window_size = 5
+             window_size = self.windows
              kernel = np.ones((window_size, window_size, window_size)) / (window_size**3)
              mean = np.mean(image_3d)
              return ndimage.convolve((image_3d - mean)**2, kernel, mode='reflect')
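The threaded variants submit each filter to a ThreadPoolExecutor and tag the futures so the feature maps can be reassembled by name. A minimal sketch of that pattern, not the package's exact bookkeeping (the array and sigmas are illustrative):

    from concurrent.futures import ThreadPoolExecutor
    import numpy as np
    from scipy import ndimage

    image_3d = np.random.rand(16, 32, 32).astype(np.float32)

    def compute_dog(img, s1, s2):
        return ndimage.gaussian_filter(img, s1) - ndimage.gaussian_filter(img, s2)

    results = {}
    with ThreadPoolExecutor() as executor:
        futures = []
        for sigma in [1, 2, 4, 8]:
            futures.append(('gaussian', sigma, executor.submit(ndimage.gaussian_filter, image_3d, sigma)))
        for s1, s2 in [(1, 2), (2, 4), (4, 8)]:
            futures.append(('dog', s1, executor.submit(compute_dog, image_3d, s1, s2)))
        for kind, key, fut in futures:
            results[f'{kind}_{key}'] = fut.result()   # e.g. results['gaussian_2'], results['dog_1']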
@@ -395,8 +406,11 @@ class InteractiveSegmenter:
          features = []

          # Add Gaussian features
-         for sigma in [0.5, 1.0, 2.0, 4.0]:
+         for sigma in self.alphas:
              features.append(results[f'gaussian_{sigma}'])
+
+         for sigma in self.dogs:
+             features.append(results[f'dog_{sigma[0]}'])

          # Add local statistics
          features.append(results['local_mean'])
@@ -494,13 +508,24 @@ class InteractiveSegmenter:
          def compute_gaussian(sigma):
              return ndimage.gaussian_filter(image_2d, sigma)

-         for sigma in [0.5, 1.0, 2.0, 4.0]:
+         for sigma in self.alphas:
              future = executor.submit(compute_gaussian, sigma)
              futures.append(('gaussian', sigma, future))
+
+         # Difference of Gaussians
+         def compute_dog(s1, s2):
+             g1 = ndimage.gaussian_filter(image_2d, s1)
+             g2 = ndimage.gaussian_filter(image_2d, s2)
+             return g1 - g2
+
+         dog_pairs = self.dogs
+         for (s1, s2) in dog_pairs:
+             future = executor.submit(compute_dog, s1, s2)
+             futures.append(('dog', s1, future))

          # Local statistics computation
          def compute_local_mean():
-             window_size = 5
+             window_size = self.windows
              kernel = np.ones((window_size, window_size)) / (window_size**2)
              return ndimage.convolve(image_2d, kernel, mode='reflect')

@@ -508,7 +533,7 @@ class InteractiveSegmenter:
          futures.append(('local_mean', None, future))

          def compute_local_variance():
-             window_size = 5
+             window_size = self.windows
              kernel = np.ones((window_size, window_size)) / (window_size**2)
              mean = np.mean(image_2d)
              return ndimage.convolve((image_2d - mean)**2, kernel, mode='reflect')
@@ -608,8 +633,11 @@ class InteractiveSegmenter:
          features = []

          # Add Gaussian features
-         for sigma in [0.5, 1.0, 2.0, 4.0]:
+         for sigma in self.alphas:
              features.append(results[f'gaussian_{sigma}'])
+
+         for sigma in self.dogs:
+             features.append(results[f'dog_{sigma[0]}'])

          # Add local statistics
          features.append(results['local_mean'])
@@ -843,7 +871,7 @@ class InteractiveSegmenter:
          def compute_gaussian(sigma):
              return ndimage.gaussian_filter(image_2d, sigma)

-         gaussian_sigmas = [0.5, 1.0, 2.0, 4.0]
+         gaussian_sigmas = self.alphas
          for sigma in gaussian_sigmas:
              future = executor.submit(compute_gaussian, sigma)
              futures.append(('gaussian', sigma, future))
@@ -854,7 +882,7 @@ class InteractiveSegmenter:
              g2 = ndimage.gaussian_filter(image_2d, s2)
              return g1 - g2

-         dog_pairs = [(1, 2), (2, 4)]
+         dog_pairs = self.dogs
          for (s1, s2) in dog_pairs:
              future = executor.submit(compute_dog, s1, s2)
              futures.append(('dog', (s1, s2), future))
@@ -926,13 +954,13 @@ class InteractiveSegmenter:

          # Gaussian smoothing at different scales
          #print("Obtaining gaussians")
-         for sigma in [0.5, 1.0, 2.0, 4.0]:
+         for sigma in self.alphas:
              smooth = ndimage.gaussian_filter(image_3d, sigma)
              features.append(smooth)

          #print("Obtaining dif of gaussians")
          # Difference of Gaussians
-         for (s1, s2) in [(1, 2), (2, 4)]:
+         for (s1, s2) in self.dogs:
              g1 = ndimage.gaussian_filter(image_3d, s1)
              g2 = ndimage.gaussian_filter(image_3d, s2)
              dog = g1 - g2
@@ -978,17 +1006,17 @@ class InteractiveSegmenter:
          futures = []

          # Gaussian smoothing at different scales
-         for sigma in [0.5, 1.0, 2.0, 4.0]:
+         for sigma in self.alphas:
              future = executor.submit(ndimage.gaussian_filter, image_3d, sigma)
              futures.append(future)

+         def compute_dog_local(img, s1, s2):
+             g1 = ndimage.gaussian_filter(img, s1)
+             g2 = ndimage.gaussian_filter(img, s2)
+             return g1 - g2
+
          # Difference of Gaussians
-         for (s1, s2) in [(1, 2), (2, 4)]:
-             # Need to define a local function for this task
-             def compute_dog_local(img, s1, s2):
-                 g1 = ndimage.gaussian_filter(img, s1)
-                 g2 = ndimage.gaussian_filter(img, s2)
-                 return g1 - g2
+         for (s1, s2) in self.dogs:

              future = executor.submit(compute_dog_local, image_3d, s1, s2)
              futures.append(future)
@@ -1180,9 +1208,10 @@ class InteractiveSegmenter:
          Dictionary with z-values as keys and lists of corresponding [y, x] coordinates as values
          """
          z_dict = defaultdict(list)
-
+
          for z, y, x in coordinates:
              z_dict[z].append((y, x))
+

          return dict(z_dict) # Convert back to regular dict

@@ -1204,19 +1233,34 @@ class InteractiveSegmenter:
          foreground = set()
          background = set()

-         if not self.use_two:
+         if self.previewing or not self.use_two:
              if self.mem_lock:
                  # For mem_lock, we need to extract a subarray and compute features
-
-                 # Find min/max bounds of the coordinates to get the smallest containing subarray
-                 z_coords = [z for z, y, x in chunk_coords]
-                 y_coords = [y for z, y, x in chunk_coords]
-                 x_coords = [x for z, y, x in chunk_coords]
-
-                 z_min, z_max = min(z_coords), max(z_coords)
-                 y_min, y_max = min(y_coords), max(y_coords)
-                 x_min, x_max = min(x_coords), max(x_coords)
-
+
+                 if self.realtimechunks is None: #Presuming we're segmenting all
+                     z_min, z_max = chunk_coords[0], chunk_coords[1]
+                     y_min, y_max = chunk_coords[2], chunk_coords[3]
+                     x_min, x_max = chunk_coords[4], chunk_coords[5]
+
+                     # Consider moving this to process chunk ??
+                     chunk_coords = np.stack(np.meshgrid(
+                         np.arange(z_min, z_max),
+                         np.arange(y_min, y_max),
+                         np.arange(x_min, x_max),
+                         indexing='ij'
+                     )).reshape(3, -1).T
+
+                     chunk_coords = (list(map(tuple, chunk_coords)))
+                 else: #Presumes we're not segmenting all
+                     # Find min/max bounds of the coordinates to get the smallest containing subarray
+                     z_coords = [z for z, y, x in chunk_coords]
+                     y_coords = [y for z, y, x in chunk_coords]
+                     x_coords = [x for z, y, x in chunk_coords]
+
+                     z_min, z_max = min(z_coords), max(z_coords)
+                     y_min, y_max = min(y_coords), max(y_coords)
+                     x_min, x_max = min(x_coords), max(x_coords)
+

                  # Extract the subarray
                  subarray = self.image_3d[z_min:z_max+1, y_min:y_max+1, x_min:x_max+1]
@@ -1254,8 +1298,13 @@ class InteractiveSegmenter:
                      background.add(coord)

          else:
-             chunk_by_z = self.organize_by_z(chunk_coords)
-             for z, coords in chunk_by_z.items():
+
+             if self.mem_lock:
+                 chunk_coords = self.twodim_coords(chunk_coords[0], chunk_coords[1], chunk_coords[2], chunk_coords[3], chunk_coords[4])
+
+             chunk_coords = self.organize_by_z(chunk_coords)
+
+             for z, coords in chunk_coords.items():

                  if self.feature_cache is None:
                      features = self.get_feature_map_slice(z, self.speed, self.cur_gpu)
@@ -1283,6 +1332,63 @@ class InteractiveSegmenter:

          return foreground, background

+     def twodim_coords(self, y_dim, x_dim, z, chunk_size = None, subrange = None):
+
+         if subrange is None:
+             y_coords, x_coords = np.meshgrid(
+                 np.arange(y_dim),
+                 np.arange(x_dim),
+                 indexing='ij'
+             )
+
+             slice_coords = np.column_stack((
+                 np.full(chunk_size, z),
+                 y_coords.ravel(),
+                 x_coords.ravel()
+             ))
+
+         elif subrange[0] == 'y':
+
+             y_subrange = np.arange(subrange[1], subrange[2])
+
+             # Create meshgrid for this subchunk
+             y_sub, x_sub = np.meshgrid(
+                 y_subrange,
+                 np.arange(x_dim),
+                 indexing='ij'
+             )
+
+             # Create coordinates for this subchunk
+             subchunk_size = len(y_subrange) * x_dim
+             slice_coords = np.column_stack((
+                 np.full(subchunk_size, z),
+                 y_sub.ravel(),
+                 x_sub.ravel()
+             ))
+
+         elif subrange[0] == 'x':
+
+             x_subrange = np.arange(subrange[1], subrange[2])
+
+             # Create meshgrid for this subchunk
+             y_sub, x_sub = np.meshgrid(
+                 np.arange(y_dim),
+                 x_subrange,
+                 indexing='ij'
+             )
+
+             # Create coordinates for this subchunk
+             subchunk_size = y_dim * len(x_subrange)
+             slice_coords = np.column_stack((
+                 np.full(subchunk_size, z),
+                 y_sub.ravel(),
+                 x_sub.ravel()
+             ))
+
+
+         return list(map(tuple, slice_coords))
+


      def segment_volume(self, chunk_size=None, gpu=False):
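The new twodim_coords helper expands a whole slice (or a y/x subrange of it) into explicit (z, y, x) tuples. A standalone sketch of what the full-slice branch computes, with illustrative dimensions rather than the class's own state:

    import numpy as np

    y_dim, x_dim, z = 4, 3, 7
    y_coords, x_coords = np.meshgrid(np.arange(y_dim), np.arange(x_dim), indexing='ij')
    slice_coords = np.column_stack((
        np.full(y_dim * x_dim, z),     # constant z column
        y_coords.ravel(),
        x_coords.ravel()
    ))
    coords = list(map(tuple, slice_coords))
    # coords == [(7, 0, 0), (7, 0, 1), (7, 0, 2), (7, 1, 0), ..., (7, 3, 2)]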
@@ -1293,19 +1399,21 @@ class InteractiveSegmenter:
          self.map_slice = None

          if self.mem_lock:
-             chunk_size = 32 #memory efficient chunk
+             chunk_size = self.master_chunk #memory efficient chunk


          def create_2d_chunks():
              """
              Create chunks by z-slices for 2D processing.
              Each chunk is a complete z-slice with all y,x coordinates,
-             unless the slice exceeds 32768 pixels, in which case it's divided into subchunks.
+             unless the slice exceeds 262144 pixels, in which case it's divided into subchunks.

              Returns:
                  List of chunks, where each chunk contains the coordinates for one z-slice or subchunk
              """
-             MAX_CHUNK_SIZE = 32768
+             MAX_CHUNK_SIZE = 262144
+             if not self.mem_lock:
+                 MAX_CHUNK_SIZE = 10000000000000000000000000 #unlimited i guess
              chunks = []

              for z in range(self.image_3d.shape[0]):
@@ -1315,20 +1423,16 @@ class InteractiveSegmenter:
                  total_pixels = y_dim * x_dim

                  # If the slice is small enough, do not subchunk
-                 if total_pixels <= MAX_CHUNK_SIZE or not self.mem_lock:
-                     y_coords, x_coords = np.meshgrid(
-                         np.arange(y_dim),
-                         np.arange(x_dim),
-                         indexing='ij'
-                     )
-
-                     slice_coords = np.column_stack((
-                         np.full(total_pixels, z),
-                         y_coords.ravel(),
-                         x_coords.ravel()
-                     ))
-
-                     chunks.append(list(map(tuple, slice_coords)))
+                 if total_pixels <= MAX_CHUNK_SIZE:
+
+
+                     if not self.mem_lock:
+                         chunks.append(self.twodim_coords(y_dim, x_dim, z, total_pixels))
+                     else:
+                         chunks.append([y_dim, x_dim, z, total_pixels, None])
+
+
+
                  else:
                      # Determine which dimension to divide (the largest one)
                      largest_dim = 'y' if y_dim >= x_dim else 'x'
@@ -1342,56 +1446,31 @@ class InteractiveSegmenter:
                          # Create subchunks by dividing the y-dimension
                          for i in range(0, y_dim, div_size):
                              end_i = min(i + div_size, y_dim)
-                             y_subrange = np.arange(i, end_i)
-
-                             # Create meshgrid for this subchunk
-                             y_sub, x_sub = np.meshgrid(
-                                 y_subrange,
-                                 np.arange(x_dim),
-                                 indexing='ij'
-                             )
-
-                             # Create coordinates for this subchunk
-                             subchunk_size = len(y_subrange) * x_dim
-                             subchunk_coords = np.column_stack((
-                                 np.full(subchunk_size, z),
-                                 y_sub.ravel(),
-                                 x_sub.ravel()
-                             ))
-
-                             chunks.append(list(map(tuple, subchunk_coords)))
+
+                             if not self.mem_lock:
+                                 chunks.append(self.twodim_coords(y_dim, x_dim, z, None, ['y', i, end_i]))
+                             else:
+                                 chunks.append([y_dim, x_dim, z, None, ['y', i, end_i]])
+
                      else: # largest_dim == 'x'
                          div_size = int(np.ceil(x_dim / num_divisions))
                          # Create subchunks by dividing the x-dimension
                          for i in range(0, x_dim, div_size):
                              end_i = min(i + div_size, x_dim)
-                             x_subrange = np.arange(i, end_i)
-
-                             # Create meshgrid for this subchunk
-                             y_sub, x_sub = np.meshgrid(
-                                 np.arange(y_dim),
-                                 x_subrange,
-                                 indexing='ij'
-                             )
-
-                             # Create coordinates for this subchunk
-                             subchunk_size = y_dim * len(x_subrange)
-                             subchunk_coords = np.column_stack((
-                                 np.full(subchunk_size, z),
-                                 y_sub.ravel(),
-                                 x_sub.ravel()
-                             ))
-
-                             chunks.append(list(map(tuple, subchunk_coords)))
+
+                             if not self.mem_lock:
+                                 chunks.append(self.twodim_coords(y_dim, x_dim, z, None, ['x', i, end_i]))
+                             else:
+                                 chunks.append([y_dim, x_dim, z, None, ['x', i, end_i]])

              return chunks

-         try:
-             from cuml.ensemble import RandomForestClassifier as cuRandomForestClassifier
-         except:
-             print("Cannot find cuML, using CPU to segment instead...")
-             gpu = False
-
+         #try:
+             #from cuml.ensemble import RandomForestClassifier as cuRandomForestClassifier
+         #except:
+             #print("Cannot find cuML, using CPU to segment instead...")
+             #gpu = False
+
          if self.feature_cache is None and not self.mem_lock and not self.use_two:
              with self.lock:
                  if self.feature_cache is None:
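With mem_lock set, create_2d_chunks now stores a small descriptor per slice instead of the fully expanded coordinate list, deferring the expansion to twodim_coords at processing time. A rough, purely illustrative comparison of what is held in memory per 512 x 512 slice under the two strategies:

    y_dim, x_dim, z = 512, 512, 0

    expanded = [(z, y, x) for y in range(y_dim) for x in range(x_dim)]   # 262144 tuples resident at once
    descriptor = [y_dim, x_dim, z, y_dim * x_dim, None]                  # 5 values; expanded lazily later

    print(len(expanded), len(descriptor))   # 262144 vs 5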
@@ -1437,15 +1516,24 @@ class InteractiveSegmenter:
                      y_end = min(y_start + chunk_size, self.image_3d.shape[1])
                      x_end = min(x_start + chunk_size, self.image_3d.shape[2])

-                     # Create coordinates for this chunk efficiently
-                     coords = np.stack(np.meshgrid(
-                         np.arange(z_start, z_end),
-                         np.arange(y_start, y_end),
-                         np.arange(x_start, x_end),
-                         indexing='ij'
-                     )).reshape(3, -1).T
+                     if self.mem_lock:
+                         # Create coordinates for this chunk efficiently
+                         coords = [z_start, z_end, y_start, y_end, x_start, x_end]
+                         chunks.append(coords)
+
+                     else:
+                         # Consider moving this to process chunk ??
+                         coords = np.stack(np.meshgrid(
+                             np.arange(z_start, z_end),
+                             np.arange(y_start, y_end),
+                             np.arange(x_start, x_end),
+                             indexing='ij'
+                         )).reshape(3, -1).T

-                     chunks.append(list(map(tuple, coords)))
+                         chunks.append(list(map(tuple, coords)))
+
+
+
          else:
              chunks = create_2d_chunks()
              self.feature_cache = None #Decided this should not maintain training data for segmenting 2D
@@ -1476,7 +1564,10 @@ class InteractiveSegmenter:
              fore, back = self.process_chunk(chunk)
              foreground_coords.update(fore)
              background_coords.update(back)
-             chunk[i] = None #Help garbage collection
+             try:
+                 chunk[i] = None #Help garbage collection
+             except:
+                 pass
              print(f"Processed {i}/{len(chunks)} chunks")

          return foreground_coords, background_coords
@@ -1518,10 +1609,7 @@ class InteractiveSegmenter:
          self.prev_z = z


-     def get_realtime_chunks(self, chunk_size = 64):
-         print("Computing some overhead...")
-
-
+     def get_realtime_chunks(self, chunk_size = 49):

          # Determine if we need to chunk XY planes
          small_dims = (self.image_3d.shape[1] <= chunk_size and
@@ -1544,16 +1632,9 @@ class InteractiveSegmenter:
          # Create chunks for each Z plane
          for z in range(self.image_3d.shape[0]):
              if small_dims:
-                 # One chunk per Z
-                 coords = np.stack(np.meshgrid(
-                     [z],
-                     np.arange(self.image_3d.shape[1]),
-                     np.arange(self.image_3d.shape[2]),
-                     indexing='ij'
-                 )).reshape(3, -1).T

                  chunk_dict[(z, 0, 0)] = {
-                     'coords': list(map(tuple, coords)),
+                     'coords': [0, self.image_3d.shape[1], 0, self.image_3d.shape[2]],
                      'processed': False,
                      'z': z
                  }
@@ -1566,15 +1647,8 @@ class InteractiveSegmenter:
                          y_end = min(y_start + chunk_size_xy, self.image_3d.shape[1])
                          x_end = min(x_start + chunk_size_xy, self.image_3d.shape[2])

-                         coords = np.stack(np.meshgrid(
-                             [z],
-                             np.arange(y_start, y_end),
-                             np.arange(x_start, x_end),
-                             indexing='ij'
-                         )).reshape(3, -1).T
-
                          chunk_dict[(z, y_start, x_start)] = {
-                             'coords': list(map(tuple, coords)),
+                             'coords': [y_start, y_end, x_start, x_end],
                              'processed': False,
                              'z': z
                          }
@@ -1606,15 +1680,15 @@ class InteractiveSegmenter:
          def get_nearest_unprocessed_chunk(self):
              """Get nearest unprocessed chunk prioritizing current Z"""
              curr_z = self.current_z if self.current_z is not None else self.image_3d.shape[0] // 2
-             curr_y = self.current_x if self.current_x is not None else self.image_3d.shape[1] // 2
-             curr_x = self.current_y if self.current_y is not None else self.image_3d.shape[2] // 2
+             curr_y = self.current_y if self.current_y is not None else self.image_3d.shape[1] // 2
+             curr_x = self.current_x if self.current_x is not None else self.image_3d.shape[2] // 2

              # First try to find chunks at current Z
              current_z_chunks = [(pos, info) for pos, info in chunk_dict.items()
-                                 if info['z'] == curr_z and not info['processed']]
+                                 if pos[0] == curr_z and not info['processed']]

              if current_z_chunks:
-                 # Find nearest chunk in current Z plane
+                 # Find nearest chunk in current Z plane using the chunk positions from the key
                  nearest = min(current_z_chunks,
                                key=lambda x: ((x[0][1] - curr_y) ** 2 +
                                               (x[0][2] - curr_x) ** 2))
@@ -1631,7 +1705,7 @@ class InteractiveSegmenter:
                  target_z = available_z[0][0]
                  # Find nearest chunk in target Z plane
                  z_chunks = [(pos, info) for pos, info in chunk_dict.items()
-                             if info['z'] == target_z and not info['processed']]
+                             if pos[0] == target_z and not info['processed']]
                  nearest = min(z_chunks,
                                key=lambda x: ((x[0][1] - curr_y) ** 2 +
                                               (x[0][2] - curr_x) ** 2))
@@ -1640,45 +1714,38 @@ class InteractiveSegmenter:
              return None


-         with ThreadPoolExecutor() as executor:
-             futures = {}
-             import multiprocessing
-             total_cores = multiprocessing.cpu_count()
-             #available_workers = max(1, min(4, total_cores // 2)) # Use half cores, max of 4
-             available_workers = 1
+         while True:
+             # Find nearest unprocessed chunk using class attributes
+             chunk_idx = get_nearest_unprocessed_chunk(self)
+             if chunk_idx is None:
+                 break
+
+             # Process the chunk directly
+             chunk = chunk_dict[chunk_idx]
+             chunk['processed'] = True
+             coords = chunk['coords']
+
+             coords = np.stack(np.meshgrid(
+                 [chunk['z']],
+                 np.arange(coords[0], coords[1]),
+                 np.arange(coords[2], coords[3]),
+                 indexing='ij'
+             )).reshape(3, -1).T

-             while True:
-                 # Find nearest unprocessed chunk using class attributes
-                 chunk_idx = get_nearest_unprocessed_chunk(self) # Pass self
-                 if chunk_idx is None:
-                     break
-
-                 while (len(futures) < available_workers and
-                        (chunk_idx := get_nearest_unprocessed_chunk(self))): # Pass self
-                     chunk = chunk_dict[chunk_idx]
-                     if gpu:
-                         try:
-                             futures = [executor.submit(self.process_chunk_GPU, chunk) for chunk in chunks]
-                         except:
-                             futures = [executor.submit(self.process_chunk, chunk) for chunk in chunks]
-                     else:
-                         future = executor.submit(self.process_chunk, chunk['coords'])
+             coords = list(map(tuple, coords))

-                     futures[future] = chunk_idx
-                     chunk['processed'] = True
-
-                 # Check completed futures
-                 done, _ = concurrent.futures.wait(
-                     futures.keys(),
-                     timeout=0.1,
-                     return_when=concurrent.futures.FIRST_COMPLETED
-                 )
-
-                 # Process completed chunks
-                 for future in done:
-                     fore, back = future.result()
-                     del futures[future]
-                     yield fore, back
+
+
+             # Process the chunk directly based on whether GPU is available
+             if gpu:
+                 try:
+                     fore, back = self.process_chunk_GPU(coords)
+                 except:
+                     fore, back = self.process_chunk(coords)
+             else:
+                 fore, back = self.process_chunk(coords)
+
+             # Yield the results
+             yield fore, back


      def cleanup(self):
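The rewritten realtime path is now a plain generator: find the nearest unprocessed chunk, expand its stored bounds into coordinates, classify them, and yield the (foreground, background) sets one chunk at a time. A toy, self-contained sketch of consuming such a generator; the chunk data and "classification" here are stand-ins, not the package's own objects:

    def realtime_chunks_toy():
        # Stand-in for the rewritten loop: pick a chunk, "classify" it, yield the results.
        chunk_queue = [[(0, 0, 0), (0, 0, 1)], [(0, 1, 0), (0, 1, 1)]]   # illustrative voxel chunks
        for coords in chunk_queue:
            fore = {c for c in coords if c[2] == 0}   # pretend classifier output
            back = set(coords) - fore
            yield fore, back

    foreground, background = set(), set()
    for fore, back in realtime_chunks_toy():
        foreground.update(fore)
        background.update(back)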
@@ -1700,6 +1767,8 @@ class InteractiveSegmenter:
          self.realtimechunks = None #dump ram
          self.feature_cache = None

+         if not use_two:
+             self.use_two = False

          self.mem_lock = mem_lock

@@ -1791,7 +1860,7 @@ class InteractiveSegmenter:

          elif mem_lock: #Forces ram efficiency

-             box_size = 32
+             box_size = self.master_chunk

              # Memory-efficient approach: compute features only for necessary subarrays
              foreground_features = []