nettracer3d 1.2.4-py3-none-any.whl → 1.2.7-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -3,12 +3,15 @@ import numpy as np
3
3
  from scipy.ndimage import binary_dilation, distance_transform_edt
4
4
  from scipy.ndimage import gaussian_filter
5
5
  from scipy import ndimage
6
- from concurrent.futures import ThreadPoolExecutor, as_completed
6
+ from concurrent.futures import ThreadPoolExecutor, as_completed, ProcessPoolExecutor
7
+ from skimage.segmentation import watershed
7
8
  import cv2
8
9
  import os
10
+ import edt
9
11
  import math
10
12
  import re
11
13
  from . import nettracer
14
+ from multiprocessing import shared_memory
12
15
  import multiprocessing as mp
13
16
  try:
14
17
  import cupy as cp
@@ -177,6 +180,7 @@ def dilate_3D_old(tiff_array, dilated_x=3, dilated_y=3, dilated_z=3):
177
180
 
178
181
  return dilated_array.astype(np.uint8)
179
182
 
183
+
180
184
  def dilate_3D_dt(array, search_distance, xy_scaling=1.0, z_scaling=1.0, GPU = False):
181
185
  """
182
186
  Dilate a 3D array using distance transform method. Dt dilation produces perfect results but only works in euclidean geometry and lags in big arrays.
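For context, distance-transform dilation amounts to thresholding a Euclidean distance map: a voxel belongs to the dilated mask exactly when its distance to the original foreground is at most the search distance. A minimal sketch of that idea with scipy (toy array sizes and spacings, not code from the package):

    import numpy as np
    from scipy.ndimage import distance_transform_edt

    mask = np.zeros((5, 64, 64), dtype=bool)
    mask[2, 32, 32] = True                     # a single foreground voxel

    # Distance of every voxel to the nearest foreground voxel,
    # computed with anisotropic voxel spacing (z, y, x).
    dist = distance_transform_edt(~mask, sampling=(3.0, 1.0, 1.0))

    dilated = dist <= 4.0                      # dilation by a search distance of 4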
@@ -286,74 +290,30 @@ def process_chunk(start_idx, end_idx, nodes, ring_mask, nearest_label_indices):
286
290
 
287
291
  return dilated_nodes_with_labels_chunk
288
292
 
289
- def smart_dilate(nodes, dilate_xy, dilate_z, directory = None, GPU = True, fast_dil = True, predownsample = None, use_dt_dil_amount = None, xy_scale = 1, z_scale = 1):
290
-
291
- original_shape = nodes.shape
292
-
293
+ def smart_dilate(nodes, dilate_xy = 0, dilate_z = 0, directory = None, GPU = True, fast_dil = True, predownsample = None, use_dt_dil_amount = None, xy_scale = 1, z_scale = 1):
293
294
 
294
- #Dilate the binarized array
295
295
  if fast_dil:
296
- # Step : Binarize the labeled array
297
- binary_nodes = binarize(nodes)
298
- dilated_binary_nodes = dilate_3D(binary_nodes, dilate_xy, dilate_xy, dilate_z)
296
+ dilated = nettracer.dilate_3D_dt(nodes, use_dt_dil_amount, xy_scale, z_scale, fast_dil = True)
297
+ return smart_label_watershed(dilated, nodes, directory = None, remove_template = False)
298
+
299
299
  else:
300
- dilated_binary_nodes, nearest_label_indices, nodes = dilate_3D_dt(nodes, use_dt_dil_amount, GPU = GPU, xy_scaling = xy_scale, z_scaling = z_scale)
301
- binary_nodes = binarize(nodes)
300
+ return smart_dilate_short(nodes, use_dt_dil_amount, directory, xy_scale, z_scale)
302
301
 
303
- # Step 3: Isolate the ring (binary dilated mask minus original binary mask)
304
- ring_mask = dilated_binary_nodes & invert_array(binary_nodes)
305
302
 
306
- del binary_nodes
307
303
 
308
- print("Preforming distance transform for smart search... this step may take some time if computed on CPU...")
309
304
 
310
- if fast_dil:
311
305
 
312
- try:
306
+ def smart_dilate_short(nodes, amount = None, directory = None, xy_scale = 1, z_scale = 1):
313
307
 
314
- if GPU == True and cp.cuda.runtime.getDeviceCount() > 0:
315
- print("GPU detected. Using CuPy for distance transform.")
316
-
317
- try:
318
-
319
- if predownsample is None:
320
-
321
- # Step 4: Find the nearest label for each voxel in the ring
322
- nearest_label_indices = compute_distance_transform_GPU(invert_array(nodes))
323
-
324
- else:
325
- gotoexcept = 1/0
326
-
327
- except (cp.cuda.memory.OutOfMemoryError, ZeroDivisionError) as e:
328
- if predownsample is None:
329
- down_factor = catch_memory(e) #Obtain downsample amount based on memory missing
330
- else:
331
- down_factor = (predownsample)**3
332
-
333
- while True:
334
- downsample_needed = down_factor**(1./3.)
335
- small_nodes = nettracer.downsample(nodes, downsample_needed) #Apply downsample
336
- try:
337
- nearest_label_indices = compute_distance_transform_GPU(invert_array(small_nodes)) #Retry dt on downsample
338
- print(f"Using {down_factor} downsample ({downsample_needed} in each dim - Largest possible with this GPU unless user specified downsample)")
339
- break
340
- except cp.cuda.memory.OutOfMemoryError:
341
- down_factor += 1
342
- binary_nodes = binarize(small_nodes) #Recompute variables for downsample
343
- dilated_mask = dilated_binary_nodes #Need this for later to stamp out the correct output
344
- dilated_binary_nodes = dilate_3D(binary_nodes, 2 + round_to_odd(dilate_xy/downsample_needed), 2 + round_to_odd(dilate_xy/downsample_needed), 2 + round_to_odd(dilate_z/downsample_needed)) #Mod dilation to recompute variables for downsample while also over dilatiing
345
-
346
- ring_mask = dilated_binary_nodes & invert_array(binary_nodes)
347
- nodes = small_nodes
348
- del small_nodes
349
- else:
350
- goto_except = 1/0
351
- except Exception as e:
352
- print("GPU dt failed or did not detect GPU (cupy must be installed with a CUDA toolkit setup...). Computing CPU distance transform instead.")
353
- if GPU:
354
- print(f"Error message: {str(e)}")
355
- nearest_label_indices = compute_distance_transform(invert_array(nodes))
308
+ original_shape = nodes.shape
309
+
310
+ print("Performing distance transform for smart search...")
356
311
 
312
+ dilated_binary_nodes, nearest_label_indices, nodes = dilate_3D_dt(nodes, amount, xy_scaling = xy_scale, z_scaling = z_scale)
313
+ binary_nodes = binarize(nodes)
314
+ ring_mask = dilated_binary_nodes & (~binary_nodes)
315
+ del dilated_binary_nodes
316
+ del binary_nodes
357
317
 
358
318
  # Step 5: Process in parallel chunks using ThreadPoolExecutor
359
319
  num_cores = mp.cpu_count() # Use all available CPU cores
@@ -370,11 +330,6 @@ def smart_dilate(nodes, dilate_xy, dilate_z, directory = None, GPU = True, fast_
370
330
  # Combine results from chunks
371
331
  dilated_nodes_with_labels = np.concatenate(results, axis=1)
372
332
 
373
-
374
- if (dilated_nodes_with_labels.shape[1] < original_shape[1]) and fast_dil: #If downsample was used, upsample output
375
- dilated_nodes_with_labels = nettracer.upsample_with_padding(dilated_nodes_with_labels, downsample_needed, original_shape)
376
- dilated_nodes_with_labels = dilated_nodes_with_labels * dilated_mask
377
-
378
333
  if directory is not None:
379
334
  try:
380
335
  tifffile.imwrite(f"{directory}/search_region.tif", dilated_nodes_with_labels)
@@ -393,7 +348,63 @@ def round_to_odd(number):
393
348
  rounded -= 1
394
349
  return rounded
395
350
 
396
- def smart_label(binary_array, label_array, directory = None, GPU = True, predownsample = None, remove_template = False):
351
+ def smart_label_watershed(binary_array, label_array, directory = None, remove_template = False):
352
+ """
353
+ Watershed-based version - much lower memory footprint
354
+ """
355
+ original_shape = binary_array.shape
356
+
357
+ if type(binary_array) == str or type(label_array) == str:
358
+ string_bool = True
359
+ else:
360
+ string_bool = None
361
+ if type(binary_array) == str:
362
+ binary_array = tifffile.imread(binary_array)
363
+ if type(label_array) == str:
364
+ label_array = tifffile.imread(label_array)
365
+
366
+ # Binarize
367
+ binary_array = binarize(binary_array)
368
+
369
+ print("Performing watershed label propagation...")
370
+
371
+ # Watershed approach: propagate existing labels into the dilated region
372
+ # The labels themselves are the "markers" (seeds)
373
+ # We use the binary mask to define where labels can spread
374
+
375
+ # Simple elevation map: distance from edges (lower = closer to labeled regions)
376
+ # This makes watershed flow from labeled regions outward
377
+ elevation = binary_array.astype(np.float32)
378
+
379
+ # Apply watershed - labels propagate into binary_array region
380
+ dilated_nodes_with_labels = watershed(
381
+ elevation, # Elevation map (flat works fine)
382
+ markers=label_array, # Seed labels
383
+ mask=binary_array, # Where to propagate
384
+ compactness=0 # Pure distance-based (not shape-based)
385
+ )
386
+
387
+ if remove_template:
388
+ dilated_nodes_with_labels *= binary_array
389
+
390
+ if string_bool:
391
+ if directory is not None:
392
+ try:
393
+ tifffile.imwrite(f"{directory}/smart_labelled_array.tif", dilated_nodes_with_labels)
394
+ except Exception as e:
395
+ print(f"Could not save search region file to {directory}")
396
+ else:
397
+ try:
398
+ tifffile.imwrite("smart_labelled_array.tif", dilated_nodes_with_labels)
399
+ except Exception as e:
400
+ print(f"Could not save search region file to active directory")
401
+
402
+ return dilated_nodes_with_labels
403
+
404
+ def smart_label(binary_array, label_array, directory = None, GPU = True, predownsample = None, remove_template = False, mode = 0):
405
+
406
+ if mode == 1:
407
+ return smart_label_watershed(binary_array, label_array, directory, remove_template)
397
408
 
398
409
  original_shape = binary_array.shape
399
410
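To illustrate the watershed propagation used above: with a flat elevation map, skimage's watershed floods the seed labels outward through the mask, which is what makes this path cheap but slightly rough along the border where two labels meet. A minimal, self-contained sketch (toy arrays, not package code):

    import numpy as np
    from skimage.segmentation import watershed

    labels = np.zeros((1, 9, 9), dtype=np.int32)
    labels[0, 2, 2] = 1                        # seed for label 1
    labels[0, 6, 6] = 2                        # seed for label 2
    mask = np.ones_like(labels, dtype=bool)    # region the labels may spread into

    filled = watershed(mask.astype(np.float32),  # flat elevation map
                       markers=labels,
                       mask=mask,
                       compactness=0)
    # Every voxel inside the mask now carries the label of a nearby seed.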
 
@@ -406,79 +417,93 @@ def smart_label(binary_array, label_array, directory = None, GPU = True, predown
406
417
  if type(label_array) == str:
407
418
  label_array = tifffile.imread(label_array)
408
419
 
409
- # Step 1: Binarize the labeled array
410
- binary_core = binarize(label_array)
420
+ # Binarize inputs
411
421
  binary_array = binarize(binary_array)
422
+
423
+ print("Performing distance transform for smart label...")
412
424
 
413
- # Step 3: Isolate the ring (binary dilated mask minus original binary mask)
414
- ring_mask = binary_array & invert_array(binary_core)
415
-
416
-
425
+ downsample_needed = None # Track if we downsampled
426
+
417
427
  try:
418
-
419
428
  if GPU == True and cp.cuda.runtime.getDeviceCount() > 0:
420
429
  print("GPU detected. Using CuPy for distance transform.")
421
430
 
422
431
  try:
423
-
424
432
  if predownsample is None:
425
-
426
- # Step 4: Find the nearest label for each voxel in the ring
433
+ # Compute binary_core only when needed
434
+ binary_core = binarize(label_array)
427
435
  nearest_label_indices = compute_distance_transform_GPU(invert_array(binary_core))
428
-
436
+ del binary_core # Free immediately after use
429
437
  else:
430
- gotoexcept = 1/0
438
+ raise ZeroDivisionError # Force downsample path
431
439
 
432
440
  except (cp.cuda.memory.OutOfMemoryError, ZeroDivisionError) as e:
433
441
  if predownsample is None:
434
- down_factor = catch_memory(e) #Obtain downsample amount based on memory missing
442
+ down_factor = catch_memory(e)
435
443
  else:
436
444
  down_factor = (predownsample)**3
437
445
 
438
446
  while True:
439
447
  downsample_needed = down_factor**(1./3.)
440
- small_array = nettracer.downsample(label_array, downsample_needed) #Apply downsample
448
+ small_array = nettracer.downsample(label_array, downsample_needed)
441
449
  try:
442
- nearest_label_indices = compute_distance_transform_GPU(invert_array(small_array)) #Retry dt on downsample
443
- print(f"Using {down_factor} downsample ({downsample_needed} in each dim - Largest possible with this GPU unless user specified downsample)")
450
+ binary_core = binarize(small_array)
451
+ nearest_label_indices = compute_distance_transform_GPU(invert_array(binary_core))
452
+ print(f"Using {down_factor} downsample ({downsample_needed} in each dim)")
453
+ del small_array # Don't need small_array anymore
444
454
  break
445
455
  except cp.cuda.memory.OutOfMemoryError:
456
+ del small_array, binary_core # Clean up before retry
446
457
  down_factor += 1
447
- binary_core = binarize(small_array)
448
- label_array = small_array
458
+
459
+ # Update label_array for later use
460
+ label_array = nettracer.downsample(label_array, downsample_needed)
449
461
  binary_small = nettracer.downsample(binary_array, downsample_needed)
450
462
  binary_small = nettracer.dilate_3D_old(binary_small)
451
463
  ring_mask = binary_small & invert_array(binary_core)
452
-
464
+ del binary_small, binary_core # Free after creating ring_mask
453
465
  else:
454
- goto_except = 1/0
466
+ raise Exception("GPU not available")
467
+
455
468
  except Exception as e:
456
469
  if GPU:
457
- print("GPU dt failed or did not detect GPU (cupy must be installed with a CUDA toolkit setup...). Computing CPU distance transform instead.")
470
+ print("GPU dt failed or did not detect GPU. Computing CPU distance transform instead.")
458
471
  print(f"Error message: {str(e)}")
459
472
  import traceback
460
473
  print(traceback.format_exc())
461
- nearest_label_indices = compute_distance_transform(invert_array(label_array))
474
+ binary_core = binarize(label_array)
475
+ nearest_label_indices = compute_distance_transform(invert_array(binary_core))
476
+ del binary_core
462
477
 
463
- print("Preforming distance transform for smart label...")
464
-
465
- # Step 5: Process in parallel chunks using ThreadPoolExecutor
466
- num_cores = mp.cpu_count() # Use all available CPU cores
467
- chunk_size = label_array.shape[1] // num_cores # Divide the array into chunks along the z-axis
478
+ # Compute ring_mask only if not already computed in downsample path
479
+ if 'ring_mask' not in locals():
480
+ binary_core = binarize(label_array)
481
+ ring_mask = binary_array & invert_array(binary_core)
482
+ del binary_core
468
483
 
484
+ # Step 5: Process in parallel chunks
485
+ num_cores = mp.cpu_count()
486
+ chunk_size = label_array.shape[1] // num_cores
469
487
 
470
488
  with ThreadPoolExecutor(max_workers=num_cores) as executor:
471
- args_list = [(i * chunk_size, (i + 1) * chunk_size if i != num_cores - 1 else label_array.shape[1], label_array, ring_mask, nearest_label_indices) for i in range(num_cores)]
489
+ args_list = [(i * chunk_size, (i + 1) * chunk_size if i != num_cores - 1 else label_array.shape[1],
490
+ label_array, ring_mask, nearest_label_indices) for i in range(num_cores)]
472
491
  results = list(executor.map(lambda args: process_chunk(*args), args_list))
473
492
 
474
- # Combine results from chunks
493
+ # Free large arrays no longer needed
494
+ del label_array, ring_mask, nearest_label_indices
495
+
496
+ # Combine results
475
497
  dilated_nodes_with_labels = np.concatenate(results, axis=1)
498
+ del results # Free the list of chunks
476
499
 
477
- if label_array.shape[1] < original_shape[1]: #If downsample was used, upsample output
500
+ if downsample_needed is not None: # If downsample was used
478
501
  dilated_nodes_with_labels = nettracer.upsample_with_padding(dilated_nodes_with_labels, downsample_needed, original_shape)
479
- dilated_nodes_with_labels = dilated_nodes_with_labels * binary_array
502
+ dilated_nodes_with_labels *= binary_array # In-place multiply if possible
480
503
  elif remove_template:
481
- dilated_nodes_with_labels = dilated_nodes_with_labels * binary_array
504
+ dilated_nodes_with_labels *= binary_array # In-place multiply if possible
505
+
506
+ del binary_array # Done with this
482
507
 
483
508
  if string_bool:
484
509
  if directory is not None:
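A quick note on the downsample arithmetic in the out-of-memory path above: down_factor is a volumetric reduction factor, so the per-axis factor passed to the downsampler is its cube root. For example (illustrative numbers only):

    down_factor = 8                        # shrink the total voxel count roughly 8x
    downsample_needed = down_factor ** (1. / 3.)
    # downsample_needed is about 2.0, i.e. z, y and x are each downsampled by 2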
@@ -492,7 +517,6 @@ def smart_label(binary_array, label_array, directory = None, GPU = True, predown
492
517
  except Exception as e:
493
518
  print(f"Could not save search region file to active directory")
494
519
 
495
-
496
520
  return dilated_nodes_with_labels
497
521
 
498
522
  def smart_label_single(binary_array, label_array):
@@ -613,24 +637,93 @@ def compute_distance_transform_distance_GPU(nodes, sampling = [1, 1, 1]):
613
637
  return distance
614
638
 
615
639
 
616
- def compute_distance_transform_distance(nodes, sampling = [1, 1, 1]):
617
-
618
- #print("(Now doing distance transform...)")
640
+ def _run_edt_in_process_shm(input_shm_name, output_shm_name, shape, dtype_str, sampling_tuple):
641
+ """Helper function to run edt in a separate process using shared memory."""
642
+ import edt # Import here to ensure it's available in child process
643
+
644
+ input_shm = shared_memory.SharedMemory(name=input_shm_name)
645
+ output_shm = shared_memory.SharedMemory(name=output_shm_name)
646
+
647
+ try:
648
+ nodes_arr = np.ndarray(shape, dtype=dtype_str, buffer=input_shm.buf)
649
+
650
+ n_cores = mp.cpu_count()
651
+ result = edt.edt(
652
+ nodes_arr.astype(bool),
653
+ anisotropy=sampling_tuple,
654
+ parallel=n_cores
655
+ )
656
+
657
+ result_array = np.ndarray(result.shape, dtype=result.dtype, buffer=output_shm.buf)
658
+ np.copyto(result_array, result)
659
+
660
+ return result.shape, str(result.dtype)
661
+ finally:
662
+ input_shm.close()
663
+ output_shm.close()
619
664
 
665
+ def compute_distance_transform_distance(nodes, sampling=[1, 1, 1], fast_dil=False):
666
+ """
667
+ Compute distance transform with automatic parallelization when available.
668
+
669
+ Args:
670
+ nodes: Binary array (True/1 for objects)
671
+ sampling: Voxel spacing [z, y, x] for anisotropic data
672
+
673
+ Returns:
674
+ Distance transform array
675
+ """
620
676
  is_pseudo_3d = nodes.shape[0] == 1
677
+
621
678
  if is_pseudo_3d:
622
- nodes = np.squeeze(nodes) # Convert to 2D for processing
679
+ nodes = np.squeeze(nodes)
623
680
  sampling = [sampling[1], sampling[2]]
624
-
625
- # Fallback to CPU if there's an issue with GPU computation
626
- distance = distance_transform_edt(nodes, sampling = sampling)
681
+
682
+ if fast_dil:
683
+ try:
684
+ # Use shared memory for all array sizes
685
+ input_shm = shared_memory.SharedMemory(create=True, size=nodes.nbytes)
686
+ output_size = nodes.size * np.dtype(np.float64).itemsize
687
+ output_shm = shared_memory.SharedMemory(create=True, size=output_size)
688
+
689
+ try:
690
+ shm_array = np.ndarray(nodes.shape, dtype=nodes.dtype, buffer=input_shm.buf)
691
+ np.copyto(shm_array, nodes)
692
+
693
+ with ProcessPoolExecutor(max_workers=1) as executor:
694
+ future = executor.submit(
695
+ _run_edt_in_process_shm,
696
+ input_shm.name,
697
+ output_shm.name,
698
+ nodes.shape,
699
+ str(nodes.dtype),
700
+ tuple(sampling)
701
+ )
702
+ result_shape, result_dtype = future.result(timeout=300) # Add timeout
703
+
704
+ distance = np.ndarray(result_shape, dtype=result_dtype, buffer=output_shm.buf).copy()
705
+
706
+ finally:
707
+ input_shm.close()
708
+ input_shm.unlink()
709
+ output_shm.close()
710
+ output_shm.unlink()
711
+
712
+ except Exception as e:
713
+ print(f"Parallel distance transform failed ({e}), falling back to scipy")
714
+ import traceback
715
+ traceback.print_exc() # See the full error
716
+ distance = distance_transform_edt(nodes, sampling=sampling)
717
+ else:
718
+ distance = distance_transform_edt(nodes, sampling=sampling)
719
+
627
720
  if is_pseudo_3d:
628
- distance = np.expand_dims(distance, axis = 0)
721
+ distance = np.expand_dims(distance, axis=0)
722
+
629
723
  return distance
630
724
 
631
725
 
632
726
 
633
-
634
727
  def gaussian(search_region, GPU = True):
635
728
  try:
636
729
  if GPU == True and cp.cuda.runtime.getDeviceCount() > 0:
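The fast path above ultimately relies on the edt package (the new dependency), whose Euclidean distance transform is multi-threaded; the surrounding shared-memory code just moves the array into and out of a separate worker process. Stripped of that plumbing, the core call looks roughly like this (illustrative sizes, not the package's exact code):

    import multiprocessing as mp
    import numpy as np
    import edt                                   # pip install edt

    volume = np.random.rand(64, 256, 256) > 0.5  # stand-in binary volume

    distance = edt.edt(volume,
                       anisotropy=(3.0, 1.0, 1.0),  # voxel spacing (z, y, x)
                       parallel=mp.cpu_count())     # threads edt may use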
nettracer3d/tutorial.py CHANGED
@@ -1098,8 +1098,6 @@ def setup_start_tutorial(window):
1098
1098
  )
1099
1099
 
1100
1100
 
1101
-
1102
-
1103
1101
 
1104
1102
  # Step 9: Close dialog and finish
1105
1103
  def close_dialog():
@@ -1111,7 +1109,6 @@ def setup_start_tutorial(window):
1111
1109
  None,
1112
1110
  "That's it for the Intro tutorial! Select the Basic Interface Tour next to see how to use the main GUI elements.",
1113
1111
  message_position="bottom",
1114
- pre_action=MenuHelper.create_widget_interaction(tutorial, 'properties_dialog', 'xy_scale', 'close()'),
1115
1112
  action=close_dialog
1116
1113
  )
1117
1114
 
@@ -1646,33 +1643,13 @@ def setup_connectivity_tutorial(window):
1646
1643
  action=MenuHelper.create_widget_interaction(tutorial, 'con_dialog', 'down_factor', 'setText("")')
1647
1644
  )
1648
1645
 
1649
- """ # <-- so I am trying out removing these because their use cases are confusing
1650
-
1651
- tutorial.add_step(
1652
- MenuHelper.create_widget_getter(tutorial, 'con_dialog', 'GPU_downsample'),
1653
- "If you want to try and use the GPU, you can likewise enter an arbitrary integer downsample factor here to speed it up. Note the GPU calculation can be greedy with VRAM and will automatically try to downsample itself in a lot of cases."
1654
- highlight_type=None,
1655
- message_position="beside",
1656
- pre_action=MenuHelper.create_widget_interaction(tutorial, 'con_dialog', 'GPU_downsample', 'setText("INTEGER!")'),
1657
- action=MenuHelper.create_widget_interaction(tutorial, 'con_dialog', 'GPU_downsample', 'setText("")')
1658
- )
1659
-
1660
- tutorial.add_step(
1661
- MenuHelper.create_widget_getter(tutorial, 'con_dialog', 'GPU'),
1662
- "Enable this to have your system attempt to use the GPU. You will need a CUDA toolkit and a corresponding cupy package installed. Note that I consider this function somewhat experimental. In short, the cupy implementation uses a distance transform calculation that can be very greedy with VRAM. If it overflows, it will attempt to iteratively downsample itself until the calculation works (specifically containing to calculating the 'node search' volume). Note this risks kicking out small nodes from your image. Furthermore, it is only really applicable of 'fast dilation' is enabled. Therefore, generally skip using this option. However, it can be a way to rapidly assess the general network structure of a large image.",
1663
- highlight_type=None,
1664
- message_position="beside",
1665
- pre_action=MenuHelper.create_widget_interaction(tutorial, 'con_dialog', 'GPU', 'click()'),
1666
- action = MenuHelper.create_widget_interaction(tutorial, 'con_dialog', 'GPU', 'toggle()'))
1667
-
1668
1646
  tutorial.add_step(
1669
1647
  MenuHelper.create_widget_getter(tutorial, 'con_dialog', 'fastdil'),
1670
- "Enable this to have the algorithm use fast dilation. Fast dilation "
1671
- highlight_type=None,
1648
+ "Enable the fast search button to use a slightly alternate algorithm for the node search step that is faster. This algorithm uses a parallelized distance transform to create a binary search region which is a lot faster if you have a lot of CPU cores. It then uses flooding to label the binary search region, which leads to slightly rough labeling where two search regions meet. When disabled, a non-parallel distance transform is used, which can be slower but always has exact labels where two search regions meet. I recommend enabling this for larger images and disabling it for smaller ones.", highlight_type=None,
1672
1649
  message_position="beside",
1673
1650
  pre_action=MenuHelper.create_widget_interaction(tutorial, 'con_dialog', 'fastdil', 'click()'),
1674
- action = MenuHelper.create_widget_interaction(tutorial, 'con_dialog', 'fastdil', 'toggle()'))
1675
- """
1651
+ action=MenuHelper.create_widget_interaction(tutorial, 'con_dialog', 'fastdil', 'toggle()')
1652
+ )
1676
1653
 
1677
1654
  tutorial.add_step(
1678
1655
  MenuHelper.create_widget_getter(tutorial, 'con_dialog', 'overlays'),
@@ -1765,35 +1742,6 @@ def setup_branch_tutorial(window):
1765
1742
  pre_action=open_dialog
1766
1743
  )
1767
1744
 
1768
- """
1769
- tutorial.add_step(
1770
- MenuHelper.create_widget_getter(tutorial, 'branch_dialog', 'fix'),
1771
- "This first auto-correction option is designed if you feel like the branch labels are generally too busy. Selecting this will have the program attempt to collapse overly-dense regions of branches into a single label. Note that this behavior is somewhat tricky to predict so I generally don't use it but feel free to give it a shot and see how it looks.",
1772
- highlight_type=None,
1773
- message_position="beside",
1774
- pre_action=MenuHelper.create_widget_interaction(tutorial, 'branch_dialog', 'fix', 'click()'),
1775
- action=MenuHelper.create_widget_interaction(tutorial, 'branch_dialog', 'fix', 'toggle()')
1776
- )
1777
-
1778
- tutorial.add_step(
1779
- MenuHelper.create_widget_getter(tutorial, 'branch_dialog', 'fix_val'),
1780
- "This integer value tells the above parameter (if enabled) what degree of branch-busyness should get merged. In short, a lower value is more aggressive with merging while a higher value only merges very busy regions. By default it is set to 4.",
1781
- highlight_type=None,
1782
- message_position="beside",
1783
- pre_action=MenuHelper.create_widget_interaction(tutorial, 'branch_dialog', 'fix_val', 'selectAll()'),
1784
- action=MenuHelper.create_widget_interaction(tutorial, 'branch_dialog', 'fix_val', 'deselect()')
1785
- )
1786
-
1787
- tutorial.add_step(
1788
- MenuHelper.create_widget_getter(tutorial, 'branch_dialog', 'seed'),
1789
- "The random seed for grouping branches above can be changed here with an integer value, if the behavior of the above option is desired to be tweaked somewhat. It will use 42 by default.",
1790
- highlight_type=None,
1791
- message_position="beside",
1792
- pre_action=MenuHelper.create_widget_interaction(tutorial, 'branch_dialog', 'seed', 'setText("INTEGER!")'),
1793
- action=MenuHelper.create_widget_interaction(tutorial, 'branch_dialog', 'seed', 'setText("")')
1794
- )
1795
-
1796
- """
1797
1745
 
1798
1746
  tutorial.add_step(
1799
1747
  MenuHelper.create_widget_getter(tutorial, 'branch_dialog', 'fix2'),
@@ -1838,6 +1786,15 @@ def setup_branch_tutorial(window):
1838
1786
  action=MenuHelper.create_widget_interaction(tutorial, 'branch_dialog', 'down_factor', 'deselect()')
1839
1787
  )
1840
1788
 
1789
+ tutorial.add_step(
1790
+ MenuHelper.create_widget_getter(tutorial, 'branch_dialog', 'mode'),
1791
+ "The Algorithm Dropdown lets you choose between the standard algorithm, which provides more exact labels along branch borders, and the faster labeling algorithm, which uses flooding to label the binary branches, leading to slightly rough labeling where two branches meet. I recommend using the standard for smaller images and the fast for larger images where computation time becomes an issue.",
1792
+ highlight_type=None,
1793
+ message_position="beside",
1794
+ pre_action=MenuHelper.create_widget_interaction(tutorial, 'branch_dialog', 'mode', 'showPopup()'),
1795
+ action=MenuHelper.create_widget_interaction(tutorial, 'branch_dialog', 'mode', 'hidePopup()')
1796
+ )
1797
+
1841
1798
 
1842
1799
  tutorial.add_step(
1843
1800
  MenuHelper.create_widget_getter(tutorial, 'branch_dialog', 'compute'),
@@ -1881,16 +1838,6 @@ def setup_branch_tutorial(window):
1881
1838
  pre_action=open_dialog
1882
1839
  )
1883
1840
 
1884
- tutorial.add_step(
1885
- MenuHelper.create_widget_getter(tutorial, 'gen_dialog', 'down_factor'),
1886
- "This integer value can be used to temporarily downsample the image while creating branchpoints. Aside from speeding up the process, this may alter branch detection, possibly performing a cleaner branch appraisal of very thick branches but losing network identification of smaller branches (Much like in the prior menu - note that any value entered in the prior menu will be applied by default here for consistency, and you won't see this option). It is disabled by default. Larger values will downsample more aggressively.",
1887
- highlight_type=None,
1888
- message_position="beside",
1889
- pre_action=MenuHelper.create_widget_interaction(tutorial, 'gen_dialog', 'down_factor', 'selectAll()'),
1890
- action=MenuHelper.create_widget_interaction(tutorial, 'gen_dialog', 'down_factor', 'deselect()')
1891
- )
1892
-
1893
-
1894
1841
  tutorial.add_step(
1895
1842
  MenuHelper.create_widget_getter(tutorial, 'gen_dialog', 'branch_removal'),
1896
1843
  "IMPORTANT - This branch removal parameter (Skeleton voxel branch to remove...) is something I would consider entering a value for. This is the length of terminal branches that will be removed prior to any vertex/branch labeling. Any branch shorter than the value here will be removed, but only if it is a terminal branch. For more jagged segmentations, this may be a necessity to prevent branchpoints from arising from spine-like artifacts. More internal branches will not be removed, so as a test it is generally safe to enter a large value here, which will preserve the majority of the branch schema and just risk losing occasional terminal branches.",
@@ -1918,6 +1865,26 @@ def setup_branch_tutorial(window):
1918
1865
  action=MenuHelper.create_widget_interaction(tutorial, 'gen_dialog', 'comp_dil', 'setText("")')
1919
1866
  )
1920
1867
 
1868
+ tutorial.add_step(
1869
+ MenuHelper.create_widget_getter(tutorial, 'gen_dialog', 'fast_dil'),
1870
+ "Enable fast dilation to use a parallelized distance transform to do 3D dilation which is a lot faster if you have a lot of CPU cores. Note that this only applies if you have chosen to merge your nodes.",
1871
+ highlight_type=None,
1872
+ message_position="beside",
1873
+ pre_action=MenuHelper.create_widget_interaction(tutorial, 'gen_dialog', 'fast_dil', 'click()'),
1874
+ action=MenuHelper.create_widget_interaction(tutorial, 'gen_dialog', 'fast_dil', 'toggle()')
1875
+ )
1876
+
1877
+ tutorial.add_step(
1878
+ MenuHelper.create_widget_getter(tutorial, 'gen_dialog', 'down_factor'),
1879
+ "This integer value can be used to temporarily downsample the image while creating branchpoints. Aside from speeding up the process, this may alter branch detection, possibly performing a cleaner branch appraisal of very thick branches but losing network identification of smaller branches (Much like in the prior menu - note that any value entered in the prior menu will be applied by default here for consistency, and you won't see this option). It is disabled by default. Larger values will downsample more aggressively.",
1880
+ highlight_type=None,
1881
+ message_position="beside",
1882
+ pre_action=MenuHelper.create_widget_interaction(tutorial, 'gen_dialog', 'down_factor', 'selectAll()'),
1883
+ action=MenuHelper.create_widget_interaction(tutorial, 'gen_dialog', 'down_factor', 'deselect()')
1884
+ )
1885
+
1886
+
1887
+
1921
1888
  def close_dialog():
1922
1889
  if hasattr(tutorial, 'gen_dialog') and tutorial.gen_dialog:
1923
1890
  tutorial.gen_dialog.close()
@@ -1,10 +1,11 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: nettracer3d
3
- Version: 1.2.4
3
+ Version: 1.2.7
4
4
  Summary: Scripts for initializing and analyzing networks from segmentations of three dimensional images.
5
5
  Author-email: Liam McLaughlin <liamm@wustl.edu>
6
6
  Project-URL: Documentation, https://nettracer3d.readthedocs.io/en/latest/
7
- Project-URL: Reference_Citation_For_Use, https://doi.org/10.1101/2024.07.29.605633
7
+ Project-URL: Youtube_Tutorial, https://www.youtube.com/watch?v=_4uDy0mzG94&list=PLsrhxiimzKJMZ3_gTWkfrcAdJQQobUhj7
8
+ Project-URL: Downloadable_Version, https://doi.org/10.5281/zenodo.17873800
8
9
  Classifier: Programming Language :: Python :: 3
9
10
  Classifier: License :: Other/Proprietary License
10
11
  Classifier: Operating System :: OS Independent
@@ -24,9 +25,9 @@ Requires-Dist: tifffile
24
25
  Requires-Dist: qtrangeslider
25
26
  Requires-Dist: PyQt6
26
27
  Requires-Dist: scikit-learn
27
- Requires-Dist: nibabel
28
28
  Requires-Dist: setuptools
29
29
  Requires-Dist: umap-learn
30
+ Requires-Dist: edt
30
31
  Provides-Extra: cuda11
31
32
  Requires-Dist: cupy-cuda11x; extra == "cuda11"
32
33
  Provides-Extra: cuda12
@@ -44,13 +45,20 @@ Dynamic: license-file
44
45
 
45
46
  NetTracer3D is a Python package developed for both 2D and 3D analysis of microscopic images in the .tif file format. It supports generation of 3D networks showing the relationships between objects (or nodes) in three dimensional space, either based on their own proximity or connectivity via connecting objects such as nerves or blood vessels. In addition to these functionalities are several advanced 3D data processing algorithms, such as labeling of branched structures or abstraction of branched structures into networks. Note that nettracer3d uses segmented data, which can be segmented from other software such as ImageJ and imported into NetTracer3D, although it does offer its own segmentation via intensity and volumetric thresholding, or random forest machine learning segmentation. NetTracer3D currently has a fully functional GUI. To use the GUI, after installing the nettracer3d package via pip, enter the command 'nettracer3d' in your command prompt:
46
47
 
48
+
47
49
  --- Documentation ---
48
50
 
49
51
  Please see: https://nettracer3d.readthedocs.io/en/latest/
50
52
 
53
+
54
+ --- Video Tutorial ---
55
+
56
+ Please see: https://www.youtube.com/watch?v=_4uDy0mzG94&list=PLsrhxiimzKJMZ3_gTWkfrcAdJQQobUhj7
57
+
58
+
51
59
  --- Installation ---
52
60
 
53
- To install nettracer3d, simply install Python and use this command in your command terminal:
61
+ To install nettracer3d, simply install Python. Make sure your Python installation includes pip, and that both Python and pip are available on your PATH. Next, use this command in your command terminal:
54
62
 
55
63
  pip install nettracer3d
56
64
 
@@ -59,6 +67,10 @@ I recommend installing the program as an Anaconda package to ensure its modules
59
67
 
60
68
  https://www.anaconda.com/download?utm_source=anacondadocs&utm_medium=documentation&utm_campaign=download&utm_content=installwindows
61
69
 
70
+ Alternatively, you can download a compiled .exe of version 1.2.7 here: https://doi.org/10.5281/zenodo.17873800
71
+
72
+ Unzip the folder, then double click the NetTracer3D executable to run the program. Note that this version will be missing a few features compared to the Python package, namely GPU segmentation support and the ability to print updates to the command window. It will also not be updated as often.
73
+
62
74
  Optional Packages
63
75
  ~~~~~~~~~~~~~~~~~~
64
76
  I recommend including Napari (Chi-Li Chiu, Nathan Clack, the napari community, napari: a Python Multi-Dimensional Image Viewer Platform for the Research Community, Microscopy and Microanalysis, Volume 28, Issue S1, 1 August 2022, Pages 1576–1577, https://doi.org/10.1017/S1431927622006328) in the download as well, which allows NetTracer3D to use 3D displays. The standard package only comes with its native 2D slice display window.
@@ -100,16 +112,18 @@ While not related to NetTracer3D, if you want to use Cellpose3 (for which GPU-us
100
112
  This gui is built from the PyQt6 package and therefore may not function on dockers or virtual envs that are unable to support PyQt6 displays.
101
113
 
102
114
 
103
- NetTracer3D is free to use/fork for academic/nonprofit use so long as citation is provided, and is available for commercial use at a fee (see license file for information).
104
- The current citation is here:
115
+ NetTracer3D is freely available for academic and nonprofit use and can be obtained from pip (pip install nettracer3d), provided that citation is included in any abstract, paper, or presentation utilizing NetTracer3D.
105
116
 
106
- McLaughlin, L., Zhang, B., Sharma, S. et al. Three dimensional multiscalar neurovascular nephron connectivity map of the human kidney across the lifespan. Nat Commun 16, 5161 (2025). https://doi.org/10.1038/s41467-025-60435-8
117
+ (The official paper to cite is coming soon)
107
118
 
108
119
  NetTracer3D was developed by Liam McLaughlin while working under Dr. Sanjay Jain at Washington University School of Medicine.
109
120
 
110
- -- Version 1.2.4 Updates --
121
+ -- Version 1.2.7 Updates --
111
122
 
112
- * Fixed bug for calculating surface area when the surface was on the edge of the image.
113
- * Removed video tutorial from youtube for being outdated. May add new one later.
123
+ * Added a faster parallelized option for all distance transform calculations.
124
+ * Similarly, added flooding as a faster but slightly rougher option for propagating labels. This and the above can be combined to do much faster calculations for bigger images.
125
+ * Now depends on the 'edt' package (which provides the parallelized distance transform).
126
+ * Removed dependency on nibabel (which was just being used to open .nii files). .nii files can still be opened if nibabel is installed manually.
127
+ * Added option to not show numerical labels when displaying network graph.
114
128
 
115
129
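As the first two update bullets suggest, the parallel distance transform and the flood-style labeling compose into one pipeline: the distance transform builds the dilated binary search region, and watershed-style flooding then stamps the original labels into it. A rough end-to-end sketch with toy data (variable names and the threshold are illustrative, not the package API):

    import multiprocessing as mp
    import numpy as np
    import edt
    from skimage.segmentation import watershed

    nodes = np.zeros((8, 64, 64), dtype=np.int32)   # labeled nodes; 0 = background
    nodes[4, 16, 16] = 1
    nodes[4, 48, 48] = 2

    # 1) Parallel EDT of the background gives each voxel's distance to the nearest node.
    dist = edt.edt(nodes == 0, anisotropy=(1.0, 1.0, 1.0), parallel=mp.cpu_count())

    # 2) Threshold the distance map to get the dilated binary search region.
    search_region = dist <= 10.0

    # 3) Flood the search region with the node labels (fast, but the border
    #    between two neighbouring regions is only approximate).
    labelled = watershed(search_region.astype(np.float32),
                         markers=nodes, mask=search_region)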