nettracer3d 0.6.8__tar.gz → 0.6.9__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of nettracer3d might be problematic.
- {nettracer3d-0.6.8/src/nettracer3d.egg-info → nettracer3d-0.6.9}/PKG-INFO +40 -9
- nettracer3d-0.6.9/README.md +45 -0
- {nettracer3d-0.6.8 → nettracer3d-0.6.9}/pyproject.toml +5 -4
- {nettracer3d-0.6.8 → nettracer3d-0.6.9}/src/nettracer3d/modularity.py +23 -24
- {nettracer3d-0.6.8 → nettracer3d-0.6.9}/src/nettracer3d/morphology.py +12 -9
- {nettracer3d-0.6.8 → nettracer3d-0.6.9}/src/nettracer3d/nettracer.py +95 -44
- {nettracer3d-0.6.8 → nettracer3d-0.6.9}/src/nettracer3d/nettracer_gui.py +264 -59
- {nettracer3d-0.6.8 → nettracer3d-0.6.9}/src/nettracer3d/proximity.py +49 -13
- {nettracer3d-0.6.8 → nettracer3d-0.6.9}/src/nettracer3d/segmenter.py +1 -1
- {nettracer3d-0.6.8 → nettracer3d-0.6.9}/src/nettracer3d/smart_dilate.py +19 -20
- {nettracer3d-0.6.8 → nettracer3d-0.6.9/src/nettracer3d.egg-info}/PKG-INFO +40 -9
- nettracer3d-0.6.8/README.md +0 -15
- {nettracer3d-0.6.8 → nettracer3d-0.6.9}/LICENSE +0 -0
- {nettracer3d-0.6.8 → nettracer3d-0.6.9}/setup.cfg +0 -0
- {nettracer3d-0.6.8 → nettracer3d-0.6.9}/src/nettracer3d/__init__.py +0 -0
- {nettracer3d-0.6.8 → nettracer3d-0.6.9}/src/nettracer3d/community_extractor.py +0 -0
- {nettracer3d-0.6.8 → nettracer3d-0.6.9}/src/nettracer3d/network_analysis.py +0 -0
- {nettracer3d-0.6.8 → nettracer3d-0.6.9}/src/nettracer3d/network_draw.py +0 -0
- {nettracer3d-0.6.8 → nettracer3d-0.6.9}/src/nettracer3d/node_draw.py +0 -0
- {nettracer3d-0.6.8 → nettracer3d-0.6.9}/src/nettracer3d/run.py +0 -0
- {nettracer3d-0.6.8 → nettracer3d-0.6.9}/src/nettracer3d/simple_network.py +0 -0
- {nettracer3d-0.6.8 → nettracer3d-0.6.9}/src/nettracer3d.egg-info/SOURCES.txt +0 -0
- {nettracer3d-0.6.8 → nettracer3d-0.6.9}/src/nettracer3d.egg-info/dependency_links.txt +0 -0
- {nettracer3d-0.6.8 → nettracer3d-0.6.9}/src/nettracer3d.egg-info/entry_points.txt +0 -0
- {nettracer3d-0.6.8 → nettracer3d-0.6.9}/src/nettracer3d.egg-info/requires.txt +0 -0
- {nettracer3d-0.6.8 → nettracer3d-0.6.9}/src/nettracer3d.egg-info/top_level.txt +0 -0
{nettracer3d-0.6.8/src/nettracer3d.egg-info → nettracer3d-0.6.9}/PKG-INFO

@@ -1,14 +1,15 @@
 Metadata-Version: 2.4
 Name: nettracer3d
-Version: 0.6.8
+Version: 0.6.9
 Summary: Scripts for intializing and analyzing networks from segmentations of three dimensional images.
-Author-email: Liam McLaughlin <
-Project-URL:
+Author-email: Liam McLaughlin <liamm@wustl.edu>
+Project-URL: Documentation, https://nettracer3d.readthedocs.io/en/latest/
+Project-URL: Video_Tutorial, https://www.youtube.com/watch?v=cRatn5VTWDY
 Project-URL: Reference_Citation_For_Use, https://doi.org/10.1101/2024.07.29.605633
 Classifier: Programming Language :: Python :: 3
 Classifier: License :: Other/Proprietary License
 Classifier: Operating System :: OS Independent
-Requires-Python:
+Requires-Python: ==3.11
 Description-Content-Type: text/markdown
 License-File: LICENSE
 Requires-Dist: numpy==1.26.4
@@ -38,16 +39,46 @@ Dynamic: license-file
 
 NetTracer3D is a python package developed for both 2D and 3D analysis of microscopic images in the .tif file format. It supports generation of 3D networks showing the relationships between objects (or nodes) in three dimensional space, either based on their own proximity or connectivity via connecting objects such as nerves or blood vessels. In addition to these functionalities are several advanced 3D data processing algorithms, such as labeling of branched structures or abstraction of branched structures into networks. Note that nettracer3d uses segmented data, which can be segmented from other softwares such as ImageJ and imported into NetTracer3D, although it does offer its own segmentation via intensity and volumetric thresholding, or random forest machine learning segmentation. NetTracer3D currently has a fully functional GUI. To use the GUI, after installing the nettracer3d package via pip, enter the command 'nettracer3d' in your command prompt:
 
+--- Documentation ---
 
-
-
+Please see: https://nettracer3d.readthedocs.io/en/latest/
+
+--- Installation ---
+
+To install nettracer3d, simply install Python and use this command in your command terminal:
+
+pip install nettracer3d
+
+I recommend installing the program as an Anaconda package to ensure its modules are work together on your specific system:
+(Install anaconda at the link below, set up a new python env for nettracer3d, then use the same pip command).
+
+https://www.anaconda.com/download?utm_source=anacondadocs&utm_medium=documentation&utm_campaign=download&utm_content=installwindows
+
+nettracer3d mostly utilizes the CPU for processing and visualization, although it does have a few GPU-aided options. If you would like to use the GPU for these, you will need an NVIDIA GPU and a corresponding CUDA toolkit which can be installed here:
+https://developer.nvidia.com/cuda-toolkit
+
+To install nettracer3d with associated GPU-supporting packages, please use:
+
+If your CUDA toolkit is version 11: pip install nettracer3d[CUDA11]
+If your CUDA toolkit is version 12: pip install nettracer3d[CUDA12]
+If you just want the entire cupy library: pip install nettracer3d[cupy]
+
+
+This gui is built from the PyQt6 package and therefore may not function on dockers or virtual envs that are unable to support PyQt6 displays.
+
+
+For a (slightly outdated) video tutorial on using the GUI: https://www.youtube.com/watch?v=cRatn5VTWDY
 
 NetTracer3D is free to use/fork for academic/nonprofit use so long as citation is provided, and is available for commercial use at a fee (see license file for information).
 
 NetTracer3D was developed by Liam McLaughlin while working under Dr. Sanjay Jain at Washington University School of Medicine.
 
--- Version 0.6.8 updates --
+-- Version 0.6.9 updates --
+
+1. Adjusted all distance transform-based dilation/radius calculating methods to simply use the already supported scipy.ndimage.distance_transform_edt() sampling parameter to account for differentially scaled axis (previously the image was being resampled but now it no longer will need to do that).
+
+2. Added new right click option to extract highlighted regions and implant their data onto a separate image or into a new empty image.
 
-
+3. General bug fixes and improvements.
 
-
+4. Now specifies python 3.11.
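Update 1 above refers to the sampling argument of scipy.ndimage.distance_transform_edt(), which measures distances in physical units per axis instead of voxel counts, so anisotropic stacks no longer need to be resampled first. A minimal sketch of the idea (the array shape and scale values here are invented for illustration):

```python
import numpy as np
from scipy import ndimage

# Toy anisotropic stack: (z, y, x) with z planes 2.0 units apart and x/y spaced 0.5 units.
mask = np.zeros((5, 20, 20), dtype=bool)
mask[2, 5:15, 5:15] = True

z_scale, xy_scale = 2.0, 0.5  # assumed voxel dimensions

# Passing the per-axis spacing via `sampling` returns distances in physical units,
# so no zoom/resample step is needed to make the voxels cubic first.
dist = ndimage.distance_transform_edt(mask, sampling=[z_scale, xy_scale, xy_scale])
print(dist.max())  # deepest point inside the object, in the same units as the scales
```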
nettracer3d-0.6.9/README.md (new file)

@@ -0,0 +1,45 @@
+NetTracer3D is a python package developed for both 2D and 3D analysis of microscopic images in the .tif file format. It supports generation of 3D networks showing the relationships between objects (or nodes) in three dimensional space, either based on their own proximity or connectivity via connecting objects such as nerves or blood vessels. In addition to these functionalities are several advanced 3D data processing algorithms, such as labeling of branched structures or abstraction of branched structures into networks. Note that nettracer3d uses segmented data, which can be segmented from other softwares such as ImageJ and imported into NetTracer3D, although it does offer its own segmentation via intensity and volumetric thresholding, or random forest machine learning segmentation. NetTracer3D currently has a fully functional GUI. To use the GUI, after installing the nettracer3d package via pip, enter the command 'nettracer3d' in your command prompt:
+
+--- Documentation ---
+
+Please see: https://nettracer3d.readthedocs.io/en/latest/
+
+--- Installation ---
+
+To install nettracer3d, simply install Python and use this command in your command terminal:
+
+pip install nettracer3d
+
+I recommend installing the program as an Anaconda package to ensure its modules are work together on your specific system:
+(Install anaconda at the link below, set up a new python env for nettracer3d, then use the same pip command).
+
+https://www.anaconda.com/download?utm_source=anacondadocs&utm_medium=documentation&utm_campaign=download&utm_content=installwindows
+
+nettracer3d mostly utilizes the CPU for processing and visualization, although it does have a few GPU-aided options. If you would like to use the GPU for these, you will need an NVIDIA GPU and a corresponding CUDA toolkit which can be installed here:
+https://developer.nvidia.com/cuda-toolkit
+
+To install nettracer3d with associated GPU-supporting packages, please use:
+
+If your CUDA toolkit is version 11: pip install nettracer3d[CUDA11]
+If your CUDA toolkit is version 12: pip install nettracer3d[CUDA12]
+If you just want the entire cupy library: pip install nettracer3d[cupy]
+
+
+This gui is built from the PyQt6 package and therefore may not function on dockers or virtual envs that are unable to support PyQt6 displays.
+
+
+For a (slightly outdated) video tutorial on using the GUI: https://www.youtube.com/watch?v=cRatn5VTWDY
+
+NetTracer3D is free to use/fork for academic/nonprofit use so long as citation is provided, and is available for commercial use at a fee (see license file for information).
+
+NetTracer3D was developed by Liam McLaughlin while working under Dr. Sanjay Jain at Washington University School of Medicine.
+
+-- Version 0.6.9 updates --
+
+1. Adjusted all distance transform-based dilation/radius calculating methods to simply use the already supported scipy.ndimage.distance_transform_edt() sampling parameter to account for differentially scaled axis (previously the image was being resampled but now it no longer will need to do that).
+
+2. Added new right click option to extract highlighted regions and implant their data onto a separate image or into a new empty image.
+
+3. General bug fixes and improvements.
+
+4. Now specifies python 3.11.
{nettracer3d-0.6.8 → nettracer3d-0.6.9}/pyproject.toml

@@ -1,8 +1,8 @@
 [project]
 name = "nettracer3d"
-version = "0.6.8"
+version = "0.6.9"
 authors = [
-{ name="Liam McLaughlin", email="
+{ name="Liam McLaughlin", email="liamm@wustl.edu" },
 ]
 description = "Scripts for intializing and analyzing networks from segmentations of three dimensional images."
 
@@ -27,7 +27,7 @@ dependencies = [
 ]
 
 readme = "README.md"
-requires-python = "
+requires-python = "==3.11"
 classifiers = [
 "Programming Language :: Python :: 3",
 "License :: Other/Proprietary License",
@@ -49,5 +49,6 @@ cupy = [
 nettracer3d = "nettracer3d.run:main"
 
 [project.urls]
-
+Documentation = "https://nettracer3d.readthedocs.io/en/latest/"
+Video_Tutorial = "https://www.youtube.com/watch?v=cRatn5VTWDY"
 Reference_Citation_For_Use = "https://doi.org/10.1101/2024.07.29.605633"
{nettracer3d-0.6.8 → nettracer3d-0.6.9}/src/nettracer3d/modularity.py

@@ -446,31 +446,11 @@ def community_partition(master_list, weighted = False, style = 0, dostats = True
     except:
         pass
 
-    try:
-        # Per-community statistics
-        for i, com in enumerate(communities):
-            subgraph = G.subgraph(com)
-
-            # Basic community metrics
-            stats[f'Community {i+1} Density'] = nx.density(subgraph)
-            stats[f'Community {i+1} Conductance'] = nx.conductance(G, com)
-            stats[f'Community {i+1} Avg Clustering'] = nx.average_clustering(subgraph)
-
-            # Degree centrality
-            degree_cent = nx.degree_centrality(subgraph)
-            stats[f'Community {i+1} Avg Degree Centrality'] = np.mean(list(degree_cent.values()))
-
-            # Average path length (only for connected subgraphs)
-            if nx.is_connected(subgraph):
-                stats[f'Community {i+1} Avg Path Length'] = nx.average_shortest_path_length(subgraph)
-    except:
-        pass
-
-    try:
+    #try:
         # Add some Louvain-specific statistics
-        stats['Partition Resolution'] = 1.0 # Default resolution parameter
-    except:
-        pass
+        #stats['Partition Resolution'] = 1.0 # Default resolution parameter
+    #except:
+        #pass
     try:
         stats['Number of Iterations'] = len(set(partition.values()))
     except:
@@ -514,6 +494,25 @@ def community_partition(master_list, weighted = False, style = 0, dostats = True
     except:
         pass
 
+    try:
+        # Per-community statistics
+        for i, com in enumerate(communities):
+            subgraph = G.subgraph(com)
+
+            # Basic community metrics
+            stats[f'Community {i+1} Density'] = nx.density(subgraph)
+            stats[f'Community {i+1} Conductance'] = nx.conductance(G, com)
+            stats[f'Community {i+1} Avg Clustering'] = nx.average_clustering(subgraph)
+
+            # Degree centrality
+            degree_cent = nx.degree_centrality(subgraph)
+            stats[f'Community {i+1} Avg Degree Centrality'] = np.mean(list(degree_cent.values()))
+
+            # Average path length (only for connected subgraphs)
+            if nx.is_connected(subgraph):
+                stats[f'Community {i+1} Avg Path Length'] = nx.average_shortest_path_length(subgraph)
+    except:
+        pass
 
     return stats
 
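The per-community block that moves in the two hunks above is standard networkx bookkeeping. A self-contained sketch of the same calculations, using a toy graph and Louvain communities purely for illustration:

```python
import networkx as nx
import numpy as np

G = nx.karate_club_graph()
communities = nx.community.louvain_communities(G, seed=0)

stats = {}
for i, com in enumerate(communities):
    subgraph = G.subgraph(com)
    # Basic community metrics
    stats[f'Community {i+1} Density'] = nx.density(subgraph)
    stats[f'Community {i+1} Conductance'] = nx.conductance(G, com)
    stats[f'Community {i+1} Avg Clustering'] = nx.average_clustering(subgraph)
    # Degree centrality averaged over the community's members
    degree_cent = nx.degree_centrality(subgraph)
    stats[f'Community {i+1} Avg Degree Centrality'] = np.mean(list(degree_cent.values()))
    # Average path length only makes sense on a connected subgraph
    if nx.is_connected(subgraph):
        stats[f'Community {i+1} Avg Path Length'] = nx.average_shortest_path_length(subgraph)

print(stats)
```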
{nettracer3d-0.6.8 → nettracer3d-0.6.9}/src/nettracer3d/morphology.py

@@ -354,6 +354,7 @@ def process_object_cpu(label, objects, labeled_array, xy_scale = 1, z_scale = 1)
     mask = (subarray == label)
 
 
+    """
     # Determine which dimension needs resampling
     if (z_scale > xy_scale) and mask.shape[0] != 1:
         # Z dimension needs to be stretched
@@ -371,13 +372,13 @@ def process_object_cpu(label, objects, labeled_array, xy_scale = 1, z_scale = 1)
     # Resample the mask if needed
     if zoom_factor:
         mask = ndimage.zoom(mask, zoom_factor, order=0) # Use order=0 for binary masks
-
+    """
 
     # Compute distance transform on the smaller mask
-    dist_transform = compute_distance_transform_distance(mask)
+    dist_transform = compute_distance_transform_distance(mask, sampling = [z_scale, xy_scale, xy_scale])
 
     # Filter out small values near the edge to focus on more central regions
-    radius = np.max(dist_transform)
+    radius = np.max(dist_transform)
 
     return label, radius
 
@@ -474,6 +475,7 @@ def estimate_object_radii_gpu(labeled_array, xy_scale = 1, z_scale = 1):
     # Create binary mask for this object (directly on GPU)
     mask_gpu = (labeled_array_gpu[tuple(padded_slices)] == label)
 
+    """
     # Determine which dimension needs resampling
     if (z_scale > xy_scale) and mask_gpu.shape[0] != 1:
         # Z dimension needs to be stretched
@@ -491,11 +493,12 @@ def estimate_object_radii_gpu(labeled_array, xy_scale = 1, z_scale = 1):
     # Resample the mask if needed
     if zoom_factor:
         mask_gpu = cpx.zoom(mask_gpu, zoom_factor, order=0) # Use order=0 for binary masks
+    """
 
     # Compute distance transform on GPU
-    dist_transform_gpu = compute_distance_transform_distance_GPU(mask_gpu)
+    dist_transform_gpu = compute_distance_transform_distance_GPU(mask_gpu, sampling = [z_scale, xy_scale, xy_scale])
 
-    radius = float(cp.max(dist_transform_gpu).get())
+    radius = float(cp.max(dist_transform_gpu).get())
 
 
     # Store the radius and the scaled radius
@@ -510,14 +513,14 @@ def estimate_object_radii_gpu(labeled_array, xy_scale = 1, z_scale = 1):
         print(f"GPU calculation failed, trying CPU instead -> {e}")
         return estimate_object_radii_cpu(labeled_array)
 
-def compute_distance_transform_distance_GPU(nodes):
+def compute_distance_transform_distance_GPU(nodes, sampling = [1,1,1]):
 
     is_pseudo_3d = nodes.shape[0] == 1
     if is_pseudo_3d:
         nodes = cp.squeeze(nodes) # Convert to 2D for processing
 
     # Compute the distance transform on the GPU
-    distance = cpx.distance_transform_edt(nodes)
+    distance = cpx.distance_transform_edt(nodes, sampling = sampling)
 
     if is_pseudo_3d:
         cp.expand_dims(distance, axis = 0)
@@ -525,14 +528,14 @@ def compute_distance_transform_distance_GPU(nodes):
     return distance
 
 
-def compute_distance_transform_distance(nodes):
+def compute_distance_transform_distance(nodes, sampling = [1,1,1]):
 
     is_pseudo_3d = nodes.shape[0] == 1
     if is_pseudo_3d:
         nodes = np.squeeze(nodes) # Convert to 2D for processing
 
     # Fallback to CPU if there's an issue with GPU computation
-    distance = ndimage.distance_transform_edt(nodes)
+    distance = ndimage.distance_transform_edt(nodes, sampling = sampling)
     if is_pseudo_3d:
         np.expand_dims(distance, axis = 0)
     return distance
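Taken together, the hunks above reduce radius estimation to: crop each labeled object, run the Euclidean distance transform with per-axis sampling, and keep its maximum. A rough CPU-only sketch of that workflow (labels, shapes, and scales are made up for the example; this is not the package's code):

```python
import numpy as np
from scipy import ndimage

labeled = np.zeros((10, 64, 64), dtype=np.uint16)
labeled[3:7, 10:30, 10:30] = 1
labeled[2:9, 40:60, 35:55] = 2

xy_scale, z_scale = 0.5, 2.0  # assumed voxel sizes

radii = {}
for label, sl in enumerate(ndimage.find_objects(labeled), start=1):
    if sl is None:
        continue
    mask = labeled[sl] == label
    # Distance transform in physical units; no resampling step needed.
    dist = ndimage.distance_transform_edt(mask, sampling=[z_scale, xy_scale, xy_scale])
    radii[label] = float(dist.max())  # rough "radius" of the object's interior

print(radii)
```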
{nettracer3d-0.6.8 → nettracer3d-0.6.9}/src/nettracer3d/nettracer.py

@@ -987,6 +987,7 @@ def dilate_3D_dt(array, search_distance, xy_scaling=1.0, z_scaling=1.0):
 
     del array
 
+    """
    # Determine which dimension needs resampling
    if (z_scaling > xy_scaling):
        # Z dimension needs to be stretched
@@ -1007,17 +1008,18 @@ def dilate_3D_dt(array, search_distance, xy_scaling=1.0, z_scaling=1.0):
    # Resample the mask if needed
    if zoom_factor:
        inv = ndimage.zoom(inv, zoom_factor, order=0) # Use order=0 for binary masks
-
+    """
+
     # Compute distance transform (Euclidean)
-    inv = smart_dilate.compute_distance_transform_distance(inv)
+    inv = smart_dilate.compute_distance_transform_distance(inv, sampling = [z_scaling, xy_scaling, xy_scaling])
 
-    inv = inv * cardinal
+    #inv = inv * cardinal
 
     # Threshold the distance transform to get dilated result
     inv = inv <= search_distance
 
-    if rev_factor:
-        inv = ndimage.zoom(inv, rev_factor, order=0) # Use order=0 for binary masks
+    #if rev_factor:
+        #inv = ndimage.zoom(inv, rev_factor, order=0) # Use order=0 for binary masks
 
     return inv.astype(np.uint8)
 
@@ -1043,6 +1045,7 @@ def erode_3D_dt(array, search_distance, xy_scaling=1.0, z_scaling=1.0):
 
     # For erosion, we work directly with the foreground (no inversion needed)
 
+    """
    # Determine which dimension needs resampling
    if (z_scaling > xy_scaling):
        # Z dimension needs to be stretched
@@ -1063,21 +1066,22 @@ def erode_3D_dt(array, search_distance, xy_scaling=1.0, z_scaling=1.0):
    # Resample the mask if needed
    if zoom_factor:
        array = ndimage.zoom(array, zoom_factor, order=0) # Use order=0 for binary masks
+    """
 
     print("Computing a distance transform for a perfect erosion...")
 
-    array = smart_dilate.compute_distance_transform_distance(array)
+    array = smart_dilate.compute_distance_transform_distance(array, sampling = [z_scaling, xy_scaling, xy_scaling])
 
     # Apply scaling factor
-    array = array * cardinal
+    #array = array * cardinal
 
     # Threshold the distance transform to get eroded result
     # For erosion, we keep only the points that are at least search_distance from the boundary
     array = array >= search_distance
 
     # Resample back to original dimensions if needed
-    if rev_factor:
-        array = ndimage.zoom(array, rev_factor, order=0) # Use order=0 for binary masks
+    #if rev_factor:
+        #array = ndimage.zoom(array, rev_factor, order=0) # Use order=0 for binary masks
 
     return array.astype(np.uint8)
 
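Both functions above boil down to thresholding a physically scaled distance transform: dilation keeps every voxel within the search distance of the foreground, erosion keeps only foreground voxels at least that far from the background. A minimal standalone sketch of that idea (not the package's implementation; names and scales are assumptions):

```python
import numpy as np
from scipy import ndimage

def dilate_dt(binary, search_distance, xy_scaling=1.0, z_scaling=1.0):
    # Distance of each background voxel to the nearest foreground voxel, in physical units.
    dist = ndimage.distance_transform_edt(binary == 0,
                                          sampling=[z_scaling, xy_scaling, xy_scaling])
    return (dist <= search_distance).astype(np.uint8)

def erode_dt(binary, search_distance, xy_scaling=1.0, z_scaling=1.0):
    # Distance of each foreground voxel to the nearest background voxel, in physical units.
    dist = ndimage.distance_transform_edt(binary != 0,
                                          sampling=[z_scaling, xy_scaling, xy_scaling])
    return (dist >= search_distance).astype(np.uint8)

vol = np.zeros((8, 32, 32), dtype=np.uint8)
vol[4, 16, 16] = 1
dilated = dilate_dt(vol, search_distance=3.0, xy_scaling=0.5, z_scaling=2.0)
eroded = erode_dt(dilated, search_distance=1.0, xy_scaling=0.5, z_scaling=2.0)
print(dilated.sum(), eroded.sum())
```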
@@ -2061,7 +2065,24 @@ def mask(image, mask, directory = None):
 
     mask = mask != 0
 
-
+    if len(image.shape) == 3:
+
+        image = image * mask
+    else:
+        # Split into separate color channels
+        channels = [image[..., i] for i in range(3)]
+        masked_channels = []
+
+        for image in channels:
+            # Upsample each channel separately
+            if len(image.shape) == 2:
+                np.expand_dims(image, axis = 0)
+            image = image * mask
+            masked_channels.append(image)
+
+        # Stack the channels back together
+        image = np.stack(masked_channels, axis=-1)
+
 
     if string_bool:
         if directory is None:
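The new branch above masks color data channel by channel before restacking. A compact sketch of the same pattern (array shapes invented for the example; not the package's function):

```python
import numpy as np

def apply_mask(image, binary):
    binary = binary != 0
    if image.ndim == binary.ndim:
        # Grayscale volume: broadcast the binary mask directly.
        return image * binary
    # Color volume: the last axis holds channels; mask each one, then restack.
    channels = [image[..., i] * binary for i in range(image.shape[-1])]
    return np.stack(channels, axis=-1)

img = np.random.randint(0, 255, (4, 32, 32, 3), dtype=np.uint8)  # (z, y, x, rgb)
roi = np.zeros((4, 32, 32), dtype=np.uint8)
roi[:, 8:24, 8:24] = 1
print(apply_mask(img, roi).shape)  # (4, 32, 32, 3)
```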
@@ -3363,16 +3384,17 @@ class Network_3D:
         :param skeletonize: (Optional - Val = False, boolean) - A boolean of whether to skeletonize the edges when using them.
         """
 
-
-
+        if directory is not None:
+            directory = encapsulate()
 
         self._xy_scale = xy_scale
         self._z_scale = z_scale
 
-
-
-
-
+        if directory is not None:
+            try:
+                self.save_scaling(directory)
+            except:
+                pass
 
         if search is None and ignore_search_region == False:
             search = 0
@@ -3383,19 +3405,23 @@ class Network_3D:
         self._nodes = nodes
         del nodes
 
+        if self._nodes.shape[0] == 1:
+            fast_dil = True #Set this to true because the 2D algo always uses the distance transform and doesnt need this special ver
+
         if label_nodes:
             self._nodes, num_nodes = label_objects(self._nodes)
         if other_nodes is not None:
             self.merge_nodes(other_nodes, label_nodes)
 
-
-
-
-
-
-
-
-
+        if directory is not None:
+            try:
+                self.save_nodes(directory)
+            except:
+                pass
+            try:
+                self.save_node_identities(directory)
+            except:
+                pass
 
         if not ignore_search_region:
             self.calculate_search_region(search, GPU = GPU, fast_dil = fast_dil, GPU_downsample = GPU_downsample)
@@ -3408,31 +3434,35 @@ class Network_3D:
 
         self.calculate_edges(edges, diledge = diledge, inners = inners, hash_inner_edges = hash_inners, search = search, remove_edgetrunk = remove_trunk, GPU = GPU, fast_dil = fast_dil, skeletonized = skeletonize)
         del edges
-
-
-
-
+        if directory is not None:
+            try:
+                self.save_edges(directory)
+            except:
+                pass
 
         self.calculate_network(search = search, ignore_search_region = ignore_search_region)
 
-
-
-
-
+        if directory is not None:
+            try:
+                self.save_network(directory)
+            except:
+                pass
 
         if self._nodes is None:
             self.load_nodes(directory)
 
         self.calculate_node_centroids(down_factor)
-
-
-
-
+        if directory is not None:
+            try:
+                self.save_node_centroids(directory)
+            except:
+                pass
         self.calculate_edge_centroids(down_factor)
-
-
-
-
+        if directory is not None:
+            try:
+                self.save_edge_centroids(directory)
+            except:
+                pass
 
 
     def draw_network(self, directory = None, down_factor = None, GPU = False):
@@ -4506,11 +4536,28 @@ class Network_3D:
 
 
 
-    def kd_network(self, distance = 100, targets = None):
+    def kd_network(self, distance = 100, targets = None, make_array = False):
+
+        centroids = copy.deepcopy(self._node_centroids)
+
+        if self._xy_scale == self._z_scale:
+            upsample = None
+            distance = distance/self._xy_scale # Account for scaling
+        else:
+            upsample = [self._xy_scale, self._z_scale] # This means resolutions have to be normalized
+            if self._xy_scale < self._z_scale:
+                distance = distance/self._xy_scale # We always upsample to normalize
+                refactor = self._z_scale/self._xy_scale
+                for node, centroid in centroids.items():
+                    centroids[node] = [centroid[0] * refactor, centroid[1], centroid[2]]
+            elif self._z_scale < self._xy_scale:
+                distance = distance/self._z_scale
+                refactor = self._xy_scale/self._z_scale
+                for node, centroid in centroids.items():
+                    centroids[node] = [centroid[0], centroid[1] * refactor, centroid[2] * refactor]
 
-        array = self.centroid_array()
 
-        neighbors = proximity.find_neighbors_kdtree(
+        neighbors = proximity.find_neighbors_kdtree(distance, targets = targets, centroids = centroids)
 
         network = create_and_save_dataframe(neighbors)
 
@@ -4520,7 +4567,11 @@ class Network_3D:
 
         self.remove_edge_weights()
 
-
+        if make_array:
+
+            array = self.centroid_array()
+
+            return array
 
 
 
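kd_network now normalizes centroids onto an isotropic grid before the radius query so the search distance means the same physical length along every axis. The package's proximity.find_neighbors_kdtree isn't shown in this diff, but the underlying idea is roughly the following sketch with scipy.spatial.cKDTree (centroids and scales are made up for illustration):

```python
import numpy as np
from scipy.spatial import cKDTree

centroids = {1: [2, 10, 10], 2: [3, 12, 11], 3: [9, 40, 40]}  # (z, y, x) in voxels
xy_scale, z_scale = 0.5, 2.0  # assumed voxel sizes
distance = 10.0               # desired search radius in physical units

# Rescale onto the finer axis so one grid unit means the same physical length everywhere.
if xy_scale <= z_scale:
    refactor = z_scale / xy_scale
    pts = {n: [c[0] * refactor, c[1], c[2]] for n, c in centroids.items()}
    distance /= xy_scale
else:
    refactor = xy_scale / z_scale
    pts = {n: [c[0], c[1] * refactor, c[2] * refactor] for n, c in centroids.items()}
    distance /= z_scale

ids = list(pts)
tree = cKDTree([pts[n] for n in ids])
pairs = tree.query_pairs(r=distance)
print([(ids[i], ids[j]) for i, j in pairs])  # node pairs within `distance` of each other
```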