nettracer3d 0.6.4__tar.gz → 0.6.6__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {nettracer3d-0.6.4/src/nettracer3d.egg-info → nettracer3d-0.6.6}/PKG-INFO +5 -11
- {nettracer3d-0.6.4 → nettracer3d-0.6.6}/README.md +3 -10
- {nettracer3d-0.6.4 → nettracer3d-0.6.6}/pyproject.toml +4 -2
- {nettracer3d-0.6.4 → nettracer3d-0.6.6}/src/nettracer3d/morphology.py +207 -5
- {nettracer3d-0.6.4 → nettracer3d-0.6.6}/src/nettracer3d/nettracer.py +48 -6
- {nettracer3d-0.6.4 → nettracer3d-0.6.6}/src/nettracer3d/nettracer_gui.py +291 -76
- {nettracer3d-0.6.4 → nettracer3d-0.6.6}/src/nettracer3d/segmenter.py +239 -164
- {nettracer3d-0.6.4 → nettracer3d-0.6.6}/src/nettracer3d/smart_dilate.py +2 -31
- {nettracer3d-0.6.4 → nettracer3d-0.6.6/src/nettracer3d.egg-info}/PKG-INFO +5 -11
- {nettracer3d-0.6.4 → nettracer3d-0.6.6}/src/nettracer3d.egg-info/requires.txt +1 -0
- {nettracer3d-0.6.4 → nettracer3d-0.6.6}/LICENSE +0 -0
- {nettracer3d-0.6.4 → nettracer3d-0.6.6}/setup.cfg +0 -0
- {nettracer3d-0.6.4 → nettracer3d-0.6.6}/src/nettracer3d/__init__.py +0 -0
- {nettracer3d-0.6.4 → nettracer3d-0.6.6}/src/nettracer3d/community_extractor.py +0 -0
- {nettracer3d-0.6.4 → nettracer3d-0.6.6}/src/nettracer3d/modularity.py +0 -0
- {nettracer3d-0.6.4 → nettracer3d-0.6.6}/src/nettracer3d/network_analysis.py +0 -0
- {nettracer3d-0.6.4 → nettracer3d-0.6.6}/src/nettracer3d/network_draw.py +0 -0
- {nettracer3d-0.6.4 → nettracer3d-0.6.6}/src/nettracer3d/node_draw.py +0 -0
- {nettracer3d-0.6.4 → nettracer3d-0.6.6}/src/nettracer3d/proximity.py +0 -0
- {nettracer3d-0.6.4 → nettracer3d-0.6.6}/src/nettracer3d/run.py +0 -0
- {nettracer3d-0.6.4 → nettracer3d-0.6.6}/src/nettracer3d/simple_network.py +0 -0
- {nettracer3d-0.6.4 → nettracer3d-0.6.6}/src/nettracer3d.egg-info/SOURCES.txt +0 -0
- {nettracer3d-0.6.4 → nettracer3d-0.6.6}/src/nettracer3d.egg-info/dependency_links.txt +0 -0
- {nettracer3d-0.6.4 → nettracer3d-0.6.6}/src/nettracer3d.egg-info/entry_points.txt +0 -0
- {nettracer3d-0.6.4 → nettracer3d-0.6.6}/src/nettracer3d.egg-info/top_level.txt +0 -0

PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: nettracer3d
-Version: 0.6.4
+Version: 0.6.6
 Summary: Scripts for intializing and analyzing networks from segmentations of three dimensional images.
 Author-email: Liam McLaughlin <mclaughlinliam99@gmail.com>
 Project-URL: User_Tutorial, https://www.youtube.com/watch?v=cRatn5VTWDY
@@ -27,6 +27,7 @@ Requires-Dist: qtrangeslider==0.1.5
 Requires-Dist: PyQt6==6.8.0
 Requires-Dist: scikit-learn==1.6.1
 Requires-Dist: nibabel==5.2.0
+Requires-Dist: setuptools>=65.0.0
 Provides-Extra: cuda11
 Requires-Dist: cupy-cuda11x; extra == "cuda11"
 Provides-Extra: cuda12
@@ -45,15 +46,8 @@ NetTracer3D is free to use/fork for academic/nonprofit use so long as citation i
 
 NetTracer3D was developed by Liam McLaughlin while working under Dr. Sanjay Jain at Washington University School of Medicine.
 
--- Version 0.6.
+-- Version 0.6.6 updates --
 
-1.
+1. Updated flexibility of the fill holes method for user with varying use cases.
 
-2.
-
-3. Removed attempted trendline fitting from degree distribution
-
-4. Added new feature to skeletonization (and corresponding branch labeler/gennodes)
-Now you can have the program attempt to auto-correct 3D skeletonization loop artifacts through a method that just runs the 3d fill holes algo and then attempts to reskeletonize the output. This worked well in my own testing.
-
-5. Other minor fixes/improvements
+2. Greatly improved memory efficiency of segmenter. Now works comfortably with 3.5 GB array on my machine for example (my machine has 64 GB RAM and this occupied around 20% of it I would say). Removed the non-memory efficient option (now always prioritizes mem - the former wasn't even that much faster anyway), removed GPU option (would need an entire cupy-centric build, does not make sense to be sharing a script with the CPU version).

README.md

@@ -8,15 +8,8 @@ NetTracer3D is free to use/fork for academic/nonprofit use so long as citation i
 
 NetTracer3D was developed by Liam McLaughlin while working under Dr. Sanjay Jain at Washington University School of Medicine.
 
--- Version 0.6.
+-- Version 0.6.6 updates --
 
-1.
+1. Updated flexibility of the fill holes method for user with varying use cases.
 
-2.
-
-3. Removed attempted trendline fitting from degree distribution
-
-4. Added new feature to skeletonization (and corresponding branch labeler/gennodes)
-Now you can have the program attempt to auto-correct 3D skeletonization loop artifacts through a method that just runs the 3d fill holes algo and then attempts to reskeletonize the output. This worked well in my own testing.
-
-5. Other minor fixes/improvements
+2. Greatly improved memory efficiency of segmenter. Now works comfortably with 3.5 GB array on my machine for example (my machine has 64 GB RAM and this occupied around 20% of it I would say). Removed the non-memory efficient option (now always prioritizes mem - the former wasn't even that much faster anyway), removed GPU option (would need an entire cupy-centric build, does not make sense to be sharing a script with the CPU version).

pyproject.toml

@@ -1,10 +1,11 @@
 [project]
 name = "nettracer3d"
-version = "0.6.4"
+version = "0.6.6"
 authors = [
     { name="Liam McLaughlin", email="mclaughlinliam99@gmail.com" },
 ]
 description = "Scripts for intializing and analyzing networks from segmentations of three dimensional images."
+
 dependencies = [
     "numpy == 1.26.4",
     "scipy == 1.14.1",
@@ -21,7 +22,8 @@ dependencies = [
     "qtrangeslider == 0.1.5",
     "PyQt6 == 6.8.0",
     "scikit-learn == 1.6.1",
-    "nibabel == 5.2.0"
+    "nibabel == 5.2.0",
+    "setuptools >= 65.0.0"
 ]
 
 readme = "README.md"

src/nettracer3d/morphology.py

@@ -6,8 +6,17 @@ import multiprocessing as mp
 from concurrent.futures import ThreadPoolExecutor, as_completed
 import tifffile
 from functools import partial
-import
+import concurrent.futures
+from functools import partial
 from scipy import ndimage
+import pandas as pd
+# Import CuPy conditionally for GPU support
+try:
+    import cupy as cp
+    import cupyx.scipy.ndimage as cpx
+    HAS_CUPY = True
+except ImportError:
+    HAS_CUPY = False
 
 def get_reslice_indices(slice_obj, dilate_xy, dilate_z, array_shape):
     """Convert slice object to padded indices accounting for dilation and boundaries"""
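The conditional import above lets the same module run with or without a GPU: downstream code can branch on HAS_CUPY to pick a CuPy or SciPy backend. A minimal sketch of that pattern, assuming only that cupyx.scipy.ndimage mirrors scipy.ndimage for the distance transform (the helper name euclidean_distance_transform is illustrative, not part of the package):

from scipy import ndimage

try:
    import cupy as cp
    import cupyx.scipy.ndimage as cpx
    HAS_CUPY = True
except ImportError:
    HAS_CUPY = False

def euclidean_distance_transform(mask):
    # Hypothetical helper: use the GPU EDT when CuPy is importable,
    # otherwise fall back to the SciPy implementation on the CPU.
    if HAS_CUPY:
        return cp.asnumpy(cpx.distance_transform_edt(cp.asarray(mask)))
    return ndimage.distance_transform_edt(mask)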
@@ -279,9 +288,6 @@ def search_neighbor_ids(nodes, targets, id_dict, neighborhood_dict, totals, sear
 
 
 
-
-
-
 def get_search_space_dilate(target, centroids, id_dict, search, scaling = 1):
 
     ymax = np.max(centroids[:, 0])
@@ -308,4 +314,200 @@ def get_search_space_dilate(target, centroids, id_dict, search, scaling = 1):
 
 
 
-    return array
+    return array
+
+
+# Methods pertaining to getting radii:
+
+def process_object_cpu(label, objects, labeled_array):
+    """
+    Process a single labeled object to estimate its radius (CPU version).
+    This function is designed to be called in parallel.
+
+    Parameters:
+    -----------
+    label : int
+        The label ID to process
+    objects : list
+        List of slice objects from ndimage.find_objects
+    labeled_array : numpy.ndarray
+        The full 3D labeled array
+
+    Returns:
+    --------
+    tuple: (label, radius, mask_volume, dimensions)
+    """
+    # Get the slice object (bounding box) for this label
+    # Index is label-1 because find_objects returns 0-indexed results
+    obj_slice = objects[label-1]
+
+    if obj_slice is None:
+        return label, 0, 0, np.array([0, 0, 0])
+
+    # Extract subarray containing just this object (plus padding)
+    # Create padded slices to ensure there's background around the object
+    padded_slices = []
+    for dim_idx, dim_slice in enumerate(obj_slice):
+        start = max(0, dim_slice.start - 1)
+        stop = min(labeled_array.shape[dim_idx], dim_slice.stop + 1)
+        padded_slices.append(slice(start, stop))
+
+    # Extract the subarray
+    subarray = labeled_array[tuple(padded_slices)]
+
+    # Create binary mask for this object within the subarray
+    mask = (subarray == label)
+
+    # Compute distance transform on the smaller mask
+    dist_transform = compute_distance_transform_distance(mask)
+
+    # Filter out small values near the edge to focus on more central regions
+    radius = np.max(dist_transform)
+
+    # Calculate basic shape metrics
+    volume = np.sum(mask)
+
+    # Calculate bounding box dimensions
+    x_len = obj_slice[0].stop - obj_slice[0].start
+    y_len = obj_slice[1].stop - obj_slice[1].start
+    z_len = obj_slice[2].stop - obj_slice[2].start
+    dimensions = np.array([x_len, y_len, z_len])
+
+    return label, radius, volume, dimensions
+
+def estimate_object_radii_cpu(labeled_array, n_jobs=None):
+    """
+    Estimate the radii of labeled objects in a 3D numpy array using distance transform.
+    CPU parallel implementation.
+
+    Parameters:
+    -----------
+    labeled_array : numpy.ndarray
+        3D array where each object has a unique integer label (0 is background)
+    n_jobs : int or None
+        Number of parallel jobs. If None, uses all available cores.
+
+    Returns:
+    --------
+    dict: Dictionary mapping object labels to estimated radii
+    dict: (optional) Dictionary of shape statistics for each label
+    """
+    # Find bounding box for each labeled object
+    objects = ndimage.find_objects(labeled_array)
+
+    unique_labels = np.unique(labeled_array)
+    unique_labels = unique_labels[unique_labels != 0]  # Remove background
+
+    # Create a partial function for parallel processing
+    process_func = partial(process_object_cpu, objects=objects, labeled_array=labeled_array)
+
+    # Process objects in parallel
+    results = []
+    with concurrent.futures.ThreadPoolExecutor(max_workers=n_jobs) as executor:
+        # Submit all jobs
+        future_to_label = {executor.submit(process_func, label): label for label in unique_labels}
+
+        # Collect results as they complete
+        for future in concurrent.futures.as_completed(future_to_label):
+            results.append(future.result())
+
+    # Organize results
+    radii = {}
+
+    for label, radius, volume, dimensions in results:
+        radii[label] = radius
+
+    return radii
+
+def estimate_object_radii_gpu(labeled_array):
+    """
+    Estimate the radii of labeled objects in a 3D numpy array using distance transform.
+    GPU implementation using CuPy.
+
+    Parameters:
+    -----------
+    labeled_array : numpy.ndarray
+        3D array where each object has a unique integer label (0 is background)
+
+    Returns:
+    --------
+    dict: Dictionary mapping object labels to estimated radii
+    dict: (optional) Dictionary of shape statistics for each label
+    """
+
+    try:
+        if not HAS_CUPY:
+            raise ImportError("CuPy is required for GPU acceleration")
+
+        # Find bounding box for each labeled object (on CPU)
+        objects = ndimage.find_objects(labeled_array)
+
+        # Transfer entire labeled array to GPU once
+        labeled_array_gpu = cp.asarray(labeled_array)
+
+        unique_labels = cp.unique(labeled_array_gpu)
+        unique_labels = cp.asnumpy(unique_labels)
+        unique_labels = unique_labels[unique_labels != 0]  # Remove background
+
+        radii = {}
+
+        for label in unique_labels:
+            # Get the slice object (bounding box) for this label
+            obj_slice = objects[label-1]
+
+            if obj_slice is None:
+                continue
+
+            # Extract subarray from GPU array
+            padded_slices = []
+            for dim_idx, dim_slice in enumerate(obj_slice):
+                start = max(0, dim_slice.start - 1)
+                stop = min(labeled_array.shape[dim_idx], dim_slice.stop + 1)
+                padded_slices.append(slice(start, stop))
+
+            # Create binary mask for this object (directly on GPU)
+            mask_gpu = (labeled_array_gpu[tuple(padded_slices)] == label)
+
+            # Compute distance transform on GPU
+            dist_transform_gpu = compute_distance_transform_distance_GPU(mask_gpu)
+
+            radius = float(cp.max(dist_transform_gpu).get())
+
+            # Store the radius
+            radii[label] = radius
+
+        # Clean up GPU memory
+        del labeled_array_gpu
+
+        return radii
+
+    except Exception as e:
+        print(f"GPU calculation failed, trying CPU instead -> {e}")
+        return estimate_object_radii_cpu(labeled_array)
+
+def compute_distance_transform_distance_GPU(nodes):
+
+    is_pseudo_3d = nodes.shape[0] == 1
+    if is_pseudo_3d:
+        nodes = cp.squeeze(nodes) # Convert to 2D for processing
+
+    # Compute the distance transform on the GPU
+    distance = cpx.distance_transform_edt(nodes)
+
+    if is_pseudo_3d:
+        cp.expand_dims(distance, axis = 0)
+
+    return distance
+
+
+def compute_distance_transform_distance(nodes):
+
+    is_pseudo_3d = nodes.shape[0] == 1
+    if is_pseudo_3d:
+        nodes = np.squeeze(nodes) # Convert to 2D for processing
+
+    # Fallback to CPU if there's an issue with GPU computation
+    distance = ndimage.distance_transform_edt(nodes)
+    if is_pseudo_3d:
+        np.expand_dims(distance, axis = 0)
+    return distance
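For orientation, a small usage sketch of the CPU path added above: each radius is the maximum of the Euclidean distance transform inside the object's padded bounding box, i.e. the radius of the largest inscribed sphere. The labeled volume here is synthetic, and the import assumes the installed package exposes nettracer3d.morphology as listed in the file table above:

import numpy as np
from scipy import ndimage
from nettracer3d import morphology

# Synthetic example: two blobs in a small 3D volume, labeled 1 and 2 by ndimage.label.
volume = np.zeros((10, 32, 32), dtype=np.uint8)
volume[3:7, 4:12, 4:12] = 1
volume[3:8, 18:28, 18:28] = 1
labeled, count = ndimage.label(volume)

# Threaded over objects; n_jobs caps the worker count (None uses the default).
radii = morphology.estimate_object_radii_cpu(labeled, n_jobs=4)
print(radii)  # one radius entry per label, e.g. {1: ..., 2: ...}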

src/nettracer3d/nettracer.py

@@ -547,6 +547,43 @@ def remove_branches(skeleton, length):
     return image_copy
 
 
+def estimate_object_radii(labeled_array, gpu=False, n_jobs=None):
+    """
+    Estimate the radii of labeled objects in a 3D numpy array.
+    Dispatches to appropriate implementation based on parameters.
+
+    Parameters:
+    -----------
+    labeled_array : numpy.ndarray
+        3D array where each object has a unique integer label (0 is background)
+    gpu : bool
+        Whether to use GPU acceleration via CuPy (if available)
+    n_jobs : int or None
+        Number of parallel jobs for CPU version. If None, uses all available cores.
+
+    Returns:
+    --------
+    dict: Dictionary mapping object labels to estimated radii
+    dict: (optional) Dictionary of shape statistics for each label
+    """
+    # Check if GPU is requested but not available
+    try:
+        import cupy as cp
+        import cupyx.scipy.ndimage as cpx
+        HAS_CUPY = True
+    except ImportError:
+        HAS_CUPY = False
+
+    if gpu and not HAS_CUPY:
+        print("Warning: GPU acceleration requested but CuPy not available. Falling back to CPU.")
+        gpu = False
+
+    if gpu:
+        return morphology.estimate_object_radii_gpu(labeled_array)
+    else:
+        return morphology.estimate_object_radii_cpu(labeled_array, n_jobs)
+
+
 def break_and_label_skeleton(skeleton, peaks = 1, branch_removal = 0, comp_dil = 0, max_vol = 0, directory = None, return_skele = False, nodes = None):
     """Internal method to break open a skeleton at its branchpoints and label the remaining components, for an 8bit binary array"""
 
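The dispatcher above re-checks CuPy at call time and silently downgrades to the CPU path when it is missing. A hedged calling sketch, assuming this function lives in nettracer.py (which the per-file line counts for this release suggest) and that one of the cuda11/cuda12 extras is installed when gpu=True is meant to take effect:

import numpy as np
from nettracer3d import nettracer

labeled = np.zeros((5, 20, 20), dtype=np.uint16)
labeled[1:4, 2:8, 2:8] = 1   # a single labeled object

# gpu=True is only honored when CuPy is importable; otherwise the function
# prints a warning and dispatches to the threaded CPU implementation.
radii = nettracer.estimate_object_radii(labeled, gpu=True, n_jobs=8)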
@@ -707,9 +744,9 @@ def z_project(array3d, method='max'):
     else:
         raise ValueError("Method must be one of: 'max', 'mean', 'min', 'sum', 'std'")
 
-def fill_holes_3d(array):
+def fill_holes_3d(array, head_on = False, fill_borders = True):
 
-    def process_slice(slice_2d, border_threshold=0.08):
+    def process_slice(slice_2d, border_threshold=0.08, fill_borders = True):
         """
         Process a 2D slice, considering components that touch less than border_threshold
         of any border length as potential holes.
@@ -720,6 +757,9 @@ def fill_holes_3d(array):
         """
         slice_2d = slice_2d.astype(np.uint8)
         labels, num_features = ndimage.label(slice_2d)
+
+        if not fill_borders:
+            border_threshold = 0 #Testing
 
         if num_features == 0:
             return np.zeros_like(slice_2d)
@@ -744,8 +784,10 @@ def fill_holes_3d(array):
 
         # Create mask of components that either don't touch borders
         # or touch less than the threshold proportion
+
         background_labels = {label for label, prop in border_proportions.items()
                              if prop > border_threshold}
+
 
         holes_mask = ~np.isin(labels, list(background_labels))
 
@@ -765,19 +807,19 @@ def fill_holes_3d(array):
 
     # Process XY plane
    for z in range(inv_array.shape[0]):
-        array_xy[z] = process_slice(inv_array[z])
+        array_xy[z] = process_slice(inv_array[z], fill_borders = fill_borders)
 
-    if array.shape[0] > 3: #only use these dimensions for sufficiently large zstacks
+    if (array.shape[0] > 3) and not head_on: #only use these dimensions for sufficiently large zstacks
 
         # Process XZ plane
         for y in range(inv_array.shape[1]):
             slice_xz = inv_array[:, y, :]
-            array_xz[:, y, :] = process_slice(slice_xz)
+            array_xz[:, y, :] = process_slice(slice_xz, fill_borders = fill_borders)
 
         # Process YZ plane
         for x in range(inv_array.shape[2]):
             slice_yz = inv_array[:, :, x]
-            array_yz[:, :, x] = process_slice(slice_yz)
+            array_yz[:, :, x] = process_slice(slice_yz, fill_borders = fill_borders)
 
     # Combine results from all three planes
     filled = (array_xy | array_xz | array_yz) * 255