nettracer3d 1.0.0__tar.gz → 1.0.2__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (30)
  1. {nettracer3d-1.0.0/src/nettracer3d.egg-info → nettracer3d-1.0.2}/PKG-INFO +4 -4
  2. {nettracer3d-1.0.0 → nettracer3d-1.0.2}/README.md +3 -3
  3. {nettracer3d-1.0.0 → nettracer3d-1.0.2}/pyproject.toml +1 -1
  4. {nettracer3d-1.0.0 → nettracer3d-1.0.2}/src/nettracer3d/community_extractor.py +24 -8
  5. {nettracer3d-1.0.0 → nettracer3d-1.0.2}/src/nettracer3d/neighborhoods.py +193 -66
  6. {nettracer3d-1.0.0 → nettracer3d-1.0.2}/src/nettracer3d/nettracer.py +71 -3
  7. {nettracer3d-1.0.0 → nettracer3d-1.0.2}/src/nettracer3d/nettracer_gui.py +666 -135
  8. {nettracer3d-1.0.0 → nettracer3d-1.0.2/src/nettracer3d.egg-info}/PKG-INFO +4 -4
  9. {nettracer3d-1.0.0 → nettracer3d-1.0.2}/LICENSE +0 -0
  10. {nettracer3d-1.0.0 → nettracer3d-1.0.2}/setup.cfg +0 -0
  11. {nettracer3d-1.0.0 → nettracer3d-1.0.2}/src/nettracer3d/__init__.py +0 -0
  12. {nettracer3d-1.0.0 → nettracer3d-1.0.2}/src/nettracer3d/cellpose_manager.py +0 -0
  13. {nettracer3d-1.0.0 → nettracer3d-1.0.2}/src/nettracer3d/excelotron.py +0 -0
  14. {nettracer3d-1.0.0 → nettracer3d-1.0.2}/src/nettracer3d/modularity.py +0 -0
  15. {nettracer3d-1.0.0 → nettracer3d-1.0.2}/src/nettracer3d/morphology.py +0 -0
  16. {nettracer3d-1.0.0 → nettracer3d-1.0.2}/src/nettracer3d/network_analysis.py +0 -0
  17. {nettracer3d-1.0.0 → nettracer3d-1.0.2}/src/nettracer3d/network_draw.py +0 -0
  18. {nettracer3d-1.0.0 → nettracer3d-1.0.2}/src/nettracer3d/node_draw.py +0 -0
  19. {nettracer3d-1.0.0 → nettracer3d-1.0.2}/src/nettracer3d/painting.py +0 -0
  20. {nettracer3d-1.0.0 → nettracer3d-1.0.2}/src/nettracer3d/proximity.py +0 -0
  21. {nettracer3d-1.0.0 → nettracer3d-1.0.2}/src/nettracer3d/run.py +0 -0
  22. {nettracer3d-1.0.0 → nettracer3d-1.0.2}/src/nettracer3d/segmenter.py +0 -0
  23. {nettracer3d-1.0.0 → nettracer3d-1.0.2}/src/nettracer3d/segmenter_GPU.py +0 -0
  24. {nettracer3d-1.0.0 → nettracer3d-1.0.2}/src/nettracer3d/simple_network.py +0 -0
  25. {nettracer3d-1.0.0 → nettracer3d-1.0.2}/src/nettracer3d/smart_dilate.py +0 -0
  26. {nettracer3d-1.0.0 → nettracer3d-1.0.2}/src/nettracer3d.egg-info/SOURCES.txt +0 -0
  27. {nettracer3d-1.0.0 → nettracer3d-1.0.2}/src/nettracer3d.egg-info/dependency_links.txt +0 -0
  28. {nettracer3d-1.0.0 → nettracer3d-1.0.2}/src/nettracer3d.egg-info/entry_points.txt +0 -0
  29. {nettracer3d-1.0.0 → nettracer3d-1.0.2}/src/nettracer3d.egg-info/requires.txt +0 -0
  30. {nettracer3d-1.0.0 → nettracer3d-1.0.2}/src/nettracer3d.egg-info/top_level.txt +0 -0
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: nettracer3d
- Version: 1.0.0
+ Version: 1.0.2
  Summary: Scripts for intializing and analyzing networks from segmentations of three dimensional images.
  Author-email: Liam McLaughlin <liamm@wustl.edu>
  Project-URL: Documentation, https://nettracer3d.readthedocs.io/en/latest/
@@ -110,7 +110,7 @@ McLaughlin, L., Zhang, B., Sharma, S. et al. Three dimensional multiscalar neuro
  
  NetTracer3D was developed by Liam McLaughlin while working under Dr. Sanjay Jain at Washington University School of Medicine.
  
- -- Version 1.0.0 Updates --
+ -- Version 1.0.2 Updates --
  
- * The 'network selection' table is now auto-populated when using the multiple-identity selector, and when using the node thresholder.
- * And other minor adjustments/bug fixes
+ * Minor fixes
+ * Added ability to generate violin plots using the table generated from merging node identities, showing the relative expression of markers for multiple channels for the nodes belonging to some channel or community/neighborhood
@@ -65,7 +65,7 @@ McLaughlin, L., Zhang, B., Sharma, S. et al. Three dimensional multiscalar neuro
  
  NetTracer3D was developed by Liam McLaughlin while working under Dr. Sanjay Jain at Washington University School of Medicine.
  
- -- Version 1.0.0 Updates --
+ -- Version 1.0.2 Updates --
  
- * The 'network selection' table is now auto-populated when using the multiple-identity selector, and when using the node thresholder.
- * And other minor adjustments/bug fixes
+ * Minor fixes
+ * Added ability to generate violin plots using the table generated from merging node identities, showing the relative expression of markers for multiple channels for the nodes belonging to some channel or community/neighborhood
@@ -1,6 +1,6 @@
  [project]
  name = "nettracer3d"
- version = "1.0.0"
+ version = "1.0.2"
  authors = [
    { name="Liam McLaughlin", email="liamm@wustl.edu" },
  ]
@@ -733,17 +733,33 @@ def assign_node_colors(node_list: List[int], labeled_array: np.ndarray) -> Tuple
      return rgba_array, node_to_color_names
  
  def assign_community_colors(community_dict: Dict[int, int], labeled_array: np.ndarray) -> Tuple[np.ndarray, Dict[int, str]]:
-     """fast version using lookup table approach."""
+     """Fast version using lookup table approach with brown outliers for community 0."""
+ 
+     # Separate outliers (community 0) from regular communities
+     outliers = {node: comm for node, comm in community_dict.items() if comm == 0}
+     non_outlier_dict = {node: comm for node, comm in community_dict.items() if comm != 0}
  
-     # Same setup as before
-     communities = set(community_dict.values())
-     community_sizes = Counter(community_dict.values())
-     sorted_communities = sorted(communities, key=lambda x: community_sizes[x], reverse=True)
+     # Get communities excluding outliers
+     communities = set(non_outlier_dict.values()) if non_outlier_dict else set()
  
-     colors = generate_distinct_colors(len(communities))
+     # Generate colors for non-outlier communities only
+     colors = generate_distinct_colors(len(communities)) if communities else []
      colors_rgba = np.array([(r, g, b, 255) for r, g, b in colors], dtype=np.uint8)
  
-     community_to_color = {comm: colors_rgba[i] for i, comm in enumerate(sorted_communities)}
+     # Sort communities by size for consistent color assignment
+     if non_outlier_dict:
+         community_sizes = Counter(non_outlier_dict.values())
+         sorted_communities = sorted(communities, key=lambda x: (-community_sizes[x], x))
+         community_to_color = {comm: colors_rgba[i] for i, comm in enumerate(sorted_communities)}
+     else:
+         community_to_color = {}
+ 
+     # Add brown color for outliers (community 0)
+     brown_rgba = np.array([139, 69, 19, 255], dtype=np.uint8)  # Brown color
+     if outliers:
+         community_to_color[0] = brown_rgba
+ 
+     # Create node to color mapping using original community_dict
      node_to_color = {node: community_to_color[comm] for node, comm in community_dict.items()}
  
      # Create lookup table - this is the key optimization
@@ -756,7 +772,7 @@ def assign_community_colors(community_dict: Dict[int, int], labeled_array: np.nd
      # Single vectorized operation - this is much faster!
      rgba_array = color_lut[labeled_array]
  
-     # Rest remains the same
+     # Convert to RGB for color names (including brown for outliers)
      community_to_color_rgb = {k: tuple(v[:3]) for k, v in community_to_color.items()}
      node_to_color_names = convert_node_colors_to_names(community_to_color_rgb)
  
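
Taken together, the two community_extractor.py hunks above reserve community 0 for outliers, which always render in a fixed brown, while the remaining communities get size-sorted distinct colors applied through a NumPy lookup table, so the whole labeled volume is recolored in one fancy-indexing pass. Below is a minimal, self-contained sketch of that lookup-table technique; distinct_colors is a hypothetical stand-in for generate_distinct_colors, whose implementation is not part of this diff:

import numpy as np
from collections import Counter
from matplotlib import cm

def distinct_colors(n):
    # Hypothetical stand-in for community_extractor.generate_distinct_colors:
    # n evenly spaced hues from matplotlib's HSV colormap, as 0-255 RGB tuples.
    return [tuple(int(255 * c) for c in cm.hsv(i / max(n, 1))[:3]) for i in range(n)]

def color_communities(community_dict, labeled_array):
    """Recolor a labeled array by community with one vectorized LUT pass."""
    regular = {node: comm for node, comm in community_dict.items() if comm != 0}
    sizes = Counter(regular.values())
    order = sorted(sizes, key=lambda c: (-sizes[c], c))   # biggest community first
    palette = distinct_colors(len(order))
    comm_color = {c: (*palette[i], 255) for i, c in enumerate(order)}
    if 0 in community_dict.values():
        comm_color[0] = (139, 69, 19, 255)                # fixed brown for outliers
    lut = np.zeros((labeled_array.max() + 1, 4), dtype=np.uint8)  # label 0 stays transparent
    for node, comm in community_dict.items():
        lut[node] = comm_color[comm]
    return lut[labeled_array]                             # single fancy-indexing pass

# Toy usage: three labeled nodes in a 2x3 image; node 3 is an outlier (community 0)
img = np.array([[1, 1, 2], [3, 0, 2]])
print(color_communities({1: 5, 2: 5, 3: 0}, img).shape)   # (2, 3, 4)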
@@ -8,7 +8,8 @@ from matplotlib.colors import LinearSegmentedColormap
  from sklearn.cluster import DBSCAN
  from sklearn.neighbors import NearestNeighbors
  import matplotlib.colors as mcolors
-
+ from collections import Counter
+ from . import community_extractor
  
  
  import os
@@ -347,7 +348,8 @@ def visualize_cluster_composition_umap(cluster_data: Dict[int, np.ndarray],
                                         id_dictionary: Optional[Dict[int, str]] = None,
                                         graph_label = "Community ID",
                                         title = 'UMAP Visualization of Community Compositions',
-                                        neighborhoods: Optional[Dict[int, int]] = None):
+                                        neighborhoods: Optional[Dict[int, int]] = None,
+                                        original_communities = None):
      """
      Convert cluster composition data to UMAP visualization.
  
@@ -394,37 +396,50 @@ def visualize_cluster_composition_umap(cluster_data: Dict[int, np.ndarray],
      embedding = reducer.fit_transform(compositions)
  
      # Determine coloring scheme based on parameters
-     if neighborhoods is not None:
+     if neighborhoods is not None and original_communities is not None:
          # Use neighborhood coloring - import the community extractor methods
          from . import community_extractor
+         from collections import Counter
+ 
+         # Use original_communities (which is {node: neighborhood}) for color generation
+         # This ensures we use the proper node counts for sorting
  
-         # Filter neighborhoods to only include cluster_ids that exist in our data
-         filtered_neighborhoods = {node_id: neighborhood_id
-                                   for node_id, neighborhood_id in neighborhoods.items()
-                                   if node_id in cluster_ids}
+         # Separate outliers (neighborhood 0) from regular neighborhoods in ORIGINAL structure
+         outlier_neighborhoods = {node: neighborhood for node, neighborhood in original_communities.items() if neighborhood == 0}
+         non_outlier_neighborhoods = {node: neighborhood for node, neighborhood in original_communities.items() if neighborhood != 0}
  
-         # Create a dummy labeled array just for the coloring function
-         # We only need the coloring logic, not actual clustering
-         dummy_array = np.array(cluster_ids)
+         # Get neighborhoods excluding outliers
+         unique_neighborhoods = set(non_outlier_neighborhoods.values()) if non_outlier_neighborhoods else set()
  
-         # Get colors using the community coloration method
-         _, neighborhood_color_names = community_extractor.assign_community_colors(
-             filtered_neighborhoods, dummy_array
-         )
+         # Generate colors for non-outlier neighborhoods only (same as assign_community_colors)
+         colors = community_extractor.generate_distinct_colors(len(unique_neighborhoods)) if unique_neighborhoods else []
+ 
+         # Sort neighborhoods by size for consistent color assignment (same logic as assign_community_colors)
+         # Use the ORIGINAL node counts from original_communities
+         if non_outlier_neighborhoods:
+             neighborhood_sizes = Counter(non_outlier_neighborhoods.values())
+             sorted_neighborhoods = sorted(unique_neighborhoods, key=lambda x: (-neighborhood_sizes[x], x))
+             neighborhood_to_color = {neighborhood: colors[i] for i, neighborhood in enumerate(sorted_neighborhoods)}
+         else:
+             neighborhood_to_color = {}
  
-         # Create color mapping for our points
-         unique_neighborhoods = sorted(list(set(filtered_neighborhoods.values())))
-         colors = community_extractor.generate_distinct_colors(len(unique_neighborhoods))
-         neighborhood_to_color = {neighborhood: colors[i] for i, neighborhood in enumerate(unique_neighborhoods)}
+         # Add brown color for outliers (neighborhood 0) - same as assign_community_colors
+         if outlier_neighborhoods:
+             neighborhood_to_color[0] = (139, 69, 19)  # Brown color (RGB, not RGBA here)
  
-         # Map each cluster to its neighborhood color
+         # Map each cluster to its neighborhood color using 'neighborhoods' ({community: neighborhood}) for assignment
          point_colors = []
          neighborhood_labels = []
          for cluster_id in cluster_ids:
-             if cluster_id in filtered_neighborhoods:
-                 neighborhood_id = filtered_neighborhoods[cluster_id]
-                 point_colors.append(neighborhood_to_color[neighborhood_id])
-                 neighborhood_labels.append(neighborhood_id)
+             if cluster_id in neighborhoods:
+                 neighborhood_id = neighborhoods[cluster_id]  # This is {community: neighborhood}
+                 if neighborhood_id in neighborhood_to_color:
+                     point_colors.append(neighborhood_to_color[neighborhood_id])
+                     neighborhood_labels.append(neighborhood_id)
+                 else:
+                     # Default color for neighborhoods not found
+                     point_colors.append((128, 128, 128))  # Gray
+                     neighborhood_labels.append("Unknown")
              else:
                  # Default color for nodes not in any neighborhood
                  point_colors.append((128, 128, 128))  # Gray
@@ -432,6 +447,10 @@ def visualize_cluster_composition_umap(cluster_data: Dict[int, np.ndarray],
  
          # Normalize RGB values for matplotlib (0-1 range)
          point_colors = [(r/255.0, g/255.0, b/255.0) for r, g, b in point_colors]
+ 
+         # Get unique neighborhoods for legend
+         unique_neighborhoods_for_legend = sorted(list(set(neighborhood_to_color.keys())))
+ 
          use_neighborhood_coloring = True
  
      elif id_dictionary is not None:
@@ -467,8 +486,8 @@ def visualize_cluster_composition_umap(cluster_data: Dict[int, np.ndarray],
      # Add cluster ID labels
      for i, cluster_id in enumerate(cluster_ids):
          display_label = f'{cluster_id}'
-         if use_neighborhood_coloring and cluster_id in filtered_neighborhoods:
-             neighborhood_id = filtered_neighborhoods[cluster_id]
+         if use_neighborhood_coloring and cluster_id in neighborhoods:
+             neighborhood_id = neighborhoods[cluster_id]
              display_label = f'{cluster_id}\n(N{neighborhood_id})'
          elif id_dictionary is not None:
              identity = id_dictionary.get(cluster_id, "Unknown")
@@ -483,7 +502,7 @@ def visualize_cluster_composition_umap(cluster_data: Dict[int, np.ndarray],
      if use_neighborhood_coloring:
          # Create custom legend for neighborhoods
          legend_elements = []
-         for neighborhood_id in unique_neighborhoods:
+         for neighborhood_id in unique_neighborhoods_for_legend:
              color = neighborhood_to_color[neighborhood_id]
              norm_color = (color[0]/255.0, color[1]/255.0, color[2]/255.0)
              legend_elements.append(
@@ -530,8 +549,8 @@ def visualize_cluster_composition_umap(cluster_data: Dict[int, np.ndarray],
      # Add cluster ID labels
      for i, cluster_id in enumerate(cluster_ids):
          display_label = f'C{cluster_id}'
-         if use_neighborhood_coloring and cluster_id in filtered_neighborhoods:
-             neighborhood_id = filtered_neighborhoods[cluster_id]
+         if use_neighborhood_coloring and cluster_id in neighborhoods:
+             neighborhood_id = neighborhoods[cluster_id]
              display_label = f'C{cluster_id}\n(N{neighborhood_id})'
          elif id_dictionary is not None:
              identity = id_dictionary.get(cluster_id, "Unknown")
@@ -554,7 +573,7 @@ def visualize_cluster_composition_umap(cluster_data: Dict[int, np.ndarray],
      if use_neighborhood_coloring:
          # Create custom legend for neighborhoods
          legend_elements = []
-         for neighborhood_id in unique_neighborhoods:
+         for neighborhood_id in unique_neighborhoods_for_legend:
              color = neighborhood_to_color[neighborhood_id]
              norm_color = (color[0]/255.0, color[1]/255.0, color[2]/255.0)
              legend_elements.append(
@@ -585,8 +604,8 @@ def visualize_cluster_composition_umap(cluster_data: Dict[int, np.ndarray],
      for i, cluster_id in enumerate(cluster_ids):
          composition = compositions[i]
          additional_info = ""
-         if use_neighborhood_coloring and cluster_id in filtered_neighborhoods:
-             neighborhood_id = filtered_neighborhoods[cluster_id]
+         if use_neighborhood_coloring and cluster_id in neighborhoods:
+             neighborhood_id = neighborhoods[cluster_id]
              additional_info = f" (Neighborhood: {neighborhood_id})"
          elif id_dictionary is not None:
              identity = id_dictionary.get(cluster_id, "Unknown")
@@ -974,60 +993,63 @@ def create_node_heatmap(node_intensity, node_centroids, shape=None, is_3d=True,
          node_to_intensity[node_id] = node_intensity_clean[node_id]
  
      # Create colormap function (RdBu_r - red for high, blue for low, yellow/white for middle)
-     def intensity_to_rgb(intensity, min_val, max_val):
-         """Convert intensity value to RGB using RdBu_r colormap logic, centered at 0"""
+     def intensity_to_rgba(intensity, min_val, max_val):
+         """Convert intensity value to RGBA using RdBu_r colormap logic, centered at 0"""
  
          # Handle edge case where all values are the same
          if max_val == min_val:
              if intensity == 0:
-                 return np.array([255, 255, 255], dtype=np.uint8)  # White for 0
+                 return np.array([255, 255, 255, 0], dtype=np.uint8)  # Transparent white for 0
              elif intensity > 0:
-                 return np.array([255, 200, 200], dtype=np.uint8)  # Light red for positive
+                 return np.array([255, 200, 200, 255], dtype=np.uint8)  # Opaque light red for positive
              else:
-                 return np.array([200, 200, 255], dtype=np.uint8)  # Light blue for negative
+                 return np.array([200, 200, 255, 255], dtype=np.uint8)  # Opaque light blue for negative
  
          # Find the maximum absolute value for symmetric scaling around 0
          max_abs = max(abs(min_val), abs(max_val))
  
-         # If max_abs is 0, everything is 0, so return white
+         # If max_abs is 0, everything is 0, so return transparent
          if max_abs == 0:
-             return np.array([255, 255, 255], dtype=np.uint8)  # White
+             return np.array([255, 255, 255, 0], dtype=np.uint8)  # Transparent white
  
          # Normalize intensity to -1 to 1 range, centered at 0
          normalized = intensity / max_abs
          normalized = np.clip(normalized, -1, 1)
  
          if normalized > 0:
-             # Positive values: white to red (intensity 0 = white, max positive = red)
+             # Positive values: white to red (intensity 0 = transparent, max positive = red)
              r = 255
              g = int(255 * (1 - normalized))
              b = int(255 * (1 - normalized))
+             alpha = 255  # Fully opaque for all non-zero values
          elif normalized < 0:
-             # Negative values: white to blue (intensity 0 = white, max negative = blue)
+             # Negative values: white to blue (intensity 0 = transparent, max negative = blue)
              r = int(255 * (1 + normalized))
              g = int(255 * (1 + normalized))
              b = 255
+             alpha = 255  # Fully opaque for all non-zero values
          else:
-             # Exactly 0: white
-             r, g, b = 255, 255, 255
+             # Exactly 0: transparent
+             r, g, b, alpha = 255, 255, 255, 0
  
-         return np.array([r, g, b], dtype=np.uint8)
- 
-     # Create lookup table for RGB colors
+         return np.array([r, g, b, alpha], dtype=np.uint8)
+ 
+     # Modified usage in your main function:
+     # Create lookup table for RGBA colors (note the 4 channels now)
      max_label = max(max(labeled_array.flat), max(node_to_intensity.keys()) if node_to_intensity else 0)
-     color_lut = np.zeros((max_label + 1, 3), dtype=np.uint8)  # Default to black (0,0,0)
- 
-     # Fill lookup table with RGB colors based on intensity
+     color_lut = np.zeros((max_label + 1, 4), dtype=np.uint8)  # Default to transparent (0,0,0,0)
+ 
+     # Fill lookup table with RGBA colors based on intensity
      for node_id, intensity in node_to_intensity.items():
-         rgb_color = intensity_to_rgb(intensity, min_intensity, max_intensity)
-         color_lut[int(node_id)] = rgb_color
- 
+         rgba_color = intensity_to_rgba(intensity, min_intensity, max_intensity)
+         color_lut[int(node_id)] = rgba_color
+ 
      # Apply lookup table to labeled array - single vectorized operation
      if is_3d:
-         # Return full 3D RGB array [Z, Y, X, 3]
+         # Return full 3D RGBA array [Z, Y, X, 4]
          heatmap_array = color_lut[labeled_array]
      else:
-         # Return 2D RGB array
+         # Return 2D RGBA array
          if labeled_array.ndim == 3:
              # Take middle slice for 2D representation
              middle_slice = labeled_array.shape[0] // 2
@@ -1035,7 +1057,7 @@ def create_node_heatmap(node_intensity, node_centroids, shape=None, is_3d=True,
          else:
              # Already 2D
              heatmap_array = color_lut[labeled_array]
-
+
          return heatmap_array
  
      else:
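
The create_node_heatmap hunks above switch the per-node helper from RGB to RGBA so that zero-intensity nodes become transparent rather than white, while keeping the diverging white-to-red (positive) and white-to-blue (negative) ramp symmetric about 0. The same mapping can be written as a single vectorized pass; this is an illustrative sketch, not the package's code:

import numpy as np

def diverging_rgba(values):
    """White->red for positives, white->blue for negatives, symmetric about 0;
    exact zeros come out fully transparent (alpha 0)."""
    values = np.asarray(values, dtype=float)
    out = np.zeros(values.shape + (4,), dtype=np.uint8)
    max_abs = np.abs(values).max()
    if max_abs == 0:
        return out  # everything is zero -> all transparent
    t = np.clip(values / max_abs, -1.0, 1.0)
    fade = (255 * (1 - np.abs(t))).astype(np.uint8)  # 255 at zero, 0 at the extremes
    pos = t > 0
    neg = t < 0
    out[pos, 0] = 255          # red channel saturated for positives
    out[pos, 1] = fade[pos]
    out[pos, 2] = fade[pos]
    out[pos, 3] = 255          # opaque for any non-zero value
    out[neg, 0] = fade[neg]
    out[neg, 1] = fade[neg]
    out[neg, 2] = 255          # blue channel saturated for negatives
    out[neg, 3] = 255
    return out                 # zeros keep the (0, 0, 0, 0) default

print(diverging_rgba([-2.0, 0.0, 1.0]))
# [[  0   0 255 255]  -> full blue
#  [  0   0   0   0]  -> transparent
#  [255 127 127 255]] -> halfway to red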
@@ -1104,19 +1126,124 @@ def create_node_heatmap(node_intensity, node_centroids, shape=None, is_3d=True,
      plt.tight_layout()
      plt.show()
  
- # Example usage:
- if __name__ == "__main__":
-     # Sample data for demonstration
-     sample_dict = {
-         'category_A': np.array([0.1, 0.5, 0.8, 0.3, 0.9]),
-         'category_B': np.array([0.7, 0.2, 0.6, 0.4, 0.1]),
-         'category_C': np.array([0.9, 0.8, 0.2, 0.7, 0.5])
-     }
+ def create_violin_plots(data_dict, graph_title="Violin Plots"):
+     """
+     Create violin plots from dictionary data with distinct colors.
  
-     sample_id_set = ['feature_1', 'feature_2', 'feature_3', 'feature_4', 'feature_5']
+     Parameters:
+         data_dict (dict): Dictionary where keys are column headers (strings) and
+                           values are lists of floats
+         graph_title (str): Title for the overall plot
+     """
+     if not data_dict:
+         print("No data to plot")
+         return
+ 
+     # Prepare data
+     labels = list(data_dict.keys())
+     data_lists = list(data_dict.values())
+ 
+     # Generate colors using the community color strategy
+     try:
+         # Create a mock community dict for color generation
+         mock_community_dict = {i: i+1 for i in range(len(labels))}  # No outliers for simplicity
+ 
+         # Get distinct colors
+         n_colors = len(labels)
+         colors_rgb = community_extractor.generate_distinct_colors(n_colors)
+ 
+         # Sort by data size for consistent color assignment (like community sizes)
+         data_sizes = [(i, len(data_lists[i])) for i in range(len(data_lists))]
+         sorted_indices = sorted(data_sizes, key=lambda x: (-x[1], x[0]))
+ 
+         # Create color mapping
+         colors = []
+         for i, _ in sorted_indices:
+             color_idx = sorted_indices.index((i, _))
+             if color_idx < len(colors_rgb):
+                 # Convert RGB (0-255) to matplotlib format (0-1)
+                 rgb_normalized = tuple(c/255.0 for c in colors_rgb[color_idx])
+                 colors.append(rgb_normalized)
+             else:
+                 colors.append('gray')  # Fallback color
+ 
+         # Reorder colors to match original label order
+         final_colors = ['gray'] * len(labels)
+         for idx, (original_idx, _) in enumerate(sorted_indices):
+             final_colors[original_idx] = colors[idx]
+ 
+     except Exception as e:
+         print(f"Color generation failed, using default colors: {e}")
+         # Fallback to default matplotlib colors
+         final_colors = plt.cm.Set3(np.linspace(0, 1, len(labels)))
  
-     # Create the heatmap
-     fig, ax = plot_dict_heatmap(sample_dict, sample_id_set,
-                                 title="Sample Heatmap Visualization")
+     # Create the plot
+     fig, ax = plt.subplots(figsize=(max(8, len(labels) * 1.5), 6))
+ 
+     # Create violin plots
+     violin_parts = ax.violinplot(data_lists, positions=range(len(labels)),
+                                  showmeans=False, showmedians=True, showextrema=True)
+ 
+     # Color the violins
+     for i, pc in enumerate(violin_parts['bodies']):
+         if i < len(final_colors):
+             pc.set_facecolor(final_colors[i])
+             pc.set_alpha(0.7)
+ 
+     # Color the other violin elements
+     for partname in ('cbars', 'cmins', 'cmaxes', 'cmedians'):
+         if partname in violin_parts:
+             violin_parts[partname].set_edgecolor('black')
+             violin_parts[partname].set_linewidth(1)
+ 
+     # Add data points as scatter plot overlay with much lower transparency
+     """
+     for i, data in enumerate(data_lists):
+         y = data
+         # Add some jitter to x positions for better visibility
+         x = np.random.normal(i, 0.04, size=len(y))
+         ax.scatter(x, y, alpha=0.2, s=15, color='black', edgecolors='none', zorder=3)  # No borders, more transparent
+     """
  
+     # Calculate reasonable y-axis limits to focus on the bulk of the data
+     all_data = [val for sublist in data_lists for val in sublist]
+     if all_data:
+         # Use percentiles to exclude extreme outliers from the view
+         y_min = np.percentile(all_data, 5)   # 5th percentile
+         y_max = np.percentile(all_data, 95)  # 95th percentile
+ 
+         # Add some padding
+         y_range = y_max - y_min
+         y_padding = y_range * 0.15
+         ax.set_ylim(y_min - y_padding, y_max + y_padding)
+ 
+     # Add IQR and median text annotations BELOW the violins
+     for i, data in enumerate(data_lists):
+         if len(data) > 0:
+             q1, median, q3 = np.percentile(data, [25, 50, 75])
+             iqr = q3 - q1
+ 
+             # Position text below the violin (using current y-axis limits)
+             y_min_current = ax.get_ylim()[0]
+             y_text = y_min_current - (ax.get_ylim()[1] - ax.get_ylim()[0]) * 0.15
+ 
+             ax.text(i, y_text, f'Median: {median:.2f}\nIQR: {iqr:.2f}',
+                     horizontalalignment='center', fontsize=8,
+                     bbox=dict(boxstyle='round,pad=0.3', facecolor='white', alpha=0.8))
+ 
+     # Customize the plot
+     ax.set_xticks(range(len(labels)))
+     ax.set_xticklabels(labels, rotation=45, ha='right')
+     ax.set_title(graph_title, fontsize=14, fontweight='bold')
+     ax.set_ylabel('Normalized Values (Z-score-like)', fontsize=12)
+     ax.grid(True, alpha=0.3)
+ 
+     # Add a horizontal line at y=0 (the identity centerpoint)
+     ax.axhline(y=0, color='red', linestyle='--', alpha=0.5, linewidth=1,
+                label='Identity Centerpoint')
+     ax.legend(loc='upper right')
+ 
+     # Adjust layout to prevent label cutoff and accommodate bottom text
+     plt.subplots_adjust(bottom=0.2)  # Extra space for bottom text
+     plt.tight_layout()
      plt.show()
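
Per the changelog, create_violin_plots backs the new violin-plot option for tables produced by merging node identities. Assuming it is importable from the neighborhoods module once this diff is applied (its exact nesting is hard to tell from the hunk alone), a usage sketch with made-up, roughly z-scored marker data:

import numpy as np
from nettracer3d import neighborhoods

# Hypothetical normalized marker expression for nodes in one neighborhood
rng = np.random.default_rng(0)
marker_table = {
    "CD31": list(rng.normal(0.8, 0.4, 200)),   # enriched marker
    "CD45": list(rng.normal(-0.3, 0.5, 200)),  # depleted marker
    "DAPI": list(rng.normal(0.0, 0.2, 200)),   # near the identity centerpoint
}

neighborhoods.create_violin_plots(marker_table, graph_title="Neighborhood 1 marker expression")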
@@ -35,6 +35,7 @@ from . import proximity
  from skimage.segmentation import watershed as water
  
  
+ 
  #These next several methods relate to searching with 3D objects by dilating each one in a subarray around their neighborhood although I don't explicitly use this anywhere... can call them deprecated although I may want to use them later again so I have them still written out here.
  
  
@@ -992,6 +993,61 @@ def z_project(array3d, method='max'):
          raise ValueError("Method must be one of: 'max', 'mean', 'min', 'sum', 'std'")
  
  def fill_holes_3d(array, head_on = False, fill_borders = True):
+     def process_slice(slice_2d, border_threshold=0.08, fill_borders = True):
+         """
+         Process a 2D slice, considering components that touch less than border_threshold
+         of any border length as potential holes.
+ 
+         Args:
+             slice_2d: 2D binary array
+             border_threshold: proportion of border that must be touched to be considered background
+         """
+         from scipy.ndimage import binary_fill_holes
+ 
+         slice_2d = slice_2d.astype(np.uint8)
+ 
+         # Apply scipy's binary_fill_holes to the result
+         slice_2d = binary_fill_holes(slice_2d)
+ 
+         return slice_2d
+ 
+     print("Filling Holes...")
+ 
+     array = binarize(array)
+     #inv_array = invert_array(array)
+ 
+     # Create arrays for all three planes
+     array_xy = np.zeros_like(array, dtype=np.uint8)
+     array_xz = np.zeros_like(array, dtype=np.uint8)
+     array_yz = np.zeros_like(array, dtype=np.uint8)
+ 
+     # Process XY plane
+     for z in range(array.shape[0]):
+         array_xy[z] = process_slice(array[z], fill_borders = fill_borders)
+ 
+     if (array.shape[0] > 3) and not head_on: #only use these dimensions for sufficiently large zstacks
+ 
+         # Process XZ plane
+         for y in range(array.shape[1]):
+             slice_xz = array[:, y, :]
+             array_xz[:, y, :] = process_slice(slice_xz, fill_borders = fill_borders)
+ 
+         # Process YZ plane
+         for x in range(array.shape[2]):
+             slice_yz = array[:, :, x]
+             array_yz[:, :, x] = process_slice(slice_yz, fill_borders = fill_borders)
+ 
+         # Combine results from all three planes
+         filled = (array_xy | array_xz | array_yz) * 255
+         return array + filled
+     else:
+         # Apply scipy's binary_fill_holes to each XY slice
+         from scipy.ndimage import binary_fill_holes
+         for z in range(array_xy.shape[0]):
+             array_xy[z] = binary_fill_holes(array_xy[z])
+         return array_xy * 255
+ 
+ def fill_holes_3d_old(array, head_on = False, fill_borders = True):
  
      def process_slice(slice_2d, border_threshold=0.08, fill_borders = True):
          """
@@ -5659,7 +5715,8 @@ class Network_3D:
                  neighbor_group[com] = neighbors[node]
              except:
                  neighbor_group[com] = 0
-         neighborhoods.visualize_cluster_composition_umap(umap_dict, id_set, neighborhoods = neighbor_group)
+         print(neighbors)
+         neighborhoods.visualize_cluster_composition_umap(umap_dict, id_set, neighborhoods = neighbor_group, original_communities = neighbors)
      elif label == 1:
          neighborhoods.visualize_cluster_composition_umap(umap_dict, id_set, label = True)
      else:
@@ -6064,14 +6121,25 @@ class Network_3D:
  
          for node, iden in self.node_identities.items():
  
-             if iden == root:
+             if iden == root:  # Standard behavior
  
                  root_set.append(node)
  
-             elif (iden == targ) or (targ == 'All Others (Excluding Self)'):
+             elif '[' in iden and root != "All (Excluding Targets)":  # For multiple nodes
+                 if root in iden:
+                     root_set.append(node)
+ 
+             elif (iden == targ) or (targ == 'All Others (Excluding Self)'):  # The other group
  
                  compare_set.append(node)
  
+             elif '[' in iden:  # The other group, for multiple nodes
+                 if targ in iden:
+                     compare_set.append(node)
+ 
+             elif root == "All (Excluding Targets)":  # If not assigned to the other group but the comprehensive root option is used
+                 root_set.append(node)
+ 
          if root == targ:
  
              compare_set = root_set
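
The final hunk teaches the root/target selector about merged multi-identity labels, which the node-identity table appears to encode as bracketed strings, testing membership by substring. A self-contained mirror of that branching, with illustrative identity strings (e.g. "[CD31, CD45]"):

def split_root_and_targets(node_identities, root, targ):
    """Mirror of the updated branching: exact matches first, then bracketed
    multi-identity labels matched by substring, then the catch-all root option."""
    root_set, compare_set = [], []
    for node, iden in node_identities.items():
        if iden == root:                                    # standard behavior
            root_set.append(node)
        elif '[' in iden and root != "All (Excluding Targets)":
            if root in iden:                                # multi-identity node contains root
                root_set.append(node)
        elif iden == targ or targ == 'All Others (Excluding Self)':
            compare_set.append(node)
        elif '[' in iden:
            if targ in iden:                                # multi-identity node contains target
                compare_set.append(node)
        elif root == "All (Excluding Targets)":             # comprehensive root option
            root_set.append(node)
    return root_set, compare_set

nodes = {1: "CD31", 2: "[CD31, CD45]", 3: "CD45", 4: "DAPI"}
print(split_root_and_targets(nodes, "CD31", "CD45"))  # ([1, 2], [3])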