nettracer3d 1.0.1.tar.gz → 1.0.3.tar.gz

This diff compares publicly available package versions as they were released to one of the supported registries. It is provided for informational purposes only and reflects the changes between those versions as they appear in the respective public registries.

Potentially problematic release: this version of nettracer3d might be problematic.

Files changed (30)
  1. {nettracer3d-1.0.1/src/nettracer3d.egg-info → nettracer3d-1.0.3}/PKG-INFO +3 -3
  2. {nettracer3d-1.0.1 → nettracer3d-1.0.3}/README.md +2 -2
  3. {nettracer3d-1.0.1 → nettracer3d-1.0.3}/pyproject.toml +1 -1
  4. {nettracer3d-1.0.1 → nettracer3d-1.0.3}/src/nettracer3d/neighborhoods.py +145 -36
  5. {nettracer3d-1.0.1 → nettracer3d-1.0.3}/src/nettracer3d/nettracer.py +30 -13
  6. {nettracer3d-1.0.1 → nettracer3d-1.0.3}/src/nettracer3d/nettracer_gui.py +465 -82
  7. {nettracer3d-1.0.1 → nettracer3d-1.0.3/src/nettracer3d.egg-info}/PKG-INFO +3 -3
  8. {nettracer3d-1.0.1 → nettracer3d-1.0.3}/LICENSE +0 -0
  9. {nettracer3d-1.0.1 → nettracer3d-1.0.3}/setup.cfg +0 -0
  10. {nettracer3d-1.0.1 → nettracer3d-1.0.3}/src/nettracer3d/__init__.py +0 -0
  11. {nettracer3d-1.0.1 → nettracer3d-1.0.3}/src/nettracer3d/cellpose_manager.py +0 -0
  12. {nettracer3d-1.0.1 → nettracer3d-1.0.3}/src/nettracer3d/community_extractor.py +0 -0
  13. {nettracer3d-1.0.1 → nettracer3d-1.0.3}/src/nettracer3d/excelotron.py +0 -0
  14. {nettracer3d-1.0.1 → nettracer3d-1.0.3}/src/nettracer3d/modularity.py +0 -0
  15. {nettracer3d-1.0.1 → nettracer3d-1.0.3}/src/nettracer3d/morphology.py +0 -0
  16. {nettracer3d-1.0.1 → nettracer3d-1.0.3}/src/nettracer3d/network_analysis.py +0 -0
  17. {nettracer3d-1.0.1 → nettracer3d-1.0.3}/src/nettracer3d/network_draw.py +0 -0
  18. {nettracer3d-1.0.1 → nettracer3d-1.0.3}/src/nettracer3d/node_draw.py +0 -0
  19. {nettracer3d-1.0.1 → nettracer3d-1.0.3}/src/nettracer3d/painting.py +0 -0
  20. {nettracer3d-1.0.1 → nettracer3d-1.0.3}/src/nettracer3d/proximity.py +0 -0
  21. {nettracer3d-1.0.1 → nettracer3d-1.0.3}/src/nettracer3d/run.py +0 -0
  22. {nettracer3d-1.0.1 → nettracer3d-1.0.3}/src/nettracer3d/segmenter.py +0 -0
  23. {nettracer3d-1.0.1 → nettracer3d-1.0.3}/src/nettracer3d/segmenter_GPU.py +0 -0
  24. {nettracer3d-1.0.1 → nettracer3d-1.0.3}/src/nettracer3d/simple_network.py +0 -0
  25. {nettracer3d-1.0.1 → nettracer3d-1.0.3}/src/nettracer3d/smart_dilate.py +0 -0
  26. {nettracer3d-1.0.1 → nettracer3d-1.0.3}/src/nettracer3d.egg-info/SOURCES.txt +0 -0
  27. {nettracer3d-1.0.1 → nettracer3d-1.0.3}/src/nettracer3d.egg-info/dependency_links.txt +0 -0
  28. {nettracer3d-1.0.1 → nettracer3d-1.0.3}/src/nettracer3d.egg-info/entry_points.txt +0 -0
  29. {nettracer3d-1.0.1 → nettracer3d-1.0.3}/src/nettracer3d.egg-info/requires.txt +0 -0
  30. {nettracer3d-1.0.1 → nettracer3d-1.0.3}/src/nettracer3d.egg-info/top_level.txt +0 -0
{nettracer3d-1.0.1/src/nettracer3d.egg-info → nettracer3d-1.0.3}/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: nettracer3d
- Version: 1.0.1
+ Version: 1.0.3
  Summary: Scripts for intializing and analyzing networks from segmentations of three dimensional images.
  Author-email: Liam McLaughlin <liamm@wustl.edu>
  Project-URL: Documentation, https://nettracer3d.readthedocs.io/en/latest/
@@ -110,6 +110,6 @@ McLaughlin, L., Zhang, B., Sharma, S. et al. Three dimensional multiscalar neuro

  NetTracer3D was developed by Liam McLaughlin while working under Dr. Sanjay Jain at Washington University School of Medicine.

- -- Version 1.0.1 Updates --
+ -- Version 1.0.3 Updates --

- * Bug fixes, mainly
+ * Some small bug fixes and adjustments
README.md
@@ -65,6 +65,6 @@ McLaughlin, L., Zhang, B., Sharma, S. et al. Three dimensional multiscalar neuro

  NetTracer3D was developed by Liam McLaughlin while working under Dr. Sanjay Jain at Washington University School of Medicine.

- -- Version 1.0.1 Updates --
+ -- Version 1.0.3 Updates --

- * Bug fixes, mainly
+ * Some small bug fixes and adjustments
pyproject.toml
@@ -1,6 +1,6 @@
  [project]
  name = "nettracer3d"
- version = "1.0.1"
+ version = "1.0.3"
  authors = [
    { name="Liam McLaughlin", email="liamm@wustl.edu" },
  ]
src/nettracer3d/neighborhoods.py
@@ -8,7 +8,8 @@ from matplotlib.colors import LinearSegmentedColormap
  from sklearn.cluster import DBSCAN
  from sklearn.neighbors import NearestNeighbors
  import matplotlib.colors as mcolors
-
+ from collections import Counter
+ from . import community_extractor


  import os
@@ -992,60 +993,63 @@ def create_node_heatmap(node_intensity, node_centroids, shape=None, is_3d=True,
  node_to_intensity[node_id] = node_intensity_clean[node_id]

  # Create colormap function (RdBu_r - red for high, blue for low, yellow/white for middle)
- def intensity_to_rgb(intensity, min_val, max_val):
- """Convert intensity value to RGB using RdBu_r colormap logic, centered at 0"""
+ def intensity_to_rgba(intensity, min_val, max_val):
+ """Convert intensity value to RGBA using RdBu_r colormap logic, centered at 0"""

  # Handle edge case where all values are the same
  if max_val == min_val:
  if intensity == 0:
- return np.array([255, 255, 255], dtype=np.uint8) # White for 0
+ return np.array([255, 255, 255, 0], dtype=np.uint8) # Transparent white for 0
  elif intensity > 0:
- return np.array([255, 200, 200], dtype=np.uint8) # Light red for positive
+ return np.array([255, 200, 200, 255], dtype=np.uint8) # Opaque light red for positive
  else:
- return np.array([200, 200, 255], dtype=np.uint8) # Light blue for negative
+ return np.array([200, 200, 255, 255], dtype=np.uint8) # Opaque light blue for negative

  # Find the maximum absolute value for symmetric scaling around 0
  max_abs = max(abs(min_val), abs(max_val))

- # If max_abs is 0, everything is 0, so return white
+ # If max_abs is 0, everything is 0, so return transparent
  if max_abs == 0:
- return np.array([255, 255, 255], dtype=np.uint8) # White
+ return np.array([255, 255, 255, 0], dtype=np.uint8) # Transparent white

  # Normalize intensity to -1 to 1 range, centered at 0
  normalized = intensity / max_abs
  normalized = np.clip(normalized, -1, 1)

  if normalized > 0:
- # Positive values: white to red (intensity 0 = white, max positive = red)
+ # Positive values: white to red (intensity 0 = transparent, max positive = red)
  r = 255
  g = int(255 * (1 - normalized))
  b = int(255 * (1 - normalized))
+ alpha = 255 # Fully opaque for all non-zero values
  elif normalized < 0:
- # Negative values: white to blue (intensity 0 = white, max negative = blue)
+ # Negative values: white to blue (intensity 0 = transparent, max negative = blue)
  r = int(255 * (1 + normalized))
  g = int(255 * (1 + normalized))
  b = 255
+ alpha = 255 # Fully opaque for all non-zero values
  else:
- # Exactly 0: white
- r, g, b = 255, 255, 255
+ # Exactly 0: transparent
+ r, g, b, alpha = 255, 255, 255, 0

- return np.array([r, g, b], dtype=np.uint8)
-
- # Create lookup table for RGB colors
+ return np.array([r, g, b, alpha], dtype=np.uint8)
+
+ # Modified usage in your main function:
+ # Create lookup table for RGBA colors (note the 4 channels now)
  max_label = max(max(labeled_array.flat), max(node_to_intensity.keys()) if node_to_intensity else 0)
- color_lut = np.zeros((max_label + 1, 3), dtype=np.uint8) # Default to black (0,0,0)
-
- # Fill lookup table with RGB colors based on intensity
+ color_lut = np.zeros((max_label + 1, 4), dtype=np.uint8) # Default to transparent (0,0,0,0)
+
+ # Fill lookup table with RGBA colors based on intensity
  for node_id, intensity in node_to_intensity.items():
- rgb_color = intensity_to_rgb(intensity, min_intensity, max_intensity)
- color_lut[int(node_id)] = rgb_color
-
+ rgba_color = intensity_to_rgba(intensity, min_intensity, max_intensity)
+ color_lut[int(node_id)] = rgba_color
+
  # Apply lookup table to labeled array - single vectorized operation
  if is_3d:
- # Return full 3D RGB array [Z, Y, X, 3]
+ # Return full 3D RGBA array [Z, Y, X, 4]
  heatmap_array = color_lut[labeled_array]
  else:
- # Return 2D RGB array
+ # Return 2D RGBA array
  if labeled_array.ndim == 3:
  # Take middle slice for 2D representation
  middle_slice = labeled_array.shape[0] // 2
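Illustrative sketch (not from the package): the new intensity_to_rgba/color_lut code above relies on a label-indexed RGBA lookup table, so that unlabeled background rows keep alpha 0 and fancy indexing colors the whole volume in one step. A minimal standalone example of that indexing pattern, with invented labels and colors:

    import numpy as np

    # Hypothetical 2D label image: 0 = background, 1 and 2 = node labels
    labeled = np.array([[0, 1],
                        [2, 0]])

    # RGBA lookup table: row index = label; the default row stays fully transparent
    lut = np.zeros((3, 4), dtype=np.uint8)
    lut[1] = [255, 0, 0, 255]   # label 1 -> opaque red
    lut[2] = [0, 0, 255, 255]   # label 2 -> opaque blue

    rgba = lut[labeled]         # shape (2, 2, 4); background pixels keep alpha 0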
@@ -1053,7 +1057,7 @@ def create_node_heatmap(node_intensity, node_centroids, shape=None, is_3d=True,
  else:
  # Already 2D
  heatmap_array = color_lut[labeled_array]
-
+
  return heatmap_array

  else:
@@ -1122,19 +1126,124 @@ def create_node_heatmap(node_intensity, node_centroids, shape=None, is_3d=True,
  plt.tight_layout()
  plt.show()

- # Example usage:
- if __name__ == "__main__":
- # Sample data for demonstration
- sample_dict = {
- 'category_A': np.array([0.1, 0.5, 0.8, 0.3, 0.9]),
- 'category_B': np.array([0.7, 0.2, 0.6, 0.4, 0.1]),
- 'category_C': np.array([0.9, 0.8, 0.2, 0.7, 0.5])
- }
+ def create_violin_plots(data_dict, graph_title="Violin Plots"):
+ """
+ Create violin plots from dictionary data with distinct colors.

- sample_id_set = ['feature_1', 'feature_2', 'feature_3', 'feature_4', 'feature_5']
+ Parameters:
+ data_dict (dict): Dictionary where keys are column headers (strings) and
+ values are lists of floats
+ graph_title (str): Title for the overall plot
+ """
+ if not data_dict:
+ print("No data to plot")
+ return

- # Create the heatmap
- fig, ax = plot_dict_heatmap(sample_dict, sample_id_set,
- title="Sample Heatmap Visualization")
+ # Prepare data
+ labels = list(data_dict.keys())
+ data_lists = list(data_dict.values())

+ # Generate colors using the community color strategy
+ try:
+ # Create a mock community dict for color generation
+ mock_community_dict = {i: i+1 for i in range(len(labels))} # No outliers for simplicity
+
+ # Get distinct colors
+ n_colors = len(labels)
+ colors_rgb = community_extractor.generate_distinct_colors(n_colors)
+
+ # Sort by data size for consistent color assignment (like community sizes)
+ data_sizes = [(i, len(data_lists[i])) for i in range(len(data_lists))]
+ sorted_indices = sorted(data_sizes, key=lambda x: (-x[1], x[0]))
+
+ # Create color mapping
+ colors = []
+ for i, _ in sorted_indices:
+ color_idx = sorted_indices.index((i, _))
+ if color_idx < len(colors_rgb):
+ # Convert RGB (0-255) to matplotlib format (0-1)
+ rgb_normalized = tuple(c/255.0 for c in colors_rgb[color_idx])
+ colors.append(rgb_normalized)
+ else:
+ colors.append('gray') # Fallback color
+
+ # Reorder colors to match original label order
+ final_colors = ['gray'] * len(labels)
+ for idx, (original_idx, _) in enumerate(sorted_indices):
+ final_colors[original_idx] = colors[idx]
+
+ except Exception as e:
+ print(f"Color generation failed, using default colors: {e}")
+ # Fallback to default matplotlib colors
+ final_colors = plt.cm.Set3(np.linspace(0, 1, len(labels)))
+
+ # Create the plot
+ fig, ax = plt.subplots(figsize=(max(8, len(labels) * 1.5), 6))
+
+ # Create violin plots
+ violin_parts = ax.violinplot(data_lists, positions=range(len(labels)),
+ showmeans=False, showmedians=True, showextrema=True)
+
+ # Color the violins
+ for i, pc in enumerate(violin_parts['bodies']):
+ if i < len(final_colors):
+ pc.set_facecolor(final_colors[i])
+ pc.set_alpha(0.7)
+
+ # Color the other violin elements
+ for partname in ('cbars', 'cmins', 'cmaxes', 'cmedians'):
+ if partname in violin_parts:
+ violin_parts[partname].set_edgecolor('black')
+ violin_parts[partname].set_linewidth(1)
+
+ # Add data points as scatter plot overlay with much lower transparency
+ """
+ for i, data in enumerate(data_lists):
+ y = data
+ # Add some jitter to x positions for better visibility
+ x = np.random.normal(i, 0.04, size=len(y))
+ ax.scatter(x, y, alpha=0.2, s=15, color='black', edgecolors='none', zorder=3) # No borders, more transparent
+ """
+
+ # Calculate reasonable y-axis limits to focus on the bulk of the data
+ all_data = [val for sublist in data_lists for val in sublist]
+ if all_data:
+ # Use percentiles to exclude extreme outliers from the view
+ y_min = np.percentile(all_data, 5) # 5th percentile
+ y_max = np.percentile(all_data, 95) # 95th percentile
+
+ # Add some padding
+ y_range = y_max - y_min
+ y_padding = y_range * 0.15
+ ax.set_ylim(y_min - y_padding, y_max + y_padding)
+
+ # Add IQR and median text annotations BELOW the violins
+ for i, data in enumerate(data_lists):
+ if len(data) > 0:
+ q1, median, q3 = np.percentile(data, [25, 50, 75])
+ iqr = q3 - q1
+
+ # Position text below the violin (using current y-axis limits)
+ y_min_current = ax.get_ylim()[0]
+ y_text = y_min_current - (ax.get_ylim()[1] - ax.get_ylim()[0]) * 0.15
+
+ ax.text(i, y_text, f'Median: {median:.2f}\nIQR: {iqr:.2f}',
+ horizontalalignment='center', fontsize=8,
+ bbox=dict(boxstyle='round,pad=0.3', facecolor='white', alpha=0.8))
+
+ # Customize the plot
+ ax.set_xticks(range(len(labels)))
+ ax.set_xticklabels(labels, rotation=45, ha='right')
+ ax.set_title(graph_title, fontsize=14, fontweight='bold')
+ ax.set_ylabel('Normalized Values (Z-score-like)', fontsize=12)
+ ax.grid(True, alpha=0.3)
+
+ # Add a horizontal line at y=0 (the identity centerpoint)
+ ax.axhline(y=0, color='red', linestyle='--', alpha=0.5, linewidth=1,
+ label='Identity Centerpoint')
+ ax.legend(loc='upper right')
+
+ # Adjust layout to prevent label cutoff and accommodate bottom text
+ plt.subplots_adjust(bottom=0.2) # Extra space for bottom text
+ plt.tight_layout()
  plt.show()
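Usage sketch (hedged, not from the package): the GUI code later in this release calls the new helper as neighborhoods.create_violin_plots(data_dict, graph_title=...); the channel names and values below are invented purely to show the expected input shape, a dict mapping column headers to lists of floats.

    from nettracer3d import neighborhoods

    # Invented, already-normalized per-channel values keyed by channel name
    data = {
        "channel_A": [0.1, -0.4, 0.8, 0.2],
        "channel_B": [1.3, 0.5, -0.2, 0.0],
    }
    neighborhoods.create_violin_plots(data, graph_title="Example Violin Plots")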
src/nettracer3d/nettracer.py
@@ -35,6 +35,7 @@ from . import proximity
  from skimage.segmentation import watershed as water


+
  #These next several methods relate to searching with 3D objects by dilating each one in a subarray around their neighborhood although I don't explicitly use this anywhere... can call them deprecated although I may want to use them later again so I have them still written out here.


@@ -3932,7 +3933,7 @@ class Network_3D:
  """
  self._nodes, num_nodes = label_objects(nodes, structure_3d)

- def combine_nodes(self, root_nodes, other_nodes, other_ID, identity_dict, root_ID = None, centroids = False):
+ def combine_nodes(self, root_nodes, other_nodes, other_ID, identity_dict, root_ID = None, centroids = False, down_factor = None):

  """Internal method to merge two labelled node arrays into one"""

@@ -3943,7 +3944,10 @@ class Network_3D:
  max_val = np.max(root_nodes)
  other_nodes[:] = np.where(mask, other_nodes + max_val, 0)
  if centroids:
- new_dict = network_analysis._find_centroids(other_nodes)
+ new_dict = network_analysis._find_centroids(other_nodes, down_factor = down_factor)
+ if down_factor is not None:
+ for item in new_dict:
+ new_dict[item] = down_factor * new_dict[item]
  self.node_centroids.update(new_dict)

  if root_ID is not None:
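Illustrative sketch (not package code): the new down_factor handling assumes centroids were computed on a volume downsampled by down_factor along each axis, so multiplying the resulting (z, y, x) coordinates by the same factor maps them back to approximate full-resolution positions.

    import numpy as np

    down_factor = 4
    centroid_down = np.array([12, 50, 33])        # centroid found in the downsampled volume (z, y, x)
    centroid_full = down_factor * centroid_down   # -> [ 48 200 132], approximate full-resolution position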
@@ -3983,7 +3987,7 @@ class Network_3D:

  return nodes, identity_dict

- def merge_nodes(self, addn_nodes_name, label_nodes = True, root_id = "Root_Nodes", centroids = False):
+ def merge_nodes(self, addn_nodes_name, label_nodes = True, root_id = "Root_Nodes", centroids = False, down_factor = None):
  """
  Merges the self._nodes attribute with alternate labelled node images. The alternate nodes can be inputted as a string for a filepath to a tif,
  or as a directory address containing only tif images, which will merge the _nodes attribute with all tifs in the folder. The _node_identities attribute
@@ -4004,19 +4008,21 @@ class Network_3D:
  identity_dict = {} #A dictionary to deliniate the node identities

  if centroids:
- self.node_centroids = network_analysis._find_centroids(self._nodes)
-
+ self.node_centroids = network_analysis._find_centroids(self._nodes, down_factor = down_factor)
+ if down_factor is not None:
+ for item in self.node_centroids:
+ self.node_centroids[item] = down_factor * self.node_centroids[item]

  try: #Try presumes the input is a tif
  addn_nodes = tifffile.imread(addn_nodes_name) #If not this will fail and activate the except block

  if label_nodes is True:
  addn_nodes, num_nodes2 = label_objects(addn_nodes) # Label the node objects. Note this presumes no overlap between node masks.
- node_labels, identity_dict = self.combine_nodes(self._nodes, addn_nodes, addn_nodes_name, identity_dict, nodes_name, centroids = centroids) #This method stacks labelled arrays
+ node_labels, identity_dict = self.combine_nodes(self._nodes, addn_nodes, addn_nodes_name, identity_dict, nodes_name, centroids = centroids, down_factor = down_factor) #This method stacks labelled arrays
  num_nodes = np.max(node_labels)

  else: #If nodes already labelled
- node_labels, identity_dict = self.combine_nodes(self._nodes, addn_nodes, addn_nodes_name, identity_dict, nodes_name, centroids = centroids)
+ node_labels, identity_dict = self.combine_nodes(self._nodes, addn_nodes, addn_nodes_name, identity_dict, nodes_name, centroids = centroids, down_factor = down_factor)
  num_nodes = int(np.max(node_labels))

  except: #Exception presumes the input is a directory containing multiple tifs, to allow multi-node stackage.
@@ -4034,15 +4040,15 @@ class Network_3D:
  if label_nodes is True:
  addn_nodes, num_nodes2 = label_objects(addn_nodes) # Label the node objects. Note this presumes no overlap between node masks.
  if i == 0:
- node_labels, identity_dict = self.combine_nodes(self._nodes, addn_nodes, addn_nodes_ID, identity_dict, nodes_name, centroids = centroids)
+ node_labels, identity_dict = self.combine_nodes(self._nodes, addn_nodes, addn_nodes_ID, identity_dict, nodes_name, centroids = centroids, down_factor = down_factor)
  else:
- node_labels, identity_dict = self.combine_nodes(node_labels, addn_nodes, addn_nodes_ID, identity_dict, centroids = centroids)
+ node_labels, identity_dict = self.combine_nodes(node_labels, addn_nodes, addn_nodes_ID, identity_dict, centroids = centroids, down_factor = down_factor)

  else:
  if i == 0:
- node_labels, identity_dict = self.combine_nodes(self._nodes, addn_nodes, addn_nodes_ID, identity_dict, nodes_name, centroids = centroids)
+ node_labels, identity_dict = self.combine_nodes(self._nodes, addn_nodes, addn_nodes_ID, identity_dict, nodes_name, centroids = centroids, down_factor = down_factor)
  else:
- node_labels, identity_dict = self.combine_nodes(node_labels, addn_nodes, addn_nodes_ID, identity_dict, centroids = centroids)
+ node_labels, identity_dict = self.combine_nodes(node_labels, addn_nodes, addn_nodes_ID, identity_dict, centroids = centroids, down_factor = down_factor)
  except Exception as e:
  print("Could not open additional nodes, verify they are being inputted correctly...")

@@ -6120,14 +6126,25 @@ class Network_3D:

  for node, iden in self.node_identities.items():

- if iden == root:
+ if iden == root: # Standard behavior

  root_set.append(node)

- elif (iden == targ) or (targ == 'All Others (Excluding Self)'):
+ elif '[' in iden and root != "All (Excluding Targets)": # For multiple nodes
+ if root in iden:
+ root_set.append(node)
+
+ elif (iden == targ) or (targ == 'All Others (Excluding Self)'): # The other group

  compare_set.append(node)

+ elif '[' in iden: # The other group, for multiple nodes
+ if targ in iden:
+ compare_set.append(node)
+
+ elif root == "All (Excluding Targets)": # If not assigned to the other group but the comprehensive root option is used
+ root_set.append(node)
+
  if root == targ:

  compare_set = root_set
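Illustrative note (not package code): the '[' checks above treat identities stored as stringified lists (for example "['TypeA+', 'TypeB-']", an invented label) as multi-identity nodes, and membership is tested by substring containment rather than exact equality.

    iden = "['TypeA+', 'TypeB-']"   # hypothetical multi-identity label
    root = "TypeA+"

    if '[' in iden and root in iden:
        print("node joins the root set")   # substring match, not exact equality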
src/nettracer3d/nettracer_gui.py
@@ -4,7 +4,7 @@ from PyQt6.QtWidgets import (QApplication, QMainWindow, QWidget, QVBoxLayout, QG
  QHBoxLayout, QSlider, QMenuBar, QMenu, QDialog,
  QFormLayout, QLineEdit, QPushButton, QFileDialog,
  QLabel, QComboBox, QMessageBox, QTableView, QInputDialog,
- QMenu, QTabWidget, QGroupBox)
+ QMenu, QTabWidget, QGroupBox, QCheckBox)
  from PyQt6.QtCore import (QPoint, Qt, QAbstractTableModel, QTimer, QThread, pyqtSignal, QObject, QCoreApplication, QEvent, QEventLoop)
  import numpy as np
  import time
@@ -511,12 +511,8 @@ class ImageViewerWindow(QMainWindow):
  data = df.iloc[:, 0].tolist() # First column as list
  value = None

- self.format_for_upperright_table(
- data=data,
- metric=metric,
- value=value,
- title=title
- )
+ df = self.format_for_upperright_table(data=data, metric=metric, value=value, title=title)
+ return df
  else:
  # Multiple columns: create dictionary as before
  # First column header (for metric parameter)
@@ -542,12 +538,8 @@ class ImageViewerWindow(QMainWindow):
  value = value[0]

  # Call the parent method
- self.format_for_upperright_table(
- data=data_dict,
- metric=metric,
- value=value,
- title=title
- )
+ df = self.format_for_upperright_table(data=data_dict, metric=metric, value=value, title=title)
+ return df

  QMessageBox.information(
  self,
@@ -4592,6 +4584,8 @@ class ImageViewerWindow(QMainWindow):
  rad_action.triggered.connect(self.show_rad_dialog)
  inter_action = stats_menu.addAction("Calculate Node < > Edge Interaction")
  inter_action.triggered.connect(self.show_interaction_dialog)
+ violin_action = stats_menu.addAction("Show Identity Violins/UMAP")
+ violin_action.triggered.connect(self.show_violin_dialog)
  overlay_menu = analysis_menu.addMenu("Data/Overlays")
  degree_action = overlay_menu.addAction("Get Degree Information")
  degree_action.triggered.connect(self.show_degree_dialog)
@@ -5047,6 +5041,8 @@ class ImageViewerWindow(QMainWindow):
  for column in range(table.model().columnCount(None)):
  table.resizeColumnToContents(column)

+ return df
+
  except:
  pass

@@ -5475,7 +5471,6 @@ class ImageViewerWindow(QMainWindow):

  elif sort == 'Merge Nodes':
  try:
-
  if my_network.nodes is None:
  QMessageBox.critical(
  self,
@@ -5483,72 +5478,118 @@ class ImageViewerWindow(QMainWindow):
  "Please load your first set of nodes into the 'Nodes' channel first"
  )
  return
-
  if len(np.unique(my_network.nodes)) < 3:
  self.show_label_dialog()
-
- # First ask user what they want to select
- msg = QMessageBox()
- msg.setWindowTitle("Selection Type")
- msg.setText("Would you like to select a TIFF file or a directory?")
- tiff_button = msg.addButton("TIFF File", QMessageBox.ButtonRole.AcceptRole)
- dir_button = msg.addButton("Directory", QMessageBox.ButtonRole.AcceptRole)
- msg.addButton("Cancel", QMessageBox.ButtonRole.RejectRole)
-
- msg.exec()
-
- # Also if they want centroids:
- msg2 = QMessageBox()
- msg2.setWindowTitle("Selection Type")
- msg2.setText("Would you like to compute node centroids for each image prior to merging?")
- yes_button = msg2.addButton("Yes", QMessageBox.ButtonRole.AcceptRole)
- no_button = msg2.addButton("No", QMessageBox.ButtonRole.AcceptRole)
- msg2.addButton("Cancel", QMessageBox.ButtonRole.RejectRole)
-
- msg2.exec()
-
- if msg2.clickedButton() == yes_button:
- centroids = True
- else:
- centroids = False
-
- if msg.clickedButton() == tiff_button:
- # Code for selecting TIFF files
- filename, _ = QFileDialog.getOpenFileName(
- self,
- "Select TIFF file",
- "",
- "TIFF files (*.tiff *.tif)"
- )
- if filename:
- selected_path = filename
-
- elif msg.clickedButton() == dir_button:
- # Code for selecting directories
- dialog = QFileDialog(self)
- dialog.setOption(QFileDialog.Option.DontUseNativeDialog)
- dialog.setOption(QFileDialog.Option.ReadOnly)
- dialog.setFileMode(QFileDialog.FileMode.Directory)
- dialog.setViewMode(QFileDialog.ViewMode.Detail)
-
- if dialog.exec() == QFileDialog.DialogCode.Accepted:
- selected_path = dialog.directory().absolutePath()
-
- my_network.merge_nodes(selected_path, root_id = self.node_name, centroids = centroids)
- self.load_channel(0, my_network.nodes, True)
-
-
- if hasattr(my_network, 'node_identities') and my_network.node_identities is not None:
+
+ # Create custom dialog
+ dialog = QDialog(self)
+ dialog.setWindowTitle("Merge Nodes Configuration")
+ dialog.setModal(True)
+ dialog.resize(400, 200)
+
+ layout = QVBoxLayout(dialog)
+
+ # Selection type
+ type_layout = QHBoxLayout()
+ type_label = QLabel("Selection Type:")
+ type_combo = QComboBox()
+ type_combo.addItems(["TIFF File", "Directory"])
+ type_layout.addWidget(type_label)
+ type_layout.addWidget(type_combo)
+ layout.addLayout(type_layout)
+
+ # Centroids checkbox
+ centroids_layout = QHBoxLayout()
+ centroids_check = QCheckBox("Compute node centroids for each image prior to merging")
+ centroids_layout.addWidget(centroids_check)
+ layout.addLayout(centroids_layout)
+
+ # Down factor for centroid calculation
+ down_factor_layout = QHBoxLayout()
+ down_factor_label = QLabel("Down Factor (for centroid calculation downsampling):")
+ down_factor_edit = QLineEdit()
+ down_factor_edit.setText("1") # Default value
+ down_factor_edit.setPlaceholderText("Enter down factor (e.g., 1, 2, 4)")
+ down_factor_layout.addWidget(down_factor_label)
+ down_factor_layout.addWidget(down_factor_edit)
+ layout.addLayout(down_factor_layout)
+
+ # Buttons
+ button_layout = QHBoxLayout()
+ accept_button = QPushButton("Accept")
+ cancel_button = QPushButton("Cancel")
+ button_layout.addWidget(accept_button)
+ button_layout.addWidget(cancel_button)
+ layout.addLayout(button_layout)
+
+ # Connect buttons
+ accept_button.clicked.connect(dialog.accept)
+ cancel_button.clicked.connect(dialog.reject)
+
+ # Execute dialog
+ if dialog.exec() == QDialog.DialogCode.Accepted:
+ # Get values from dialog
+ selection_type = type_combo.currentText()
+ centroids = centroids_check.isChecked()
+
+ # Validate and get down_factor
  try:
- self.format_for_upperright_table(my_network.node_identities, 'NodeID', 'Identity', 'Node Identities')
- except Exception as e:
- print(f"Error loading node identity table: {e}")
- if centroids:
- self.format_for_upperright_table(my_network.node_centroids, 'NodeID', ['Z', 'Y', 'X'], 'Node Centroids')
-
-
+ down_factor = int(down_factor_edit.text())
+ if down_factor <= 0:
+ raise ValueError("Down factor must be positive")
+ except ValueError as e:
+ QMessageBox.critical(
+ self,
+ "Invalid Input",
+ f"Invalid down factor: {str(e)}"
+ )
+ return
+
+ # Handle file/directory selection based on combo box choice
+ if selection_type == "TIFF File":
+ filename, _ = QFileDialog.getOpenFileName(
+ self,
+ "Select TIFF file",
+ "",
+ "TIFF files (*.tiff *.tif)"
+ )
+ if filename:
+ selected_path = filename
+ else:
+ return # User cancelled file selection
+ else: # Directory
+ file_dialog = QFileDialog(self)
+ file_dialog.setOption(QFileDialog.Option.DontUseNativeDialog)
+ file_dialog.setOption(QFileDialog.Option.ReadOnly)
+ file_dialog.setFileMode(QFileDialog.FileMode.Directory)
+ file_dialog.setViewMode(QFileDialog.ViewMode.Detail)
+ if file_dialog.exec() == QFileDialog.DialogCode.Accepted:
+ selected_path = file_dialog.directory().absolutePath()
+ else:
+ return # User cancelled directory selection
+
+ if down_factor == 1:
+ down_factor = None
+ # Call merge_nodes with all parameters
+ my_network.merge_nodes(
+ selected_path,
+ root_id=self.node_name,
+ centroids=centroids,
+ down_factor=down_factor
+ )
+
+ self.load_channel(0, my_network.nodes, True)
+
+ if hasattr(my_network, 'node_identities') and my_network.node_identities is not None:
+ try:
+ self.format_for_upperright_table(my_network.node_identities, 'NodeID', 'Identity', 'Node Identities')
+ except Exception as e:
+ print(f"Error loading node identity table: {e}")
+
+ if centroids:
+ self.format_for_upperright_table(my_network.node_centroids, 'NodeID', ['Z', 'Y', 'X'], 'Node Centroids')
+
  except Exception as e:
-
  QMessageBox.critical(
  self,
  "Error Merging",
@@ -6068,7 +6109,7 @@ class ImageViewerWindow(QMainWindow):

  if self.shape == self.channel_data[channel_index].shape:
  preserve_zoom = (self.ax.get_xlim(), self.ax.get_ylim())
- self.shape = self.channel_data[channel_index].shape
+ self.shape = (self.channel_data[channel_index].shape[0], self.channel_data[channel_index].shape[1], self.channel_data[channel_index].shape[2])
  if self.shape[1] * self.shape[2] > 3000 * 3000 * self.downsample_factor:
  self.throttle = True
  else:
@@ -6826,6 +6867,10 @@ class ImageViewerWindow(QMainWindow):
  dialog = InteractionDialog(self)
  dialog.exec()

+ def show_violin_dialog(self):
+ dialog = ViolinDialog(self)
+ dialog.show()
+
  def show_degree_dialog(self):
  dialog = DegreeDialog(self)
  dialog.exec()
@@ -8356,7 +8401,7 @@ class MergeNodeIdDialog(QDialog):
  result = {key: np.array([d[key] for d in id_dicts]) for key in all_keys}


- self.parent().format_for_upperright_table(result, 'NodeID', good_list, 'Mean Intensity')
+ self.parent().format_for_upperright_table(result, 'NodeID', good_list, 'Mean Intensity (Save this Table for "Analyze -> Stats -> Show Violins")')
  if umap:
  my_network.identity_umap(result)

@@ -8364,7 +8409,7 @@ class MergeNodeIdDialog(QDialog):
  QMessageBox.information(
  self,
  "Success",
- "Node Identities Merged. New IDs represent presence of corresponding img foreground with +, absence with -. Please save your new identities as csv, then use File -> Load -> Load From Excel Helper to bulk search and rename desired combinations. (Press Help [above] for more info)"
+ "Node Identities Merged. New IDs represent presence of corresponding img foreground with +, absence with -. If desired, please save your new identities as csv, then use File -> Load -> Load From Excel Helper to bulk search and rename desired combinations. If desired, please save the outputted mean intensity table to use with 'Analyze -> Stats -> Show Violins'. (Press Help [above] for more info)"
  )

  self.accept()
@@ -9143,12 +9188,16 @@ class NearNeighDialog(QDialog):
  if my_network.node_identities is not None:

  self.root = QComboBox()
- self.root.addItems(list(set(my_network.node_identities.values())))
+ roots = list(set(my_network.node_identities.values()))
+ roots.sort()
+ roots.append("All (Excluding Targets)")
+ self.root.addItems(roots)
  self.root.setCurrentIndex(0)
  identities_layout.addRow("Root Identity to Search for Neighbor's IDs?", self.root)

  self.targ = QComboBox()
  neighs = list(set(my_network.node_identities.values()))
+ neighs.sort()
  neighs.append("All Others (Excluding Self)")
  self.targ.addItems(neighs)
  self.targ.setCurrentIndex(0)
@@ -9273,6 +9322,10 @@ class NearNeighDialog(QDialog):
  except:
  targ = None

+ if root == "All (Excluding Targets)" and targ == 'All Others (Excluding Self)':
+ root = None
+ targ = None
+
  heatmap = self.map.isChecked()
  threed = self.threed.isChecked()
  numpy = self.numpy.isChecked()
@@ -9892,6 +9945,294 @@ class InteractionDialog(QDialog):
  print(f"Error finding interactions: {e}")


+ class ViolinDialog(QDialog):
+
+ def __init__(self, parent=None):
+
+ super().__init__(parent)
+
+ QMessageBox.critical(
+ self,
+ "Notice",
+ "Please select spreadsheet (Should be table output of 'File -> Images -> Node Identities -> Assign Node Identities from Overlap with Other Images'. Make sure to save that table as .csv/.xlsx and then load it here to use this.)"
+ )
+
+ try:
+ try:
+ self.df = self.parent().load_file()
+ except:
+ return
+
+ self.backup_df = copy.deepcopy(self.df)
+ # Get all identity lists and normalize the dataframe
+ identity_lists = self.get_all_identity_lists()
+ self.df = self.normalize_df_with_identity_centerpoints(self.df, identity_lists)
+
+ self.setWindowTitle("Violin Parameters")
+ self.setModal(False)
+
+ layout = QFormLayout(self)
+
+ if my_network.node_identities is not None:
+
+ self.idens = QComboBox()
+ all_idens = list(set(my_network.node_identities.values()))
+ idens = []
+ for iden in all_idens:
+ if '[' not in iden:
+ idens.append(iden)
+ idens.sort()
+ idens.insert(0, "None")
+ self.idens.addItems(idens)
+ self.idens.setCurrentIndex(0)
+ layout.addRow("Return Identity Violin Plots?", self.idens)
+
+ if my_network.communities is not None:
+ self.coms = QComboBox()
+ coms = list(set(my_network.communities.values()))
+ coms.sort()
+ coms.insert(0, "None")
+ coms = [str(x) for x in coms]
+ self.coms.addItems(coms)
+ self.coms.setCurrentIndex(0)
+ layout.addRow("Return Neighborhood/Community Violin Plots?", self.coms)
+
+ # Add Run button
+ run_button = QPushButton("Show Z-score-like Violin")
+ run_button.clicked.connect(self.run)
+ layout.addWidget(run_button)
+
+ run_button2 = QPushButton("Show Z-score UMAP")
+ run_button2.clicked.connect(self.run2)
+ layout.addWidget(run_button2)
+ except:
+ QTimer.singleShot(0, self.close)
+
+ def get_all_identity_lists(self):
+ """
+ Get all identity lists for normalization purposes.
+
+ Returns:
+ dict: Dictionary where keys are identity names and values are lists of node IDs
+ """
+ identity_lists = {}
+
+ # Get all unique identities
+ all_identities = set()
+ import ast
+ for item in my_network.node_identities:
+ try:
+ parse = ast.literal_eval(my_network.node_identities[item])
+ if isinstance(parse, (list, tuple, set)):
+ all_identities.update(parse)
+ else:
+ all_identities.add(str(parse))
+ except:
+ all_identities.add(str(my_network.node_identities[item]))
+
+ # For each identity, get the list of nodes that have it
+ for identity in all_identities:
+ iden_list = []
+ for item in my_network.node_identities:
+ try:
+ parse = ast.literal_eval(my_network.node_identities[item])
+ if identity in parse:
+ iden_list.append(item)
+ except:
+ if identity == str(my_network.node_identities[item]):
+ iden_list.append(item)
+
+ if iden_list: # Only add if we found nodes
+ identity_lists[identity] = iden_list
+
+ return identity_lists
+
+ def normalize_df_with_identity_centerpoints(self, df, identity_lists):
+ """
+ Normalize the entire dataframe using identity-specific centerpoints.
+ Uses Z-score-like normalization with identity centerpoint as the "mean".
+
+ Parameters:
+ df (pd.DataFrame): Original dataframe
+ identity_lists (dict): Dictionary where keys are identity names and values are lists of node IDs
+
+ Returns:
+ pd.DataFrame: Normalized dataframe
+ """
+ # Make a copy to avoid modifying the original dataframe
+ df_copy = df.copy()
+
+ # Set the first column as the index (row headers)
+ df_copy = df_copy.set_index(df_copy.columns[0])
+
+ # Convert all remaining columns to float type (batch conversion)
+ df_copy = df_copy.astype(float)
+
+ # First, calculate the centerpoint for each column by finding the median across all identity groups
+ column_centerpoints = {}
+
+ for column in df_copy.columns:
+ centerpoint = None
+
+ for identity, node_list in identity_lists.items():
+ # Get nodes that exist in both the identity list and the dataframe
+ valid_nodes = [node for node in node_list if node in df_copy.index]
+ if valid_nodes and ((str(identity) == str(column)) or str(identity) == f'{str(column)}+'):
+ # Get the median value for this identity in this column
+ identity_min = df_copy.loc[valid_nodes, column].median()
+ centerpoint = identity_min
+ break # Found the match, no need to continue
+
+ if centerpoint is not None:
+ # Use the identity-specific centerpoint
+ column_centerpoints[column] = centerpoint
+ else:
+ # Fallback: if no matching identity, use column median
+ column_centerpoints[column] = df_copy[column].median()
+
+ # Now normalize each column using Z-score-like calculation with identity centerpoint
+ df_normalized = df_copy.copy()
+ for column in df_copy.columns:
+ centerpoint = column_centerpoints[column]
+ # Calculate standard deviation of the column
+ std_dev = df_copy[column].std()
+
+ if std_dev > 0: # Avoid division by zero
+ # Z-score-like: (value - centerpoint) / std_dev
+ df_normalized[column] = (df_copy[column] - centerpoint) / std_dev
+ else:
+ # If std_dev is 0, just subtract centerpoint
+ df_normalized[column] = df_copy[column] - centerpoint
+
+ # Convert back to original format with first column as regular column
+ df_normalized = df_normalized.reset_index()
+
+ return df_normalized
+
+ def show_in_table(self, df, metric, title):
+
+ # Create new table
+ table = CustomTableView(self.parent())
+ table.setModel(PandasModel(df))
+
+ try:
+ first_column_name = table.model()._data.columns[0]
+ table.sort_table(first_column_name, ascending=True)
+ except:
+ pass
+
+ # Add to tabbed widget
+ if title is None:
+ self.parent().tabbed_data.add_table(f"{metric} Analysis", table)
+ else:
+ self.parent().tabbed_data.add_table(f"{title}", table)
+
+
+
+ # Adjust column widths to content
+ for column in range(table.model().columnCount(None)):
+ table.resizeColumnToContents(column)
+
+ def run(self):
+
+ def df_to_dict_by_rows(df, row_indices, title):
+ """
+ Convert a pandas DataFrame to a dictionary by selecting specific rows.
+ No normalization - dataframe is already normalized.
+
+ Parameters:
+ df (pd.DataFrame): DataFrame with first column as row headers, remaining columns contain floats
+ row_indices (list): List of values from the first column representing rows to include
+
+ Returns:
+ dict: Dictionary where keys are column headers and values are lists of column values (as floats)
+ for the specified rows
+ """
+ # Make a copy to avoid modifying the original dataframe
+ df_copy = df.copy()
+
+ # Set the first column as the index (row headers)
+ df_copy = df_copy.set_index(df_copy.columns[0])
+
+ # Mask the dataframe to include only the specified rows
+ masked_df = df_copy.loc[row_indices]
+
+ # Create empty dictionary
+ result_dict = {}
+
+ # For each column, add the column header as key and column values as list
+ for column in masked_df.columns:
+ result_dict[column] = masked_df[column].tolist()
+
+ masked_df.insert(0, "NodeIDs", row_indices)
+ self.show_in_table(masked_df, metric = "NodeID", title = title)
+
+
+ return result_dict
+
+ from . import neighborhoods
+
+ if self.idens.currentIndex() != 0:
+
+ iden = self.idens.currentText()
+ iden_list = []
+ import ast
+
+ for item in my_network.node_identities:
+
+ try:
+ parse = ast.literal_eval(my_network.node_identities[item])
+ if iden in parse:
+ iden_list.append(item)
+ except:
+ if (iden == my_network.node_identities[item]):
+ iden_list.append(item)
+
+ violin_dict = df_to_dict_by_rows(self.df, iden_list, f"Z-Score-like Channel Intensities of Identity {iden}, {len(iden_list)} Nodes")
+
+ neighborhoods.create_violin_plots(violin_dict, graph_title=f"Z-Score-like Channel Intensities of Identity {iden}, {len(iden_list)} Nodes")
+
+
+ if self.coms.currentIndex() != 0:
+
+ com = self.coms.currentText()
+
+ com_dict = n3d.invert_dict(my_network.communities)
+
+ com_list = com_dict[int(com)]
+
+ violin_dict = df_to_dict_by_rows(self.df, com_list, f"Z-Score-like Channel Intensities of Community/Neighborhood {com}, {len(com_list)} Nodes")
+
+ neighborhoods.create_violin_plots(violin_dict, graph_title=f"Z-Score-like Channel Intensities of Community/Neighborhood {com}, {len(com_list)} Nodes")
+
+
+ def run2(self):
+ def df_to_dict(df):
+ # Make a copy to avoid modifying the original dataframe
+ df_copy = df.copy()
+
+ # Set the first column as the index (row headers)
+ df_copy = df_copy.set_index(df_copy.columns[0])
+
+ # Convert all remaining columns to float type (batch conversion)
+ df_copy = df_copy.astype(float)
+
+ # Create the result dictionary
+ result_dict = {}
+ for row_idx in df_copy.index:
+ result_dict[row_idx] = df_copy.loc[row_idx].tolist()
+
+ return result_dict
+
+ try:
+ umap_dict = df_to_dict(self.backup_df)
+ my_network.identity_umap(umap_dict)
+ except:
+ pass
+
+
+
+
  class DegreeDialog(QDialog):


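Illustrative sketch (not from the package): normalize_df_with_identity_centerpoints above shifts each column by a per-column centerpoint (the median of the nodes carrying the matching identity) and divides by that column's standard deviation; the DataFrame, node IDs, and values below are invented to show the arithmetic.

    import pandas as pd

    df = pd.DataFrame({"NodeID": [1, 2, 3, 4],
                       "ChannelA": [10.0, 12.0, 30.0, 33.0]}).set_index("NodeID")

    centerpoint = df.loc[[3, 4], "ChannelA"].median()   # nodes 3 and 4 assumed to carry the matching identity
    std_dev = df["ChannelA"].std()
    df["ChannelA"] = (df["ChannelA"] - centerpoint) / std_dev if std_dev > 0 else df["ChannelA"] - centerpoint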
@@ -12931,16 +13272,58 @@ class GrayWaterDialog(QDialog):
  run_button.clicked.connect(self.run_watershed)
  layout.addRow(run_button)

+ def wait_for_threshold_processing(self):
+ """
+ Opens ThresholdWindow and waits for user to process the image.
+ Returns True if completed, False if cancelled.
+ The thresholded image will be available in the main window after completion.
+ """
+ # Create event loop to wait for user
+ loop = QEventLoop()
+ result = {'completed': False}
+
+ # Create the threshold window
+ thresh_window = ThresholdWindow(self.parent(), 0)
+
+
+ # Connect signals
+ def on_processing_complete():
+ result['completed'] = True
+ loop.quit()
+
+ def on_processing_cancelled():
+ result['completed'] = False
+ loop.quit()
+
+ thresh_window.processing_complete.connect(on_processing_complete)
+ thresh_window.processing_cancelled.connect(on_processing_cancelled)
+
+ # Show window and wait
+ thresh_window.show()
+ thresh_window.raise_()
+ thresh_window.activateWindow()
+
+ # Block until user clicks "Apply Threshold & Continue" or "Cancel"
+ loop.exec()
+
+ # Clean up
+ thresh_window.deleteLater()
+
+ return result['completed']
+
  def run_watershed(self):

  try:

+ self.accept()
+ print("Please threshold foreground, or press cancel/skip if not desired:")
+ self.wait_for_threshold_processing()
+ data = self.parent().channel_data[self.parent().active_channel]
+
  min_intensity = float(self.min_intensity.text()) if self.min_intensity.text().strip() else None

  min_peak_distance = int(self.min_peak_distance.text()) if self.min_peak_distance.text().strip() else 1

- data = self.parent().channel_data[self.parent().active_channel]
-
  data = n3d.gray_watershed(data, min_peak_distance, min_intensity)

  self.parent().load_channel(self.parent().active_channel, data, data = True, preserve_zoom = (self.parent().ax.get_xlim(), self.parent().ax.get_ylim()))
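Generic sketch (hedged; widget and signal names below are placeholders, not the package's actual classes): wait_for_threshold_processing above uses the standard Qt pattern of blocking on a local QEventLoop until one of the child window's signals fires, which is what lets run_watershed pause for the optional thresholding step.

    from PyQt6.QtCore import QEventLoop

    def wait_for(window):
        loop = QEventLoop()
        result = {"completed": False}

        def done():
            result["completed"] = True
            loop.quit()

        # 'finished_ok' and 'cancelled' are hypothetical signals on the window
        window.finished_ok.connect(done)
        window.cancelled.connect(loop.quit)

        window.show()
        loop.exec()   # blocks here until quit() is called by a signal handler
        return result["completed"]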
{nettracer3d-1.0.1 → nettracer3d-1.0.3/src/nettracer3d.egg-info}/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: nettracer3d
- Version: 1.0.1
+ Version: 1.0.3
  Summary: Scripts for intializing and analyzing networks from segmentations of three dimensional images.
  Author-email: Liam McLaughlin <liamm@wustl.edu>
  Project-URL: Documentation, https://nettracer3d.readthedocs.io/en/latest/
@@ -110,6 +110,6 @@ McLaughlin, L., Zhang, B., Sharma, S. et al. Three dimensional multiscalar neuro

  NetTracer3D was developed by Liam McLaughlin while working under Dr. Sanjay Jain at Washington University School of Medicine.

- -- Version 1.0.1 Updates --
+ -- Version 1.0.3 Updates --

- * Bug fixes, mainly
+ * Some small bug fixes and adjustments