nettracer3d 1.1.3.tar.gz → 1.1.6.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of nettracer3d might be problematic.

Files changed (31):
  1. {nettracer3d-1.1.3/src/nettracer3d.egg-info → nettracer3d-1.1.6}/PKG-INFO +3 -3
  2. {nettracer3d-1.1.3 → nettracer3d-1.1.6}/README.md +2 -2
  3. {nettracer3d-1.1.3 → nettracer3d-1.1.6}/pyproject.toml +1 -1
  4. {nettracer3d-1.1.3 → nettracer3d-1.1.6}/src/nettracer3d/neighborhoods.py +89 -66
  5. {nettracer3d-1.1.3 → nettracer3d-1.1.6}/src/nettracer3d/nettracer.py +115 -16
  6. {nettracer3d-1.1.3 → nettracer3d-1.1.6}/src/nettracer3d/nettracer_gui.py +316 -124
  7. {nettracer3d-1.1.3 → nettracer3d-1.1.6}/src/nettracer3d/network_draw.py +9 -3
  8. {nettracer3d-1.1.3 → nettracer3d-1.1.6}/src/nettracer3d/node_draw.py +5 -1
  9. {nettracer3d-1.1.3 → nettracer3d-1.1.6/src/nettracer3d.egg-info}/PKG-INFO +3 -3
  10. {nettracer3d-1.1.3 → nettracer3d-1.1.6}/LICENSE +0 -0
  11. {nettracer3d-1.1.3 → nettracer3d-1.1.6}/setup.cfg +0 -0
  12. {nettracer3d-1.1.3 → nettracer3d-1.1.6}/src/nettracer3d/__init__.py +0 -0
  13. {nettracer3d-1.1.3 → nettracer3d-1.1.6}/src/nettracer3d/cellpose_manager.py +0 -0
  14. {nettracer3d-1.1.3 → nettracer3d-1.1.6}/src/nettracer3d/community_extractor.py +0 -0
  15. {nettracer3d-1.1.3 → nettracer3d-1.1.6}/src/nettracer3d/excelotron.py +0 -0
  16. {nettracer3d-1.1.3 → nettracer3d-1.1.6}/src/nettracer3d/modularity.py +0 -0
  17. {nettracer3d-1.1.3 → nettracer3d-1.1.6}/src/nettracer3d/morphology.py +0 -0
  18. {nettracer3d-1.1.3 → nettracer3d-1.1.6}/src/nettracer3d/network_analysis.py +0 -0
  19. {nettracer3d-1.1.3 → nettracer3d-1.1.6}/src/nettracer3d/painting.py +0 -0
  20. {nettracer3d-1.1.3 → nettracer3d-1.1.6}/src/nettracer3d/proximity.py +0 -0
  21. {nettracer3d-1.1.3 → nettracer3d-1.1.6}/src/nettracer3d/run.py +0 -0
  22. {nettracer3d-1.1.3 → nettracer3d-1.1.6}/src/nettracer3d/segmenter.py +0 -0
  23. {nettracer3d-1.1.3 → nettracer3d-1.1.6}/src/nettracer3d/segmenter_GPU.py +0 -0
  24. {nettracer3d-1.1.3 → nettracer3d-1.1.6}/src/nettracer3d/simple_network.py +0 -0
  25. {nettracer3d-1.1.3 → nettracer3d-1.1.6}/src/nettracer3d/smart_dilate.py +0 -0
  26. {nettracer3d-1.1.3 → nettracer3d-1.1.6}/src/nettracer3d/stats.py +0 -0
  27. {nettracer3d-1.1.3 → nettracer3d-1.1.6}/src/nettracer3d.egg-info/SOURCES.txt +0 -0
  28. {nettracer3d-1.1.3 → nettracer3d-1.1.6}/src/nettracer3d.egg-info/dependency_links.txt +0 -0
  29. {nettracer3d-1.1.3 → nettracer3d-1.1.6}/src/nettracer3d.egg-info/entry_points.txt +0 -0
  30. {nettracer3d-1.1.3 → nettracer3d-1.1.6}/src/nettracer3d.egg-info/requires.txt +0 -0
  31. {nettracer3d-1.1.3 → nettracer3d-1.1.6}/src/nettracer3d.egg-info/top_level.txt +0 -0
{nettracer3d-1.1.3/src/nettracer3d.egg-info → nettracer3d-1.1.6}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: nettracer3d
-Version: 1.1.3
+Version: 1.1.6
 Summary: Scripts for intializing and analyzing networks from segmentations of three dimensional images.
 Author-email: Liam McLaughlin <liamm@wustl.edu>
 Project-URL: Documentation, https://nettracer3d.readthedocs.io/en/latest/
@@ -110,6 +110,6 @@ McLaughlin, L., Zhang, B., Sharma, S. et al. Three dimensional multiscalar neuro
 
 NetTracer3D was developed by Liam McLaughlin while working under Dr. Sanjay Jain at Washington University School of Medicine.
 
--- Version 1.1.3 Updates --
+-- Version 1.1.6 Updates --
 
-* Some minor text adjustments
+* Some adjustments
{nettracer3d-1.1.3 → nettracer3d-1.1.6}/README.md

@@ -65,6 +65,6 @@ McLaughlin, L., Zhang, B., Sharma, S. et al. Three dimensional multiscalar neuro
 
 NetTracer3D was developed by Liam McLaughlin while working under Dr. Sanjay Jain at Washington University School of Medicine.
 
--- Version 1.1.3 Updates --
+-- Version 1.1.6 Updates --
 
-* Some minor text adjustments
+* Some adjustments
{nettracer3d-1.1.3 → nettracer3d-1.1.6}/pyproject.toml

@@ -1,6 +1,6 @@
 [project]
 name = "nettracer3d"
-version = "1.1.3"
+version = "1.1.6"
 authors = [
   { name="Liam McLaughlin", email="liamm@wustl.edu" },
 ]
{nettracer3d-1.1.3 → nettracer3d-1.1.6}/src/nettracer3d/neighborhoods.py

@@ -793,7 +793,7 @@ def create_community_heatmap(community_intensity, node_community, node_centroids
         return np.array([r, g, b], dtype=np.uint8)
 
     # Create lookup table for RGB colors
-    max_label = int(max(max(labeled_array.flat), max(node_to_community_intensity.keys()) if node_to_community_intensity else 0))
+    max_label = max(max(labeled_array.flat), max(node_to_community_intensity.keys()) if node_to_community_intensity else 0)
     color_lut = np.zeros((max_label + 1, 3), dtype=np.uint8)  # Default to black (0,0,0)
 
     # Fill lookup table with RGB colors based on community intensity
@@ -1036,8 +1036,8 @@ def create_node_heatmap(node_intensity, node_centroids, shape=None, is_3d=True,
 
     # Modified usage in your main function:
     # Create lookup table for RGBA colors (note the 4 channels now)
-    max_label = int(max(max(labeled_array.flat), max(node_to_intensity.keys()) if node_to_intensity else 0))
-    color_lut = np.zeros((max_label + 1, 4), dtype=np.uint8)
+    max_label = max(max(labeled_array.flat), max(node_to_intensity.keys()) if node_to_intensity else 0)
+    color_lut = np.zeros((max_label + 1, 4), dtype=np.uint8)  # Default to transparent (0,0,0,0)
 
     # Fill lookup table with RGBA colors based on intensity
    for node_id, intensity in node_to_intensity.items():
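
For readers unfamiliar with the pattern, the lookup table built here allows an entire labeled volume to be colorized in one vectorized indexing operation instead of a per-voxel loop. A minimal standalone sketch of the idea (toy labels; that the heatmap applies its LUT via this same fancy-indexing pattern is an assumption):

    import numpy as np

    # Toy labeled image: 0 is background, 1 and 2 are node labels (hypothetical data)
    labeled_array = np.array([[0, 1, 1],
                              [2, 2, 0]])

    # RGBA lookup table indexed by label; row 0 stays transparent (0,0,0,0)
    max_label = int(labeled_array.max())
    color_lut = np.zeros((max_label + 1, 4), dtype=np.uint8)
    color_lut[1] = [255, 0, 0, 255]  # label 1 -> opaque red
    color_lut[2] = [0, 255, 0, 255]  # label 2 -> opaque green

    # Fancy indexing maps every pixel's label to its RGBA color at once
    rgba = color_lut[labeled_array]
    print(rgba.shape)  # (2, 3, 4)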
@@ -1128,7 +1128,7 @@ def create_node_heatmap(node_intensity, node_centroids, shape=None, is_3d=True,
 
 def create_violin_plots(data_dict, graph_title="Violin Plots"):
     """
-    Create violin plots from dictionary data with distinct colors.
+    Create violin plots from dictionary data with distinct colors and IQR lines.
 
     Parameters:
     data_dict (dict): Dictionary where keys are column headers (strings) and
@@ -1140,110 +1140,133 @@
         return
 
     # Prepare data
+    data_dict = dict(sorted(data_dict.items()))
     labels = list(data_dict.keys())
     data_lists = list(data_dict.values())
 
-    # Generate colors using the community color strategy
+    # Generate colors
     try:
-        # Create a mock community dict for color generation
-        mock_community_dict = {i: i+1 for i in range(len(labels))}  # No outliers for simplicity
-
-        # Get distinct colors
-        n_colors = len(labels)
-        colors_rgb = community_extractor.generate_distinct_colors(n_colors)
-
-        # Sort by data size for consistent color assignment (like community sizes)
-        data_sizes = [(i, len(data_lists[i])) for i in range(len(data_lists))]
-        sorted_indices = sorted(data_sizes, key=lambda x: (-x[1], x[0]))
-
-        # Create color mapping
-        colors = []
-        for i, _ in sorted_indices:
-            color_idx = sorted_indices.index((i, _))
-            if color_idx < len(colors_rgb):
-                # Convert RGB (0-255) to matplotlib format (0-1)
-                rgb_normalized = tuple(c/255.0 for c in colors_rgb[color_idx])
-                colors.append(rgb_normalized)
-            else:
-                colors.append('gray')  # Fallback color
-
-        # Reorder colors to match original label order
-        final_colors = ['gray'] * len(labels)
-        for idx, (original_idx, _) in enumerate(sorted_indices):
-            final_colors[original_idx] = colors[idx]
-
+        final_colors = generate_distinct_colors(len(labels))
     except Exception as e:
         print(f"Color generation failed, using default colors: {e}")
-        # Fallback to default matplotlib colors
         final_colors = plt.cm.Set3(np.linspace(0, 1, len(labels)))
 
-    # Create the plot
     fig, ax = plt.subplots(figsize=(max(8, len(labels) * 1.5), 6))
 
     # Create violin plots
-    violin_parts = ax.violinplot(data_lists, positions=range(len(labels)),
-                                 showmeans=False, showmedians=True, showextrema=True)
+    violin_parts = ax.violinplot(
+        data_lists, positions=range(len(labels)),
+        showmeans=False, showmedians=True, showextrema=True
+    )
 
-    # Color the violins
+    # Color violins
     for i, pc in enumerate(violin_parts['bodies']):
         if i < len(final_colors):
            pc.set_facecolor(final_colors[i])
            pc.set_alpha(0.7)
 
-    # Color the other violin elements
+    # Color other violin parts
     for partname in ('cbars', 'cmins', 'cmaxes', 'cmedians'):
         if partname in violin_parts:
             violin_parts[partname].set_edgecolor('black')
             violin_parts[partname].set_linewidth(1)
-
-    # Add data points as scatter plot overlay with much lower transparency
-    """
-    for i, data in enumerate(data_lists):
-        y = data
-        # Add some jitter to x positions for better visibility
-        x = np.random.normal(i, 0.04, size=len(y))
-        ax.scatter(x, y, alpha=0.2, s=15, color='black', edgecolors='none', zorder=3)  # No borders, more transparent
-    """
 
-    # Calculate reasonable y-axis limits to focus on the bulk of the data
+    # Set y-limits using percentiles to reduce extreme outlier influence
     all_data = [val for sublist in data_lists for val in sublist]
     if all_data:
-        # Use percentiles to exclude extreme outliers from the view
-        y_min = np.percentile(all_data, 5)  # 5th percentile
-        y_max = np.percentile(all_data, 95)  # 95th percentile
-
-        # Add some padding
+        y_min = np.percentile(all_data, 5)
+        y_max = np.percentile(all_data, 95)
        y_range = y_max - y_min
        y_padding = y_range * 0.15
        ax.set_ylim(y_min - y_padding, y_max + y_padding)
 
-    # Add IQR and median text annotations BELOW the violins
+    # Add IQR and median text annotations and dotted IQR lines
     for i, data in enumerate(data_lists):
         if len(data) > 0:
             q1, median, q3 = np.percentile(data, [25, 50, 75])
             iqr = q3 - q1
+
+            # Add dotted green lines for IQR
+            ax.hlines(
+                [q1, q3],
+                i - 0.25, i + 0.25,
+                colors='green',
+                linestyles='dotted',
+                linewidth=1.5,
+                zorder=3,
+                label='IQR (25th–75th)' if i == 0 else None  # Add label once
+            )
 
-            # Position text below the violin (using current y-axis limits)
+            # Text annotation below the violins
             y_min_current = ax.get_ylim()[0]
             y_text = y_min_current - (ax.get_ylim()[1] - ax.get_ylim()[0]) * 0.15
-
-            ax.text(i, y_text, f'Median: {median:.2f}\nIQR: {iqr:.2f}',
-                    horizontalalignment='center', fontsize=8,
-                    bbox=dict(boxstyle='round,pad=0.3', facecolor='white', alpha=0.8))
+            ax.text(
+                i, y_text, f'Median: {median:.2f}\nIQR: {iqr:.2f}',
+                ha='center', fontsize=8,
+                bbox=dict(boxstyle='round,pad=0.3', facecolor='white', alpha=0.8)
+            )
 
-    # Customize the plot
+    # Customize appearance
     ax.set_xticks(range(len(labels)))
     ax.set_xticklabels(labels, rotation=45, ha='right')
     ax.set_title(graph_title, fontsize=14, fontweight='bold')
     ax.set_ylabel('Normalized Values (Z-score-like)', fontsize=12)
     ax.grid(True, alpha=0.3)
 
-    # Add a horizontal line at y=0 (the identity centerpoint)
-    ax.axhline(y=0, color='red', linestyle='--', alpha=0.5, linewidth=1,
-               label='Identity Centerpoint')
+    # Add baseline
+    ax.axhline(y=0, color='red', linestyle='--', alpha=0.5, linewidth=1, label='Identity Centerpoint')
     ax.legend(loc='upper right')
 
-    # Adjust layout to prevent label cutoff and accommodate bottom text
-    plt.subplots_adjust(bottom=0.2)  # Extra space for bottom text
+    plt.subplots_adjust(bottom=0.2)
     plt.tight_layout()
-    plt.show()
+    plt.show()
+
+    # --- Outlier Detection ---
+    outliers_info = []
+    non_outlier_data = []
+
+    for i, data in enumerate(data_lists):
+        if len(data) > 0:
+            q1, median, q3 = np.percentile(data, [25, 50, 75])
+            iqr = q3 - q1
+            lower_bound = q1 - 1.5 * iqr
+            upper_bound = q3 + 1.5 * iqr
+
+            outliers = [val for val in data if val < lower_bound or val > upper_bound]
+            non_outliers = [val for val in data if lower_bound <= val <= upper_bound]
+
+            outliers_info.append({
+                'label': labels[i],
+                'outliers': outliers,
+                'lower_bound': lower_bound,
+                'upper_bound': upper_bound,
+                'total_count': len(data)
+            })
+            non_outlier_data.append(non_outliers)
+        else:
+            outliers_info.append({
+                'label': labels[i],
+                'outliers': [],
+                'lower_bound': None,
+                'upper_bound': None,
+                'total_count': 0
+            })
+            non_outlier_data.append([])
+
+    print("\n" + "="*60)
+    print("OUTLIER DETECTION SUMMARY")
+    print("="*60)
+    total_outliers = 0
+    for info in outliers_info:
+        n_outliers = len(info['outliers'])
+        total_outliers += n_outliers
+        if n_outliers > 0:
+            print(f"{info['label']}: {n_outliers} outliers out of {info['total_count']} points "
+                  f"({n_outliers/info['total_count']*100:.1f}%)")
+            print(f"  Outlier Removed Range: [{info['lower_bound']:.2f}, {info['upper_bound']:.2f}]")
+    if total_outliers == 0:
+        print("No outliers detected in any dataset.")
+    else:
+        print(f"\nTotal outliers across all datasets: {total_outliers}")
+    print("="*60 + "\n")
+
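
The new outlier summary applies the standard 1.5×IQR fence: values below q1 - 1.5*iqr or above q3 + 1.5*iqr are reported. A quick worked example of the bounds it prints (toy data, independent of the package):

    import numpy as np

    data = [1, 2, 3, 4, 5, 100]                      # one extreme value
    q1, q3 = np.percentile(data, [25, 75])           # 2.25, 4.75
    iqr = q3 - q1                                    # 2.5
    lower, upper = q1 - 1.5 * iqr, q3 + 1.5 * iqr    # -1.5, 8.5
    print([v for v in data if v < lower or v > upper])  # [100]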
{nettracer3d-1.1.3 → nettracer3d-1.1.6}/src/nettracer3d/nettracer.py

@@ -384,6 +384,13 @@ def invert_dict(d):
         inverted.setdefault(value, []).append(key)
     return inverted
 
+def revert_dict(d):
+    inverted = {}
+    for key, value_list in d.items():
+        for value in value_list:
+            inverted[value] = key
+    return inverted
+
 def invert_dict_special(d):
 
     d = invert_dict(d)
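
The newly hoisted revert_dict (previously a local helper inside community_cells; see the final hunk below) is the inverse companion to invert_dict: it flattens a dict of lists back into a value-to-key map. A minimal round-trip using the two functions exactly as defined above:

    node_to_community = {'n1': 1, 'n2': 1, 'n3': 2}

    grouped = invert_dict(node_to_community)   # {1: ['n1', 'n2'], 2: ['n3']}
    restored = revert_dict(grouped)            # {'n1': 1, 'n2': 1, 'n3': 2}
    assert restored == node_to_community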
@@ -1045,9 +1052,12 @@ def show_3d(arrays_3d=None, arrays_4d=None, down_factor=None, order=0, xy_scale=
         # Downsample arrays if specified
         arrays_3d = [downsample(array, down_factor, order=order) for array in arrays_3d] if arrays_3d is not None else None
         arrays_4d = [downsample(array, down_factor, order=order) for array in arrays_4d] if arrays_4d is not None else None
+        scale = [z_scale * down_factor, xy_scale * down_factor, xy_scale * down_factor]
+    else:
+        scale = [z_scale, xy_scale, xy_scale]
+
 
     viewer = napari.Viewer(ndisplay=3)
-    scale = [z_scale, xy_scale, xy_scale]  # [z, y, x] order for napari
 
     # Add 3D arrays if provided
     if arrays_3d is not None:
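
This fix makes the napari scale track the downsampling: when each axis shrinks by down_factor, every remaining voxel spans down_factor times the original physical distance, so the per-voxel scale must grow by the same factor to preserve the volume's real-world extent. A sketch of the arithmetic (values are illustrative, not from the package):

    down_factor = 2
    xy_scale, z_scale = 0.5, 2.0  # microns per voxel before downsampling (made-up values)

    # After 2x downsampling, one voxel covers two original voxels per axis,
    # so the physical size per voxel doubles and the total extent is unchanged
    scale = [z_scale * down_factor, xy_scale * down_factor, xy_scale * down_factor]
    print(scale)  # [4.0, 1.0, 1.0] in [z, y, x] order for napari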
@@ -3065,7 +3075,7 @@
             for _ in range(weight):
                 lista.append(u)
                 listb.append(v)
-                listc.append(weight)
+                listc.append(0)
 
         self._network_lists = [lista, listb, listc]
 
@@ -4291,6 +4301,88 @@
         self._network_lists = network_analysis.read_excel_to_lists(df)
         self._network, net_weights = network_analysis.weighted_network(df)
 
+    def create_id_network(self, n=5):
+        import ast
+        import random
+
+        if self.node_identities is None:
+            return
+
+        def invert_dict(d):
+            inverted = {}
+            for key, value in d.items():
+                inverted.setdefault(value, []).append(key)
+            return inverted
+
+        # Invert to get identity -> list of nodes
+        identity_to_nodes = invert_dict(self.node_identities)
+
+        G = nx.Graph()
+        edge_set = set()
+
+        # Step 1: Connect nodes within same exact identity
+        for identity, nodes in identity_to_nodes.items():
+            if len(nodes) <= 1:
+                continue
+
+            # Each node chooses n random neighbors from its identity group
+            for node in nodes:
+                available = [other for other in nodes if other != node]
+                num_to_choose = min(n, len(available))
+                neighbors = random.sample(available, num_to_choose)
+
+                for neighbor in neighbors:
+                    edge = tuple(sorted([node, neighbor]))
+                    edge_set.add(edge)
+
+        # Step 2: For list-like identities, connect across groups with shared sub-identities
+        for identity, nodes in identity_to_nodes.items():
+            if identity.startswith('['):
+                try:
+                    sub_identities = ast.literal_eval(identity)
+
+                    # For each sub-identity in this list-like identity
+                    for sub_id in sub_identities:
+                        # Find all OTHER identity groups that contain this sub-identity
+                        for other_identity, other_nodes in identity_to_nodes.items():
+                            if other_identity == identity:
+                                continue  # Skip connecting to same exact identity (already done in Step 1)
+
+                            # Check if other_identity contains sub_id
+                            contains_sub_id = False
+
+                            if other_identity.startswith('['):
+                                try:
+                                    other_sub_ids = ast.literal_eval(other_identity)
+                                    if sub_id in other_sub_ids:
+                                        contains_sub_id = True
+                                except (ValueError, SyntaxError):
+                                    pass
+                            elif other_identity == sub_id:
+                                # Single identity that matches our sub-identity
+                                contains_sub_id = True
+
+                            if contains_sub_id:
+                                # Each node from current identity connects to n nodes from other_identity
+                                for node in nodes:
+                                    num_to_choose = min(n, len(other_nodes))
+                                    if num_to_choose > 0:
+                                        neighbors = random.sample(other_nodes, num_to_choose)
+
+                                        for neighbor in neighbors:
+                                            edge = tuple(sorted([node, neighbor]))
+                                            edge_set.add(edge)
+
+                except (ValueError, SyntaxError):
+                    pass  # Not a valid list, treat as already handled in Step 1
+
+        G.add_edges_from(edge_set)
+        self.network = G
+
+
+
+
+
 
     def calculate_all(self, nodes, edges, xy_scale = 1, z_scale = 1, down_factor = None, search = None, diledge = None, inners = True, remove_trunk = 0, ignore_search_region = False, other_nodes = None, label_nodes = True, directory = None, GPU = True, fast_dil = True, skeletonize = False, GPU_downsample = None):
         """
         Method to calculate and save to mem all properties of a Network_3D object. In general, after initializing a Network_3D object, this method should be called on the node and edge masks that will be used to calculate the network.
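
In effect, the new create_id_network builds a random identity-affinity graph: each node is wired to up to n random peers sharing its exact identity, and list-like identities (strings such as "['A', 'B']") additionally bridge to every group sharing a sub-identity. A hedged usage sketch, assuming a bare Network_3D() construction and writable node_identities/network attributes (import path and toy identities are illustrative, not from the package docs):

    from nettracer3d.nettracer import Network_3D  # import path assumed

    net = Network_3D()
    net.node_identities = {
        1: 'A', 2: 'A', 3: 'B',
        4: "['A', 'B']",  # list-like identity: bridges the 'A' and 'B' groups
    }
    net.create_id_network(n=2)  # each node samples up to n peers per matching group

    # For this toy input the edge set is deterministic despite the random sampling:
    # (1,2) within 'A', plus (1,4), (2,4) via sub-identity 'A' and (3,4) via 'B'
    print(net.network.number_of_edges())  # 4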
@@ -5768,7 +5860,7 @@
         neighborhoods.visualize_cluster_composition_umap(self.node_centroids, None, id_dictionary = self.node_identities, graph_label = "Node ID", title = 'UMAP Visualization of Node Centroids')
 
 
-    def identity_umap(self, data):
+    def identity_umap(self, data, mode = 0):
 
         try:
 
@@ -5788,16 +5880,18 @@
                 else:
                     del umap_dict[item]
 
-            from scipy.stats import zscore
+            #from scipy.stats import zscore
 
             # Z-score normalize each marker (column)
-            for key in umap_dict:
-                umap_dict[key] = zscore(umap_dict[key])
-
+            #for key in umap_dict:
+                #umap_dict[key] = zscore(umap_dict[key])
 
             from . import neighborhoods
 
-            neighborhoods.visualize_cluster_composition_umap(umap_dict, None, id_dictionary = neighbor_classes, graph_label = "Node ID", title = 'UMAP Visualization of Node Identities by Z-Score')
+            if mode == 0:
+                neighborhoods.visualize_cluster_composition_umap(umap_dict, None, id_dictionary = neighbor_classes, graph_label = "Node ID", title = 'UMAP Visualization of Node Identities by Z-Score')
+            else:
+                neighborhoods.visualize_cluster_composition_umap(umap_dict, None, id_dictionary = neighbor_classes, graph_label = "Node ID", title = 'UMAP Visualization of Node Identities by Z-Score', neighborhoods = self.communities, original_communities = self.communities)
 
         except Exception as e:
             import traceback
@@ -5916,7 +6010,6 @@
                     neighbor_group[com] = neighbors[node]
                 except:
                     neighbor_group[com] = 0
-            print(neighbors)
             neighborhoods.visualize_cluster_composition_umap(umap_dict, id_set, neighborhoods = neighbor_group, original_communities = neighbors)
         elif label == 1:
             neighborhoods.visualize_cluster_composition_umap(umap_dict, id_set, label = True)
@@ -5929,6 +6022,19 @@
 
         return output, id_set
 
+    def group_nodes_by_intensity(self, data, count = None):
+
+        from . import neighborhoods
+
+        clusters = neighborhoods.cluster_arrays(data, count, seed = 42)
+
+        coms = {}
+
+        for i, cluster in enumerate(clusters):
+            coms[i + 1] = cluster
+
+        self.communities = revert_dict(coms)
+
     def assign_neighborhoods(self, seed, count, limit = None, prev_coms = None, proportional = False, mode = 0):
 
         from . import neighborhoods
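
group_nodes_by_intensity clusters nodes on intensity via neighborhoods.cluster_arrays, then stores the result through the module-level revert_dict, leaving self.communities as a node -> cluster-id map. A sketch of just the dict plumbing (the list-of-groups shape of cluster_arrays' output is an assumption):

    # Hypothetical cluster_arrays output: one list of node ids per cluster
    clusters = [[10, 11], [12]]

    # Re-key clusters as 1-based community ids, as the method does
    coms = {i + 1: cluster for i, cluster in enumerate(clusters)}  # {1: [10, 11], 2: [12]}

    # revert_dict then flattens this into node -> community
    communities = {node: com for com, nodes in coms.items() for node in nodes}
    print(communities)  # {10: 1, 11: 1, 12: 2}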
@@ -6079,13 +6185,6 @@
 
     def community_cells(self, size = 32, xy_scale = 1, z_scale = 1):
 
-        def revert_dict(d):
-            inverted = {}
-            for key, value_list in d.items():
-                for value in value_list:
-                    inverted[value] = key
-            return inverted
-
         size_x = int(size * xy_scale)
         size_z = int(size * z_scale)