nettracer3d 1.1.1__py3-none-any.whl → 1.2.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of nettracer3d might be problematic. Click here for more details.

nettracer3d/morphology.py CHANGED
@@ -205,15 +205,20 @@ def quantify_edge_node(nodes, edges, search = 0, xy_scale = 1, z_scale = 1, core
205
205
 
206
206
 # Helper methods for counting the lengths of skeletons:
207
207
 
208
- def calculate_skeleton_lengths(skeleton_binary, xy_scale=1.0, z_scale=1.0):
208
+ def calculate_skeleton_lengths(skeleton_binary, xy_scale=1.0, z_scale=1.0, skeleton_coords = None):
209
209
  """
210
210
  Calculate total length of all skeletons in a 3D binary image.
211
211
 
212
212
  skeleton_binary: 3D boolean array where True = skeleton voxel
213
213
  xy_scale, z_scale: physical units per voxel
214
214
  """
215
- # Find all skeleton voxels
216
- skeleton_coords = np.argwhere(skeleton_binary)
215
+
216
+ if skeleton_coords is None:
217
+ # Find all skeleton voxels
218
+ skeleton_coords = np.argwhere(skeleton_binary)
219
+ shape = skeleton_binary.shape
220
+ else:
221
+ shape = skeleton_binary # When coords are supplied, skeleton_binary is the shape tuple, not an array
217
222
 
218
223
  if len(skeleton_coords) == 0:
219
224
  return 0.0
@@ -222,7 +227,7 @@ def calculate_skeleton_lengths(skeleton_binary, xy_scale=1.0, z_scale=1.0):
222
227
  coord_to_idx = {tuple(coord): idx for idx, coord in enumerate(skeleton_coords)}
223
228
 
224
229
  # Build adjacency graph
225
- adjacency_list = build_adjacency_graph(skeleton_coords, coord_to_idx, skeleton_binary.shape)
230
+ adjacency_list = build_adjacency_graph(skeleton_coords, coord_to_idx, shape)
226
231
 
227
232
  # Calculate lengths using scaled distances
228
233
  total_length = calculate_graph_length(skeleton_coords, adjacency_list, xy_scale, z_scale)
@@ -200,10 +200,14 @@ def plot_dict_heatmap(unsorted_data_dict, id_set, figsize=(12, 8), title="Neighb
200
200
 
201
201
  data_dict = {k: unsorted_data_dict[k] for k in sorted(unsorted_data_dict.keys())}
202
202
  # Convert dict to 2D array for heatmap
203
- # Each row represents one key from the dict
204
203
  keys = list(data_dict.keys())
205
204
  data_matrix = np.array([data_dict[key] for key in keys])
206
-
205
+
206
+ # Move key 0 to the bottom if it exists as the first key
207
+ if keys and keys[0] == 0:
208
+ keys.append(keys.pop(0))
209
+ data_matrix = np.vstack([data_matrix[1:], data_matrix[0:1]])
210
+
207
211
  # Create the plot
208
212
  fig, ax = plt.subplots(figsize=figsize)
209
213
 
@@ -276,8 +280,13 @@ def plot_dict_heatmap(unsorted_data_dict, id_set, figsize=(12, 8), title="Neighb
276
280
  ax.set_xticks(np.arange(len(id_set)))
277
281
  ax.set_yticks(np.arange(len(keys)))
278
282
  ax.set_xticklabels(id_set)
279
- ax.set_yticklabels(keys)
280
-
283
+ labels = list(keys)
284
+ if labels and labels[-1] == 0:
285
+ labels[-1] = 'Excluded (0)'
286
+ ax.set_yticklabels(labels)
287
+
288
+
289
+
281
290
  # Rotate x-axis labels for better readability
282
291
  plt.setp(ax.get_xticklabels(), rotation=45, ha="right", rotation_mode="anchor")
283
292
 
@@ -1128,7 +1137,7 @@ def create_node_heatmap(node_intensity, node_centroids, shape=None, is_3d=True,
1128
1137
 
1129
1138
  def create_violin_plots(data_dict, graph_title="Violin Plots"):
1130
1139
  """
1131
- Create violin plots from dictionary data with distinct colors.
1140
+ Create violin plots from dictionary data with distinct colors and IQR lines.
1132
1141
 
1133
1142
  Parameters:
1134
1143
  data_dict (dict): Dictionary where keys are column headers (strings) and
@@ -1140,110 +1149,133 @@ def create_violin_plots(data_dict, graph_title="Violin Plots"):
1140
1149
  return
1141
1150
 
1142
1151
  # Prepare data
1152
+ data_dict = dict(sorted(data_dict.items()))
1143
1153
  labels = list(data_dict.keys())
1144
1154
  data_lists = list(data_dict.values())
1145
1155
 
1146
- # Generate colors using the community color strategy
1156
+ # Generate colors
1147
1157
  try:
1148
- # Create a mock community dict for color generation
1149
- mock_community_dict = {i: i+1 for i in range(len(labels))} # No outliers for simplicity
1150
-
1151
- # Get distinct colors
1152
- n_colors = len(labels)
1153
- colors_rgb = community_extractor.generate_distinct_colors(n_colors)
1154
-
1155
- # Sort by data size for consistent color assignment (like community sizes)
1156
- data_sizes = [(i, len(data_lists[i])) for i in range(len(data_lists))]
1157
- sorted_indices = sorted(data_sizes, key=lambda x: (-x[1], x[0]))
1158
-
1159
- # Create color mapping
1160
- colors = []
1161
- for i, _ in sorted_indices:
1162
- color_idx = sorted_indices.index((i, _))
1163
- if color_idx < len(colors_rgb):
1164
- # Convert RGB (0-255) to matplotlib format (0-1)
1165
- rgb_normalized = tuple(c/255.0 for c in colors_rgb[color_idx])
1166
- colors.append(rgb_normalized)
1167
- else:
1168
- colors.append('gray') # Fallback color
1169
-
1170
- # Reorder colors to match original label order
1171
- final_colors = ['gray'] * len(labels)
1172
- for idx, (original_idx, _) in enumerate(sorted_indices):
1173
- final_colors[original_idx] = colors[idx]
1174
-
1158
+ final_colors = generate_distinct_colors(len(labels))
1175
1159
  except Exception as e:
1176
1160
  print(f"Color generation failed, using default colors: {e}")
1177
- # Fallback to default matplotlib colors
1178
1161
  final_colors = plt.cm.Set3(np.linspace(0, 1, len(labels)))
1179
1162
 
1180
- # Create the plot
1181
1163
  fig, ax = plt.subplots(figsize=(max(8, len(labels) * 1.5), 6))
1182
1164
 
1183
1165
  # Create violin plots
1184
- violin_parts = ax.violinplot(data_lists, positions=range(len(labels)),
1185
- showmeans=False, showmedians=True, showextrema=True)
1166
+ violin_parts = ax.violinplot(
1167
+ data_lists, positions=range(len(labels)),
1168
+ showmeans=False, showmedians=True, showextrema=True
1169
+ )
1186
1170
 
1187
- # Color the violins
1171
+ # Color violins
1188
1172
  for i, pc in enumerate(violin_parts['bodies']):
1189
1173
  if i < len(final_colors):
1190
1174
  pc.set_facecolor(final_colors[i])
1191
1175
  pc.set_alpha(0.7)
1192
1176
 
1193
- # Color the other violin elements
1177
+ # Color other violin parts
1194
1178
  for partname in ('cbars', 'cmins', 'cmaxes', 'cmedians'):
1195
1179
  if partname in violin_parts:
1196
1180
  violin_parts[partname].set_edgecolor('black')
1197
1181
  violin_parts[partname].set_linewidth(1)
1198
-
1199
- # Add data points as scatter plot overlay with much lower transparency
1200
- """
1201
- for i, data in enumerate(data_lists):
1202
- y = data
1203
- # Add some jitter to x positions for better visibility
1204
- x = np.random.normal(i, 0.04, size=len(y))
1205
- ax.scatter(x, y, alpha=0.2, s=15, color='black', edgecolors='none', zorder=3) # No borders, more transparent
1206
- """
1207
1182
 
1208
- # Calculate reasonable y-axis limits to focus on the bulk of the data
1183
+ # Set y-limits using percentiles to reduce extreme outlier influence
1209
1184
  all_data = [val for sublist in data_lists for val in sublist]
1210
1185
  if all_data:
1211
- # Use percentiles to exclude extreme outliers from the view
1212
- y_min = np.percentile(all_data, 5) # 5th percentile
1213
- y_max = np.percentile(all_data, 95) # 95th percentile
1214
-
1215
- # Add some padding
1186
+ y_min = np.percentile(all_data, 5)
1187
+ y_max = np.percentile(all_data, 95)
1216
1188
  y_range = y_max - y_min
1217
1189
  y_padding = y_range * 0.15
1218
1190
  ax.set_ylim(y_min - y_padding, y_max + y_padding)
1219
1191
 
1220
- # Add IQR and median text annotations BELOW the violins
1192
+ # Add IQR and median text annotations and dotted IQR lines
1221
1193
  for i, data in enumerate(data_lists):
1222
1194
  if len(data) > 0:
1223
1195
  q1, median, q3 = np.percentile(data, [25, 50, 75])
1224
1196
  iqr = q3 - q1
1197
+
1198
+ # Add dotted green lines for IQR
1199
+ ax.hlines(
1200
+ [q1, q3],
1201
+ i - 0.25, i + 0.25,
1202
+ colors='green',
1203
+ linestyles='dotted',
1204
+ linewidth=1.5,
1205
+ zorder=3,
1206
+ label='IQR (25th–75th)' if i == 0 else None # Add label once
1207
+ )
1225
1208
 
1226
- # Position text below the violin (using current y-axis limits)
1209
+ # Text annotation below the violins
1227
1210
  y_min_current = ax.get_ylim()[0]
1228
1211
  y_text = y_min_current - (ax.get_ylim()[1] - ax.get_ylim()[0]) * 0.15
1229
-
1230
- ax.text(i, y_text, f'Median: {median:.2f}\nIQR: {iqr:.2f}',
1231
- horizontalalignment='center', fontsize=8,
1232
- bbox=dict(boxstyle='round,pad=0.3', facecolor='white', alpha=0.8))
1212
+ ax.text(
1213
+ i, y_text, f'Median: {median:.2f}\nIQR: {iqr:.2f}',
1214
+ ha='center', fontsize=8,
1215
+ bbox=dict(boxstyle='round,pad=0.3', facecolor='white', alpha=0.8)
1216
+ )
1233
1217
 
1234
- # Customize the plot
1218
+ # Customize appearance
1235
1219
  ax.set_xticks(range(len(labels)))
1236
1220
  ax.set_xticklabels(labels, rotation=45, ha='right')
1237
1221
  ax.set_title(graph_title, fontsize=14, fontweight='bold')
1238
1222
  ax.set_ylabel('Normalized Values (Z-score-like)', fontsize=12)
1239
1223
  ax.grid(True, alpha=0.3)
1240
1224
 
1241
- # Add a horizontal line at y=0 (the identity centerpoint)
1242
- ax.axhline(y=0, color='red', linestyle='--', alpha=0.5, linewidth=1,
1243
- label='Identity Centerpoint')
1225
+ # Add baseline
1226
+ ax.axhline(y=0, color='red', linestyle='--', alpha=0.5, linewidth=1, label='Identity Basepoint')
1244
1227
  ax.legend(loc='upper right')
1245
1228
 
1246
- # Adjust layout to prevent label cutoff and accommodate bottom text
1247
- plt.subplots_adjust(bottom=0.2) # Extra space for bottom text
1229
+ plt.subplots_adjust(bottom=0.2)
1248
1230
  plt.tight_layout()
1249
- plt.show()
1231
+ plt.show()
1232
+
1233
+ # --- Outlier Detection ---
1234
+ outliers_info = []
1235
+ non_outlier_data = []
1236
+
1237
+ for i, data in enumerate(data_lists):
1238
+ if len(data) > 0:
1239
+ q1, median, q3 = np.percentile(data, [25, 50, 75])
1240
+ iqr = q3 - q1
1241
+ lower_bound = q1 - 1.5 * iqr
1242
+ upper_bound = q3 + 1.5 * iqr
1243
+
1244
+ outliers = [val for val in data if val < lower_bound or val > upper_bound]
1245
+ non_outliers = [val for val in data if lower_bound <= val <= upper_bound]
1246
+
1247
+ outliers_info.append({
1248
+ 'label': labels[i],
1249
+ 'outliers': outliers,
1250
+ 'lower_bound': lower_bound,
1251
+ 'upper_bound': upper_bound,
1252
+ 'total_count': len(data)
1253
+ })
1254
+ non_outlier_data.append(non_outliers)
1255
+ else:
1256
+ outliers_info.append({
1257
+ 'label': labels[i],
1258
+ 'outliers': [],
1259
+ 'lower_bound': None,
1260
+ 'upper_bound': None,
1261
+ 'total_count': 0
1262
+ })
1263
+ non_outlier_data.append([])
1264
+
1265
+ print("\n" + "="*60)
1266
+ print("OUTLIER DETECTION SUMMARY")
1267
+ print("="*60)
1268
+ total_outliers = 0
1269
+ for info in outliers_info:
1270
+ n_outliers = len(info['outliers'])
1271
+ total_outliers += n_outliers
1272
+ if n_outliers > 0:
1273
+ print(f"{info['label']}: {n_outliers} outliers out of {info['total_count']} points "
1274
+ f"({n_outliers/info['total_count']*100:.1f}%)")
1275
+ print(f" Outlier Removed Range: [{info['lower_bound']:.2f}, {info['upper_bound']:.2f}]")
1276
+ if total_outliers == 0:
1277
+ print("No outliers detected in any dataset.")
1278
+ else:
1279
+ print(f"\nTotal outliers across all datasets: {total_outliers}")
1280
+ print("="*60 + "\n")
1281
+