nettracer3d 0.2.6__py3-none-any.whl → 0.2.7__py3-none-any.whl

This diff shows the changes between publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
@@ -2,6 +2,8 @@ import pandas as pd
 import networkx as nx
 import tifffile
 import numpy as np
+from typing import List, Dict, Tuple
+from collections import defaultdict, Counter
 from networkx.algorithms import community
 from scipy import ndimage
 from scipy.ndimage import zoom
@@ -601,8 +603,159 @@ def extract_mothers(nodes, excel_file_path, centroid_dic = None, directory = Non
 
 
 
+def find_hub_nodes(G: nx.Graph, proportion: float = 0.1) -> List:
+    """
+    Identifies hub nodes in a network based on average shortest path length,
+    handling multiple connected components.
+
+    Args:
+        G (nx.Graph): NetworkX graph (can have multiple components)
+        proportion (float): Proportion of top nodes to return (0.0 to 1.0)
+
+    Returns:
+        List of nodes identified as hubs across all components
+    """
+    if not 0 < proportion <= 1:
+        raise ValueError("Proportion must be between 0 and 1")
+
+    # Get connected components
+    components = list(nx.connected_components(G))
+
+    # Dictionary to store average path lengths for all nodes
+    avg_path_lengths: Dict[int, float] = {}
+
+    # Process each component separately
+    for component in components:
+        # Create subgraph for this component
+        subgraph = G.subgraph(component)
+
+        # Calculate average shortest path length for each node in this component
+        for node in subgraph.nodes():
+            # Get shortest paths from this node to all others in the component
+            path_lengths = nx.single_source_shortest_path_length(subgraph, node)
+            # Calculate average path length within this component
+            avg_length = sum(path_lengths.values()) / (len(subgraph.nodes()) - 1)
+            avg_path_lengths[node] = avg_length
+
+    # Sort nodes by average path length (ascending)
+    sorted_nodes = sorted(avg_path_lengths.items(), key=lambda x: x[1])
+
+    # Calculate number of nodes to return
+    num_nodes = int(np.ceil(len(G.nodes()) * proportion))
+
+    # Return the top nodes (those with lowest average path lengths)
+    hub_nodes = [node for node, _ in sorted_nodes[:num_nodes]]
+
+    return hub_nodes
+
+
 
+def generate_distinct_colors(n_colors: int) -> List[Tuple[int, int, int]]:
+    """
+    Generate visually distinct RGB colors using HSV color space.
+    Colors are generated with maximum saturation and value, varying only in hue.
+
+    Args:
+        n_colors: Number of distinct colors needed
+
+    Returns:
+        List of RGB tuples
+    """
+    colors = []
+    for i in range(n_colors):
+        hue = i / n_colors
+        # Convert HSV to RGB (assuming S=V=1)
+        h = hue * 6
+        c = int(255)
+        x = int(255 * (1 - abs(h % 2 - 1)))
+
+        if h < 1:
+            rgb = (c, x, 0)
+        elif h < 2:
+            rgb = (x, c, 0)
+        elif h < 3:
+            rgb = (0, c, x)
+        elif h < 4:
+            rgb = (0, x, c)
+        elif h < 5:
+            rgb = (x, 0, c)
+        else:
+            rgb = (c, 0, x)
+
+        colors.append(rgb)
+    return colors
 
+def assign_community_colors(community_dict: Dict[int, int], labeled_array: np.ndarray) -> np.ndarray:
+    """
+    Assign distinct colors to communities and create an RGB image.
+
+    Args:
+        community_dict: Dictionary mapping node IDs to community numbers
+        labeled_array: 3D numpy array with labels corresponding to node IDs
+
+    Returns:
+        RGB-coded numpy array (H, W, D, 3)
+    """
+    # Get unique communities and their sizes
+    communities = set(community_dict.values())
+    community_sizes = Counter(community_dict.values())
+
+    # Sort communities by size (descending)
+    sorted_communities = sorted(communities, key=lambda x: community_sizes[x], reverse=True)
+
+    # Generate distinct colors
+    colors = generate_distinct_colors(len(communities))
+
+    # Create mapping from community to color
+    community_to_color = {comm: colors[i] for i, comm in enumerate(sorted_communities)}
+
+    # Create mapping from node ID to color
+    node_to_color = {node: community_to_color[comm] for node, comm in community_dict.items()}
+
+    # Create RGB array
+    rgb_array = np.zeros((*labeled_array.shape, 3), dtype=np.uint8)
+
+    # Assign colors to each voxel based on its label
+    for label in np.unique(labeled_array):
+        if label in node_to_color:  # Skip background (usually label 0)
+            mask = labeled_array == label
+            for i in range(3):  # RGB channels
+                rgb_array[mask, i] = node_to_color[label][i]
+
+    return rgb_array
+
+def assign_community_grays(community_dict: Dict[int, int], labeled_array: np.ndarray) -> np.ndarray:
+    """
+    Assign distinct grayscale values to communities.
+
+    Args:
+        community_dict: Dictionary mapping node IDs to community numbers
+        labeled_array: 3D numpy array with labels corresponding to node IDs
+
+    Returns:
+        grayscale numpy array
+    """
+    # Get unique communities
+    communities = set(community_dict.values())
+    n_communities = len(communities)
+
+    # Generate evenly spaced grayscale values (excluding pure black for background)
+    gray_values = np.linspace(1, 255, n_communities, dtype=np.uint8)
+
+    # Create direct mapping from node ID to grayscale value
+    node_to_gray = {node: gray_values[list(communities).index(comm)]
+                    for node, comm in community_dict.items()}
+
+    # Create output array
+    gray_array = np.zeros_like(labeled_array, dtype=np.uint8)
+
+    # Use numpy's vectorized operations for faster assignment
+    unique_labels = np.unique(labeled_array)
+    for label in unique_labels:
+        if label in node_to_gray:
+            gray_array[labeled_array == label] = node_to_gray[label]
+
+    return gray_array
 
 
 
 if __name__ == "__main__":
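The helpers added above are plain module-level functions. A minimal usage sketch, not part of the diff: the toy graph, placeholder community assignment, and random label volume below are illustrative only, and the functions are assumed to already be in scope (imported from the module this hunk modifies).

import numpy as np
import networkx as nx

# Toy graph: hubs are the nodes with the lowest average shortest-path length.
G = nx.karate_club_graph()
hubs = find_hub_nodes(G, proportion=0.1)                 # top 10% most central nodes

# Placeholder community assignment and label volume; labels 1-34 stand for node IDs,
# 0 is background and stays black because it has no entry in the dictionary.
community_dict = {node + 1: node % 3 for node in G.nodes()}
labeled = np.random.randint(0, 35, size=(4, 16, 16))

rgb = assign_community_colors(community_dict, labeled)   # shape (4, 16, 16, 3), uint8
gray = assign_community_grays(community_dict, labeled)   # shape (4, 16, 16), uint8

assign_community_colors sorts communities by size before assigning hues, so the largest community always receives the first generated color, while assign_community_grays spreads communities over gray values 1-255, keeping 0 for background.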
nettracer3d/hub_getter.py CHANGED
@@ -115,7 +115,7 @@ def labels_to_boolean(label_array, labels_list):
 
     return boolean_array
 
-def get_hubs(nodepath, network, proportion = None, directory = None, centroids = None):
+def get_hubs(nodepath, network, proportion = None, directory = None, centroids = None, gen_more_images = False):
 
     if type(nodepath) == str:
         nodepath = tifffile.imread(nodepath)
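The only change to get_hubs is the new gen_more_images keyword, which defaults to False, so existing callers keep their previous behavior. A minimal call sketch; the file names and argument values below are placeholders rather than anything taken from the package:

# nodepath may be a path to a labeled TIFF (it is read with tifffile when given
# as a string); the remaining arguments follow the existing pipeline's conventions.
get_hubs("nodes.tif", "network.xlsx",
         proportion=0.1,
         directory="output_dir",
         gen_more_images=True)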
nettracer3d/modularity.py CHANGED
@@ -7,6 +7,8 @@ import matplotlib.colors as mcolors
 import os
 from . import network_analysis
 from . import simple_network
+import numpy as np
+import itertools
 
 def open_network(excel_file_path):
 
@@ -159,16 +161,7 @@ def louvain_mod(G, edge_weights=None, identifier=None, geometric = False, geo_in
     for i, component in enumerate(connected_components):
         # Apply the Louvain community detection on the subgraph
         partition = community_louvain.best_partition(G.subgraph(component))
-
-        # Invert the partition dictionary to get communities
-        #communities = {}
-        #for node, comm_id in partition.items():
-            #communities.setdefault(comm_id, []).append(node)
-        #communities = list(communities.values())
-
-        # Assign a different color to each community within the component for visualization
-        #colors = [mcolors.to_hex(plt.cm.tab10(i / len(connected_components))[:3]) for _ in range(len(communities))]
-
+
         # Calculate modularity
         modularity = community_louvain.modularity(partition, G.subgraph(component))
         num_nodes = len(component)
@@ -180,7 +173,7 @@ def louvain_mod(G, edge_weights=None, identifier=None, geometric = False, geo_in
 
 def show_communities_flex(G, master_list, normalized_weights, geo_info = None, geometric=False, directory=None, weighted=True, partition=None, style=0):
     if partition is None:
-        partition, normalized_weights = community_partition(master_list, weighted=weighted, style=style)
+        partition, normalized_weights, _ = community_partition(master_list, weighted=weighted, style=style)
     print(partition)
     if normalized_weights is None:
         G, edge_weights = network_analysis.weighted_network(master_list)
@@ -289,7 +282,242 @@ def show_communities_flex(G, master_list, normalized_weights, geo_info = None, g
 
 
 
-def community_partition(master_list, weighted = False, style = 0):
+def community_partition(master_list, weighted = False, style = 0, dostats = True):
+
+    def calculate_network_stats(G, communities):
+        """
+        Calculate comprehensive network statistics for the graph and its communities.
+
+        Parameters:
+        -----------
+        G : networkx.Graph
+            The input graph
+        communities : list
+            List of sets/lists containing node ids for each community
+
+        Returns:
+        --------
+        dict
+            Dictionary containing various network statistics
+        """
+        stats = {}
+
+        try:
+
+            # Overall network modularity
+            stats['Modularity Entire Network'] = community.modularity(G, communities)
+        except:
+            pass
+
+        try:
+            # Component-level modularity
+            connected_components = list(nx.connected_components(G))
+            if len(connected_components) > 1:
+                for i, component in enumerate(connected_components):
+                    subgraph = G.subgraph(component)
+                    component_communities = list(community.label_propagation_communities(subgraph))
+                    modularity = community.modularity(subgraph, component_communities)
+                    num_nodes = len(component)
+                    stats[f'Modularity of component with {num_nodes} nodes'] = modularity
+        except:
+            pass
+
+        try:
+            # Community size statistics
+            stats['Number of Communities'] = len(communities)
+            community_sizes = [len(com) for com in communities]
+            stats['Community Sizes'] = community_sizes
+            stats['Average Community Size'] = np.mean(community_sizes)
+        except:
+            pass
+
+        try:
+
+            # Per-community statistics
+            for i, com in enumerate(communities):
+                subgraph = G.subgraph(com)
+
+                # Basic community metrics
+                stats[f'Community {i+1} Density'] = nx.density(subgraph)
+                stats[f'Community {i+1} Conductance'] = nx.conductance(G, com)
+                stats[f'Community {i+1} Avg Clustering'] = nx.average_clustering(subgraph)
+
+                # Degree centrality
+                degree_cent = nx.degree_centrality(subgraph)
+                stats[f'Community {i+1} Avg Degree Centrality'] = np.mean(list(degree_cent.values()))
+
+                # Average path length (only for connected subgraphs)
+                if nx.is_connected(subgraph):
+                    stats[f'Community {i+1} Avg Path Length'] = nx.average_shortest_path_length(subgraph)
+        except:
+            pass
+
+        try:
+            # Global network metrics
+            stats['Global Clustering Coefficient'] = nx.average_clustering(G)
+        except:
+            pass
+        try:
+            stats['Assortativity'] = nx.degree_assortativity_coefficient(G)
+        except:
+            pass
+
+        def count_inter_community_edges(G, communities):
+            inter_edges = 0
+            for com1, com2 in itertools.combinations(communities, 2):
+                inter_edges += len(list(nx.edge_boundary(G, com1, com2)))
+            return inter_edges
+
+        try:
+            stats['Inter-community Edges'] = count_inter_community_edges(G, communities)
+        except:
+            pass
+
+        # Calculate mixing parameter (ratio of external to total edges for nodes)
+        def mixing_parameter(G, communities):
+            external_edges = 0
+            total_edges = 0
+            for com in communities:
+                subgraph = G.subgraph(com)
+                internal_edges = subgraph.number_of_edges()
+                total_com_edges = sum(G.degree(node) for node in com)
+                external_edges += total_com_edges - (2 * internal_edges)
+                total_edges += total_com_edges
+            return external_edges / total_edges
+
+        try:
+            stats['Mixing Parameter'] = mixing_parameter(G, communities)
+        except:
+            pass
+
+        return stats
+
+    def calculate_louvain_network_stats(G, partition):
+        """
+        Calculate comprehensive network statistics for the graph using Louvain community detection.
+
+        Parameters:
+        -----------
+        G : networkx.Graph
+            The input graph
+        partition : dict
+            Dictionary mapping node -> community id from Louvain detection
+
+        Returns:
+        --------
+        dict
+            Dictionary containing various network statistics
+        """
+        stats = {}
+
+        # Convert partition dict to communities list format
+        communities = []
+        max_community = max(partition.values())
+        for com_id in range(max_community + 1):
+            community_nodes = {node for node, com in partition.items() if com == com_id}
+            if community_nodes:  # Only add non-empty communities
+                communities.append(community_nodes)
+
+        try:
+            # Overall network modularity using Louvain
+            stats['Modularity Entire Network'] = community_louvain.modularity(partition, G)
+        except:
+            pass
+
+        try:
+            # Component-level modularity
+            connected_components = list(nx.connected_components(G))
+            if len(connected_components) > 1:
+                for i, component in enumerate(connected_components):
+                    subgraph = G.subgraph(component)
+                    subgraph_partition = community_louvain.best_partition(subgraph)
+                    modularity = community_louvain.modularity(subgraph_partition, subgraph)
+                    num_nodes = len(component)
+                    stats[f'Modularity of component with {num_nodes} nodes'] = modularity
+        except:
+            pass
+
+        try:
+            # Community size statistics
+            stats['Number of Communities'] = len(communities)
+            community_sizes = [len(com) for com in communities]
+            stats['Community Sizes'] = community_sizes
+            stats['Average Community Size'] = np.mean(community_sizes)
+        except:
+            pass
+
+        try:
+            # Per-community statistics
+            for i, com in enumerate(communities):
+                subgraph = G.subgraph(com)
+
+                # Basic community metrics
+                stats[f'Community {i+1} Density'] = nx.density(subgraph)
+                stats[f'Community {i+1} Conductance'] = nx.conductance(G, com)
+                stats[f'Community {i+1} Avg Clustering'] = nx.average_clustering(subgraph)
+
+                # Degree centrality
+                degree_cent = nx.degree_centrality(subgraph)
+                stats[f'Community {i+1} Avg Degree Centrality'] = np.mean(list(degree_cent.values()))
+
+                # Average path length (only for connected subgraphs)
+                if nx.is_connected(subgraph):
+                    stats[f'Community {i+1} Avg Path Length'] = nx.average_shortest_path_length(subgraph)
+        except:
+            pass
+
+        try:
+            # Add some Louvain-specific statistics
+            stats['Partition Resolution'] = 1.0  # Default resolution parameter
+        except:
+            pass
+        try:
+            stats['Number of Iterations'] = len(set(partition.values()))
+        except:
+            pass
+
+        # Global network metrics
+        try:
+            stats['Global Clustering Coefficient'] = nx.average_clustering(G)
+        except:
+            pass
+        try:
+            stats['Assortativity'] = nx.degree_assortativity_coefficient(G)
+        except:
+            pass
+
+        def count_inter_community_edges(G, communities):
+            inter_edges = 0
+            for com1, com2 in itertools.combinations(communities, 2):
+                inter_edges += len(list(nx.edge_boundary(G, com1, com2)))
+            return inter_edges
+
+        try:
+            stats['Inter-community Edges'] = count_inter_community_edges(G, communities)
+        except:
+            pass
+
+        # Calculate mixing parameter (ratio of external to total edges for nodes)
+        def mixing_parameter(G, communities):
+            external_edges = 0
+            total_edges = 0
+            for com in communities:
+                subgraph = G.subgraph(com)
+                internal_edges = subgraph.number_of_edges()
+                total_com_edges = sum(G.degree(node) for node in com)
+                external_edges += total_com_edges - (2 * internal_edges)
+                total_edges += total_com_edges
+            return external_edges / total_edges
+
+        try:
+            stats['Mixing Parameter'] = mixing_parameter(G, communities)
+        except:
+            pass
+
+
+        return stats
+
+    stats = {}
 
     if weighted:
         G, edge_weights = network_analysis.weighted_network(master_list)
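The dostats flag and the two nested stats helpers above feed the return-value change made in the remaining hunks of this file: every branch of community_partition now returns a (partition, weights, stats) triple instead of a pair, with stats left as an empty dict when dostats is False, and show_communities_flex was updated accordingly to unpack the extra value. A minimal call sketch; the import path is inferred from the file layout shown in this diff, and master_list stands for whatever edge list the existing pipeline already passes in:

from nettracer3d.modularity import community_partition

# master_list: placeholder for the edge list the surrounding code already builds.
partition, weights, stats = community_partition(master_list, weighted=True, style=0)

print(stats.get('Number of Communities'))
print(stats.get('Modularity Entire Network'))  # keys are simply absent when a metric could not be computed

Because calculate_network_stats and calculate_louvain_network_stats are defined inside community_partition, they are only reachable through this call.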
@@ -316,7 +544,12 @@ def community_partition(master_list, weighted = False, style = 0):
         # Perform Louvain community detection
         partition = community_louvain.best_partition(G)
 
-        return partition, normalized_weights
+        if dostats:
+
+            stats = calculate_louvain_network_stats(G, partition)
+
+
+        return partition, normalized_weights, stats
 
     elif style == 1:
 
@@ -330,7 +563,11 @@ def community_partition(master_list, weighted = False, style = 0):
         # Perform Louvain community detection
         partition = community_louvain.best_partition(G)
 
-        return partition, None
+        if dostats:
+
+            stats = calculate_louvain_network_stats(G, partition)
+
+        return partition, None, stats
 
     elif style == 0 and weighted:
 
@@ -357,7 +594,13 @@ def community_partition(master_list, weighted = False, style = 0):
             for node in com:
                 output[node] = i + 1
 
-        return output, normalized_weights
+        if dostats:
+
+            stats = calculate_network_stats(G, communities)
+
+
+
+        return output, normalized_weights, stats
 
     elif style == 0:
 
@@ -370,9 +613,6 @@ def community_partition(master_list, weighted = False, style = 0):
         # Add edges from the DataFrame
         G.add_edges_from(edges)
 
-        # Detect communities using label propagation
-        communities = list(community.label_propagation_communities(G))
-        output = {}
 
         # Detect communities using label propagation
         communities = list(community.label_propagation_communities(G))
@@ -381,7 +621,11 @@ def community_partition(master_list, weighted = False, style = 0):
             for node in com:
                 output[node] = i + 1
 
-        return output, None
+        if dostats:
+
+            stats = calculate_network_stats(G, communities)
+
+        return output, None, stats
 
 
 
nettracer3d/morphology.py CHANGED
@@ -5,6 +5,7 @@ from scipy.ndimage import zoom
 import multiprocessing as mp
 from concurrent.futures import ThreadPoolExecutor, as_completed
 import tifffile
+from functools import partial
 import pandas as pd
 
 def get_reslice_indices(args):
@@ -70,7 +71,7 @@ def _get_node_edge_dict(label_array, edge_array, label, dilate_xy, dilate_z, cor
 
     # Create a boolean mask where elements with the specified label are True
     label_array = label_array == label
-    dil_array = nettracer.dilate_3D(label_array, dilate_xy, dilate_xy, dilate_z) #Dilate the label to see where the dilated label overlaps
+    dil_array = nettracer.dilate_3D_recursive(label_array, dilate_xy, dilate_xy, dilate_z) #Dilate the label to see where the dilated label overlaps
 
     if cores == 0: #For getting the volume of objects. Cores presumes you want the 'core' included in the interaction.
         edge_array = edge_array * dil_array # Filter the edges by the label in question
@@ -189,25 +190,161 @@ def quantify_edge_node(nodes, edges, search = 0, xy_scale = 1, z_scale = 1, core
 
     return edge_quants
 
-def calculate_voxel_volumes(array, xy_scale = 1, z_scale = 1):
+
+def calculate_voxel_volumes(array, xy_scale=1, z_scale=1):
     """
-    Calculate voxel volumes for each uniquely labelled object in a 3D numpy array.
+    Calculate voxel volumes for each uniquely labelled object in a 3D numpy array
+    using parallel processing.
 
     Args:
         array: 3D numpy array where different objects are marked with different integer labels
+        xy_scale: Scale factor for x and y dimensions
+        z_scale: Scale factor for z dimension
 
     Returns:
         Dictionary mapping object labels to their voxel volumes
     """
+
+    def process_volume_chunk(chunk_data, labels, xy_scale, z_scale):
+        """
+        Calculate volumes for a chunk of the array.
+
+        Args:
+            chunk_data: 3D numpy array chunk
+            labels: Array of unique labels to process
+            xy_scale: Scale factor for x and y dimensions
+            z_scale: Scale factor for z dimension
+
+        Returns:
+            Dictionary of label: volume pairs for this chunk
+        """
+        chunk_volumes = {}
+        for label in labels:
+            volume = np.count_nonzero(chunk_data == label) * (xy_scale**2) * z_scale
+            if volume > 0:  # Only include if object exists in this chunk
+                chunk_volumes[label] = volume
+        return chunk_volumes
+
     # Get unique labels (excluding 0 which typically represents background)
     labels = np.unique(array)
     if len(labels) == 2:
         array, _ = nettracer.label_objects(array)
         labels = np.unique(array)
-
     labels = labels[labels != 0] # Remove background label if present
 
-    # Create dictionary of label: volume pairs
-    volumes = {label: (np.count_nonzero(array == label) * (xy_scale**2) * z_scale) for label in labels}
+    if len(labels) == 0:
+        return {}
+
+    # Get number of CPU cores
+    num_cores = mp.cpu_count()
+
+    # Calculate chunk size along y-axis
+    chunk_size = array.shape[1] // num_cores
+    if chunk_size < 1:
+        chunk_size = 1
+
+    # Create chunks along y-axis
+    chunks = []
+    for i in range(0, array.shape[1], chunk_size):
+        end = min(i + chunk_size, array.shape[1])
+        chunks.append(array[:, i:end, :])
+
+    # Process chunks in parallel
+    process_func = partial(process_volume_chunk,
+                           labels=labels,
+                           xy_scale=xy_scale,
+                           z_scale=z_scale)
+
+    volumes = {}
+    with ThreadPoolExecutor(max_workers=num_cores) as executor:
+        chunk_results = list(executor.map(process_func, chunks))
+
+    # Combine results from all chunks
+    for chunk_volumes in chunk_results:
+        for label, volume in chunk_volumes.items():
+            if label in volumes:
+                volumes[label] += volume
+            else:
+                volumes[label] = volume
+
+    return volumes
+
+
+def search_neighbor_ids(nodes, targets, id_dict, neighborhood_dict, totals, search, xy_scale, z_scale):
+
+
+    targets = np.isin(nodes, targets)
+    targets = nettracer.binarize(targets)
+
+    dilate_xy, dilate_z = nettracer.dilation_length_to_pixels(xy_scale, z_scale, search, search)
+    print(f"Dilation parameters - xy: {dilate_xy}, z: {dilate_z}")
+
+    targets = nettracer.dilate_3D_recursive(targets, dilate_xy, dilate_xy, dilate_z)
+    targets = targets != 0
+    print(f"After dilation - targets shape: {targets.shape}, sum: {np.sum(targets)}")
+
+    targets = targets * nodes
+    print(f"After multiplication with nodes - unique values in targets: {np.unique(targets)}")
+
+    unique, counts = np.unique(targets, return_counts=True)
+    count_dict = dict(zip(unique, counts))
+    print(f"Initial count_dict: {count_dict}")
+
+    del count_dict[0]
+    print(f"count_dict after removing zeros: {count_dict}")
+
+    unique, counts = np.unique(nodes, return_counts=True)
+    total_dict = dict(zip(unique, counts))
+    print(f"Initial total_dict: {total_dict}")
 
-    return volumes
+    del total_dict[0]
+    print(f"total_dict after removing zeros: {total_dict}")
+
+    print(f"id_dict keys: {list(id_dict.keys())}")
+    print(f"Initial neighborhood_dict: {neighborhood_dict}")
+    print(f"Initial totals: {totals}")
+
+    for label in total_dict:
+        if label in id_dict:
+            if label in count_dict:
+                neighborhood_dict[id_dict[label]] += count_dict[label]
+                print(f"Updated neighborhood_dict[{id_dict[label]}] with count {count_dict[label]}")
+            totals[id_dict[label]] += total_dict[label]
+            print(f"Updated totals[{id_dict[label]}] with total {total_dict[label]}")
+
+    print(f"Final neighborhood_dict: {neighborhood_dict}")
+    print(f"Final totals: {totals}")
+    return neighborhood_dict, totals
+
+
+
+
+
+
+def get_search_space_dilate(target, centroids, id_dict, search, scaling = 1):
+
+    ymax = np.max(centroids[:, 0])
+    xmax = np.max(centroids[:, 1])
+
+
+    array = np.zeros((ymax + 1, xmax + 1))
+
+    for i, row in enumerate(centroids):
+        if i + 1 in id_dict and target in id_dict[i+1]:
+            y = row[0] # get y coordinate
+            x = row[1] # get x coordinate
+            array[y, x] = 1 # set value at that coordinate
+
+
+    #array = downsample(array, 3)
+    array = dilate_2D(array, search, search)
+
+    search_space = np.count_nonzero(array) * scaling * scaling
+
+    tifffile.imwrite('search_regions.tif', array)
+
+    print(f"Search space is {search_space}")
+
+
+
+    return array
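The rewritten calculate_voxel_volumes splits the labeled array into chunks along the y-axis, counts voxels per label in each chunk on a thread pool, and sums the per-chunk counts, so the totals match the old single-pass dictionary comprehension. A minimal sketch on a synthetic volume; the import path is inferred from the file layout shown above, and the shapes, labels, and scales are placeholders:

import numpy as np
from nettracer3d.morphology import calculate_voxel_volumes

# Two labeled objects in a small 10 x 64 x 64 stack (0 is background).
array = np.zeros((10, 64, 64), dtype=np.uint8)
array[2:5, 10:20, 10:20] = 1     # 300 voxels
array[6:9, 30:40, 30:40] = 2     # 300 voxels

# Each volume is voxel_count * xy_scale**2 * z_scale; chunking along y only changes
# how the counting is distributed across threads, not the totals.
volumes = calculate_voxel_volumes(array, xy_scale=0.5, z_scale=2.0)
print(volumes)   # volumes for labels 1 and 2 are 150.0 each with these scales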