nettracer3d 0.8.3__py3-none-any.whl → 0.8.5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
nettracer3d/proximity.py CHANGED
@@ -88,7 +88,7 @@ def process_label(args):
     print(f"Processing node {label}")

     # Get the pre-computed bounding box for this label
-    slice_obj = bounding_boxes[label-1] # -1 because label numbers start at 1
+    slice_obj = bounding_boxes[int(label)-1] # -1 because label numbers start at 1
     if slice_obj is None:
         return None, None

@@ -113,7 +113,7 @@ def create_node_dictionary(nodes, num_nodes, dilate_xy, dilate_z, targets=None,
     with ThreadPoolExecutor(max_workers=mp.cpu_count()) as executor:
         # Create args list with bounding_boxes included
         args_list = [(nodes, i, dilate_xy, dilate_z, array_shape, bounding_boxes)
-                     for i in range(1, num_nodes + 1)]
+                     for i in range(1, int(num_nodes) + 1)]

         if targets is not None:
             args_list = [tup for tup in args_list if tup[1] in targets]
@@ -144,7 +144,7 @@ def find_shared_value_pairs(input_dict):

 #Related to kdtree centroid searching:

-def populate_array(centroids, clip=False):
+def populate_array(centroids, clip=False, shape = None):
     """
     Create a 3D array from centroid coordinates.

@@ -164,12 +164,16 @@ def populate_array(centroids, clip=False):
     coords = np.array(list(centroids.values()))
     # Round coordinates to nearest integer
     coords = np.round(coords).astype(int)
-    min_coords = coords.min(axis=0)
-    max_coords = coords.max(axis=0)
+    if shape is None:
+        min_coords = coords.min(axis=0)
+        max_coords = coords.max(axis=0)
+    else:
+        min_coords = [0, 0, 0]
+        max_coords = shape

     # Check for negative coordinates only if not clipping
-    if not clip and np.any(min_coords < 0):
-        raise ValueError("Negative coordinates found in centroids")
+    #if not clip and np.any(min_coords < 0):
+        #raise ValueError("Negative coordinates found in centroids")

     # Apply clipping if requested
     clipped_centroids = {}
@@ -183,10 +187,15 @@ def populate_array(centroids, clip=False):
         for i, obj_id in enumerate(centroids.keys()):
             clipped_centroids[obj_id] = coords[i].tolist()

-    # Create array
-    array = np.zeros((max_coords[0] + 1,
-                      max_coords[1] + 1,
-                      max_coords[2] + 1), dtype=int)
+    if shape is None:
+        # Create array
+        array = np.zeros((max_coords[0] + 1,
+                          max_coords[1] + 1,
+                          max_coords[2] + 1), dtype=int)
+    else:
+        array = np.zeros((max_coords[0],
+                          max_coords[1],
+                          max_coords[2]), dtype=int)

     # Populate array with (possibly clipped) rounded coordinates
     for i, (obj_id, coord) in enumerate(centroids.items()):
@@ -194,7 +203,10 @@ def populate_array(centroids, clip=False):
             z, y, x = coords[i] # Use pre-computed clipped coordinates
         else:
             z, y, x = np.round([coord[0], coord[1], coord[2]]).astype(int)
-        array[z, y, x] = obj_id
+        try:
+            array[z, y, x] = obj_id
+        except:
+            pass

     if clip:
         return array, clipped_centroids
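
The populate_array changes above add an optional fixed output shape and swallow out-of-bounds writes. A minimal usage sketch follows; it assumes the module imports as nettracer3d.proximity (inferred from the file path) and that the non-clip call returns just the array, and the data are made up for illustration:

import numpy as np
from nettracer3d import proximity  # assumed import path, from nettracer3d/proximity.py

centroids = {
    1: [2.0, 10.0, 10.0],   # object id -> (z, y, x)
    2: [0.0, 300.0, 5.0],   # lies outside the fixed shape below
}

# 0.8.3-style behavior: array size inferred from the largest centroid coordinate.
auto_array = proximity.populate_array(centroids)

# New in 0.8.5: an explicit (z, y, x) shape pins the array size; centroid 2 falls
# outside it and is silently skipped by the new try/except instead of erroring.
fixed_array = proximity.populate_array(centroids, shape=(5, 64, 64))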
@@ -328,7 +340,7 @@ def find_neighbors_kdtree(radius, centroids=None, array=None, targets=None, n_jo

     if targets is None:
         # Original behavior: find neighbors for all points
-        query_points = points
+        query_points = np.array(points)
         query_indices = list(range(len(points)))
     else:
         # Convert targets to set for O(1) lookup
@@ -354,7 +366,6 @@ def find_neighbors_kdtree(radius, centroids=None, array=None, targets=None, n_jo
         # Convert to numpy array for querying
         query_points = np.array(target_points)
         query_indices = target_indices
-

     # Handle case where no target values were found
     if len(query_points) == 0:
@@ -373,6 +384,7 @@ def find_neighbors_kdtree(radius, centroids=None, array=None, targets=None, n_jo

     # Skip parallelization for small datasets or when n_jobs=1
     if n_jobs == 1 or len(neighbor_indices) < 100:
+    #if True:
         # Sequential processing (original logic with max_neighbors support)
         output = []
         for i, neighbors in enumerate(neighbor_indices):
@@ -599,10 +611,10 @@ def convert_centroids_to_array(centroids_list, xy_scale = 1, z_scale = 1):
     points_array[:, 1:] = points_array[:, 1:] * xy_scale #account for scaling

     points_array[:, 0] = points_array[:, 0] * z_scale #account for scaling
-
+
     return points_array

-def generate_r_values(points_array, step_size, bounds = None, dim = 2, max_proportion=0.5):
+def generate_r_values(points_array, step_size, bounds = None, dim = 2, max_proportion=0.5, max_r = None):
     """
     Generate an array of r values based on point distribution and step size.

@@ -634,18 +646,27 @@ def generate_r_values(points_array, step_size, bounds = None, dim = 2, max_propo
         min_coords = np.array([0,0,0])
     dimensions = max_coords - min_coords

-    max_dimension = np.max(dimensions)
+    if 1 in dimensions:
+        dimensions = np.delete(dimensions, 0) #Presuming 2D data
+
+    min_dimension = np.min(dimensions) #Biased for smaller dimension now for safety

     # Calculate maximum r value (typically half the shortest side for 2D,
     # or scaled by max_proportion for general use)
-    max_r = max_dimension * max_proportion
+    if max_r is None:
+        max_r = min_dimension * max_proportion
+        if max_proportion < 1:
+            print(f"Omitting search radii beyond {max_r}")
+    else:
+        print(f"Omitting search radii beyond {max_r} (to keep analysis within the mask)")
+

     # Generate r values from 0 to max_r with step_size increments
     num_steps = int(max_r / step_size)
     r_values = np.linspace(step_size, max_r, num_steps)

     if r_values[0] == 0:
-        np.delete(r_values, 0)
+        r_values = np.delete(r_values, 0)

     return r_values

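To make the new radius schedule concrete, here is a small standalone sketch (not package code) of the arithmetic generate_r_values now performs when max_r is left as None and the default max_proportion of 0.5 applies:

import numpy as np

step_size = 5.0
min_dimension = 200.0                     # smallest side of the point cloud's bounding box
max_proportion = 0.5

max_r = min_dimension * max_proportion    # 100.0: radii capped at half the smallest side
num_steps = int(max_r / step_size)        # 20 steps
r_values = np.linspace(step_size, max_r, num_steps)
# -> array([  5.,  10.,  15., ..., 100.]); the radii never exceed max_r
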
@@ -662,7 +683,7 @@ def convert_augmented_array_to_points(augmented_array):
     # Extract just the coordinate columns (all except first column)
     return augmented_array[:, 1:]

-def optimized_ripleys_k(reference_points, subset_points, r_values, bounds=None, edge_correction=True, dim = 2, is_subset = False):
+def optimized_ripleys_k(reference_points, subset_points, r_values, bounds=None, dim = 2, is_subset = False, volume = None, n_subset = None):
     """
     Optimized computation of Ripley's K function using KD-Tree with simplified but effective edge correction.

@@ -677,7 +698,8 @@ def optimized_ripleys_k(reference_points, subset_points, r_values, bounds=None,
     K_values: numpy array of K values corresponding to r_values
     """
     n_ref = len(reference_points)
-    n_subset = len(subset_points)
+    if n_subset is None:
+        n_subset = len(subset_points)

     # Determine bounds if not provided
     if bounds is None:
@@ -688,10 +710,12 @@ def optimized_ripleys_k(reference_points, subset_points, r_values, bounds=None,
     # Calculate volume of study area
     min_bounds, max_bounds = bounds
     sides = max_bounds - min_bounds
-    if dim == 2:
-        volume = sides[0] * sides[1]
-    else:
-        volume = np.prod(sides)
+
+    if volume is None:
+        if dim == 2:
+            volume = sides[0] * sides[1]
+        else:
+            volume = np.prod(sides)

     # Point intensity (points per unit volume)
     intensity = n_ref / volume
@@ -705,94 +729,19 @@ def optimized_ripleys_k(reference_points, subset_points, r_values, bounds=None,
     # For each r value, compute cumulative counts
     for i, r in enumerate(r_values):
         total_count = 0
-
+
         # Query the tree for all points within radius r of each subset point
         for j, point in enumerate(subset_points):
             # Find all reference points within radius r
            indices = tree.query_ball_point(point, r)
            count = len(indices)
-
-            # Apply edge correction if needed
-            if edge_correction:
-                # Calculate edge correction weight
-                weight = 1.0
-
-                if dim == 2:
-                    # For 2D - check all four boundaries
-                    x, y = point
-
-                    # Distances to all boundaries
-                    x_min_dist = x - min_bounds[0]
-                    x_max_dist = max_bounds[0] - x
-                    y_min_dist = y - min_bounds[1]
-                    y_max_dist = max_bounds[1] - y
-
-                    proportion_in = 1.0
-                    # Apply correction for each boundary if needed
-                    if x_min_dist < r:
-                        proportion_in -= 0.5 * (1 - x_min_dist/r)
-                    if x_max_dist < r:
-                        proportion_in -= 0.5 * (1 - x_max_dist/r)
-                    if y_min_dist < r:
-                        proportion_in -= 0.5 * (1 - y_min_dist/r)
-                    if y_max_dist < r:
-                        proportion_in -= 0.5 * (1 - y_max_dist/r)
-
-                    # Corner correction
-                    if ((x_min_dist < r and y_min_dist < r) or
-                        (x_min_dist < r and y_max_dist < r) or
-                        (x_max_dist < r and y_min_dist < r) or
-                        (x_max_dist < r and y_max_dist < r)):
-                        proportion_in += 0.1 # Add a small boost for corners
-
-                elif dim == 3:
-                    # For 3D - check all six boundaries
-                    x, y, z = point
-
-                    # Distances to all boundaries
-                    x_min_dist = x - min_bounds[0]
-                    x_max_dist = max_bounds[0] - x
-                    y_min_dist = y - min_bounds[1]
-                    y_max_dist = max_bounds[1] - y
-                    z_min_dist = z - min_bounds[2]
-                    z_max_dist = max_bounds[2] - z
-
-                    proportion_in = 1.0
-                    # Apply correction for each boundary if needed
-                    if x_min_dist < r:
-                        proportion_in -= 0.25 * (1 - x_min_dist/r)
-                    if x_max_dist < r:
-                        proportion_in -= 0.25 * (1 - x_max_dist/r)
-                    if y_min_dist < r:
-                        proportion_in -= 0.25 * (1 - y_min_dist/r)
-                    if y_max_dist < r:
-                        proportion_in -= 0.25 * (1 - y_max_dist/r)
-                    if z_min_dist < r:
-                        proportion_in -= 0.25 * (1 - z_min_dist/r)
-                    if z_max_dist < r:
-                        proportion_in -= 0.25 * (1 - z_max_dist/r)

-                    # Corner correction for 3D (if point is near a corner)
-                    num_close_edges = (
-                        (x_min_dist < r) + (x_max_dist < r) +
-                        (y_min_dist < r) + (y_max_dist < r) +
-                        (z_min_dist < r) + (z_max_dist < r)
-                    )
-                    if num_close_edges >= 2:
-                        proportion_in += 0.05 * num_close_edges # Stronger boost for more edges
-
-                # Ensure proportion_in stays within reasonable bounds
-                proportion_in = max(0.1, min(1.0, proportion_in))
-                weight = 1.0 / proportion_in
-
-                count *= weight
-
            total_count += count
-
+
         # Subtract self-counts if points appear in both sets
         if is_subset or np.array_equal(reference_points, subset_points):
             total_count -= n_ref # Subtract all self-counts
-
+
         # Normalize
         K_values[i] = total_count / (n_subset * intensity)