nettracer3d 0.8.8__py3-none-any.whl → 0.8.9__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of nettracer3d might be problematic.

@@ -70,67 +70,170 @@ class InteractiveSegmenter:
          self.previous_z_back = None
 
 
-     def process_chunk(self, chunk_coords):
-         """Process a chunk staying in CuPy as much as possible"""
+     def get_minimal_chunks_for_coordinates(self, coordinates_by_z):
+         """
+         Get minimal set of 2D chunks needed to cover the given coordinates
+         Uses same chunking logic as create_2d_chunks()
+         """
+         MAX_CHUNK_SIZE = self.twod_chunk_size
+         needed_chunks = {}
 
-         foreground_coords = [] # Keep as list of CuPy coordinates
-         background_coords = []
+         for z in coordinates_by_z:
+             y_coords = [coord[0] for coord in coordinates_by_z[z]]
+             x_coords = [coord[1] for coord in coordinates_by_z[z]]
+
+             # Find bounding box of coordinates in this Z-slice
+             y_min, y_max = min(y_coords), max(y_coords)
+             x_min, x_max = min(x_coords), max(x_coords)
+
+             # Create chunks using same logic as create_2d_chunks
+             y_dim = self.image_3d.shape[1]
+             x_dim = self.image_3d.shape[2]
+             total_pixels = y_dim * x_dim
+
+             if total_pixels <= MAX_CHUNK_SIZE:
+                 # Single chunk for entire Z slice
+                 needed_chunks[z] = [[z, 0, y_dim, 0, x_dim]]
+             else:
+                 # Multiple chunks - find which ones contain our coordinates
+                 largest_dim = 'y' if y_dim >= x_dim else 'x'
+                 num_divisions = int(cp.ceil(total_pixels / MAX_CHUNK_SIZE))
+
+                 chunks_for_z = []
+
+                 if largest_dim == 'y':
+                     div_size = int(cp.ceil(y_dim / num_divisions))
+                     for i in range(0, y_dim, div_size):
+                         end_i = min(i + div_size, y_dim)
+                         # Check if this chunk contains any of our coordinates
+                         if any(i <= y <= end_i-1 for y in y_coords):
+                             chunks_for_z.append([z, i, end_i, 0, x_dim])
+                 else:
+                     div_size = int(cp.ceil(x_dim / num_divisions))
+                     for i in range(0, x_dim, div_size):
+                         end_i = min(i + div_size, x_dim)
+                         # Check if this chunk contains any of our coordinates
+                         if any(i <= x <= end_i-1 for x in x_coords):
+                             chunks_for_z.append([z, 0, y_dim, i, end_i])
+
+                 needed_chunks[z] = chunks_for_z
+
+         return needed_chunks
 
+     def compute_features_for_chunk_2d(self, chunk_coords, speed):
+         """
+         Compute features for a 2D chunk
+         chunk_coords: [z, y_start, y_end, x_start, x_end]
+         """
+         z, y_start, y_end, x_start, x_end = chunk_coords
 
+         # Extract 2D subarray for this chunk
+         subarray_2d = self.image_3d[z, y_start:y_end, x_start:x_end]
+
+         # Compute features for this chunk
+         if speed:
+             feature_map = self.compute_feature_maps_gpu_2d(image_2d=subarray_2d)
+         else:
+             feature_map = self.compute_deep_feature_maps_gpu_2d(image_2d=subarray_2d)
+
+         return feature_map, (y_start, x_start) # Return offset for coordinate mapping
+
+
+     def process_chunk_updated(self, chunk_coords):
+         """Updated process_chunk with proper 2D chunking"""
+
+         foreground_coords = []
+         background_coords = []
+
          if self.realtimechunks is None:
+             # 3D processing (original logic unchanged)
              z_min, z_max = chunk_coords[0], chunk_coords[1]
              y_min, y_max = chunk_coords[2], chunk_coords[3]
              x_min, x_max = chunk_coords[4], chunk_coords[5]
 
-             # Create meshgrid using CuPy - already good
              z_range = cp.arange(z_min, z_max)
              y_range = cp.arange(y_min, y_max)
             x_range = cp.arange(x_min, x_max)
 
-             # More efficient way to create coordinates
              chunk_coords_array = cp.stack(cp.meshgrid(
                  z_range, y_range, x_range, indexing='ij'
              )).reshape(3, -1).T
 
-             # Keep as CuPy array instead of converting to list
              chunk_coords_gpu = chunk_coords_array
+
+             subarray = self.image_3d[z_min:z_max+1, y_min:y_max+1, x_min:x_max+1]
+
+             if self.use_two:
+                 subarray = cp.squeeze(subarray)
+
+             if self.use_two and self.speed:
+                 feature_map = self.compute_feature_maps_gpu_2d(image_2d=subarray)
+             elif self.use_two:
+                 feature_map = self.compute_deep_feature_maps_gpu_2d(image_2d=subarray)
+             elif self.speed:
+                 feature_map = self.compute_feature_maps_gpu(subarray)
+             else:
+                 feature_map = self.compute_deep_feature_maps_gpu(subarray)
+
+             if self.use_two:
+                 feature_map = cp.expand_dims(feature_map, axis=0)
+
+             local_coords = chunk_coords_gpu.copy()
+             local_coords[:, 0] -= z_min
+             local_coords[:, 1] -= y_min
+             local_coords[:, 2] -= x_min
+
+             features_gpu = feature_map[local_coords[:, 0], local_coords[:, 1], local_coords[:, 2]]
+
+             features_cpu = cp.asnumpy(features_gpu)
+             predictions = self.model.predict(features_cpu)
+
+             pred_mask = cp.array(predictions, dtype=bool)
+             foreground_coords = chunk_coords_gpu[pred_mask]
+             background_coords = chunk_coords_gpu[~pred_mask]
+
          else:
-             # Convert list to CuPy array once
+             # 2D processing - compute features for chunk only (not full Z-slice)
              chunk_coords_gpu = cp.array(chunk_coords)
              z_coords = chunk_coords_gpu[:, 0]
              y_coords = chunk_coords_gpu[:, 1]
             x_coords = chunk_coords_gpu[:, 2]
 
-             z_min, z_max = cp.min(z_coords).item(), cp.max(z_coords).item()
-             y_min, y_max = cp.min(y_coords).item(), cp.max(y_coords).item()
-             x_min, x_max = cp.min(x_coords).item(), cp.max(x_coords).item()
-
-             # Extract subarray - already good
-             subarray = self.image_3d[z_min:z_max+1, y_min:y_max+1, x_min:x_max+1]
-
-             # Compute features
-             if self.speed:
-                 feature_map = self.compute_feature_maps_gpu(subarray)
-             else:
-                 feature_map = self.compute_deep_feature_maps_gpu(subarray)
-
-             # Extract features more efficiently
-             local_coords = chunk_coords_gpu.copy()
-             local_coords[:, 0] -= z_min
-             local_coords[:, 1] -= y_min
-             local_coords[:, 2] -= x_min
-
-             # Vectorized feature extraction
-             features_gpu = feature_map[local_coords[:, 0], local_coords[:, 1], local_coords[:, 2]]
-
-             features_cpu = cp.asnumpy(features_gpu)
-             predictions = self.model.predict(features_cpu)
-
-             # Keep coordinates as CuPy arrays
-             pred_mask = cp.array(predictions, dtype=bool)
-             foreground_coords = chunk_coords_gpu[pred_mask]
-             background_coords = chunk_coords_gpu[~pred_mask]
-
+             z = int(cp.unique(z_coords)[0]) # All coordinates should have same Z
+
+             # Get chunk bounds
+             y_min, y_max = int(cp.min(y_coords)), int(cp.max(y_coords))
+             x_min, x_max = int(cp.min(x_coords)), int(cp.max(x_coords))
+
+             # Expand bounds slightly to ensure we capture the chunk properly
+             y_min = max(0, y_min)
+             x_min = max(0, x_min)
+             y_max = min(self.image_3d.shape[1], y_max + 1)
+             x_max = min(self.image_3d.shape[2], x_max + 1)
+
+             # Extract 2D subarray for this chunk
+             subarray_2d = self.image_3d[z, y_min:y_max, x_min:x_max]
+
+             # Compute features for just this chunk
+             if self.speed:
+                 feature_map = self.compute_feature_maps_gpu_2d(image_2d=subarray_2d)
+             else:
+                 feature_map = self.compute_deep_feature_maps_gpu_2d(image_2d=subarray_2d)
+
+             # Convert global coordinates to local chunk coordinates
+             local_y_coords = y_coords - y_min
+             local_x_coords = x_coords - x_min
+
+             # Extract features using local coordinates
+             features_gpu = feature_map[local_y_coords, local_x_coords]
+
+             features_cpu = cp.asnumpy(features_gpu)
+             predictions = self.model.predict(features_cpu)
+
+             pred_mask = cp.array(predictions, dtype=bool)
+             foreground_coords = chunk_coords_gpu[pred_mask]
+             background_coords = chunk_coords_gpu[~pred_mask]
+
          return foreground_coords, background_coords
 
      def twodim_coords(self, z, y_start, y_end, x_start, x_end):
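Note on the chunking arithmetic above: a Z-slice is split only when y_dim * x_dim exceeds self.twod_chunk_size, and the split always runs along the larger of Y and X, in ceil(total_pixels / MAX_CHUNK_SIZE) strips. A minimal standalone sketch of that division, using math.ceil in place of cp.ceil (the split_slice helper is illustrative, not part of the package):

import math

def split_slice(y_dim, x_dim, max_chunk_size):
    """Return [y_start, y_end, x_start, x_end] strips for one Z-slice."""
    total_pixels = y_dim * x_dim
    if total_pixels <= max_chunk_size:
        return [[0, y_dim, 0, x_dim]]           # whole slice fits in one chunk
    num_divisions = math.ceil(total_pixels / max_chunk_size)
    chunks = []
    if y_dim >= x_dim:                          # divide along the largest dimension
        div_size = math.ceil(y_dim / num_divisions)
        for i in range(0, y_dim, div_size):
            chunks.append([i, min(i + div_size, y_dim), 0, x_dim])
    else:
        div_size = math.ceil(x_dim / num_divisions)
        for i in range(0, x_dim, div_size):
            chunks.append([0, y_dim, i, min(i + div_size, x_dim)])
    return chunks

# Example: a 2048x1024 slice with a 1,048,576-pixel budget splits into two
# 1024x1024 strips along Y.
print(split_slice(2048, 1024, 1_048_576))
# [[0, 1024, 0, 1024], [1024, 2048, 0, 1024]]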
@@ -373,7 +476,7 @@ class InteractiveSegmenter:
 
              # Stack all channel features
              return cp.concatenate(features_per_channel, axis=-1)
-
+
          # Pre-allocate result array
          num_features = len(self.sigmas) + len(self.dogs) + 2 # +2 for original image + gradient
          features = cp.empty(image_2d.shape + (num_features,), dtype=image_2d.dtype)
@@ -495,30 +598,25 @@ class InteractiveSegmenter:
          hyy = cupy_ndimage.gaussian_filter(gaussian_img, sigma=0, order=[2, 0], mode='reflect')
          hxy = cupy_ndimage.gaussian_filter(gaussian_img, sigma=0, order=[1, 1], mode='reflect')
 
-         # Vectorized eigenvalue computation using cupy broadcasting
-         # Create arrays with shape (d0, d1, 2, 2) for all 2D Hessian matrices
-         shape = image_2d.shape
-         hessian_matrices = cp.zeros(shape + (2, 2))
+         # Analytical eigenvalue computation for 2x2 symmetric matrices
+         # For matrix [[hxx, hxy], [hxy, hyy]], eigenvalues are:
+         # λ = (trace ± sqrt(trace² - 4*det)) / 2
 
-         # Fill the symmetric 2D Hessian matrices
-         hessian_matrices[..., 0, 0] = hxx
-         hessian_matrices[..., 1, 1] = hyy
-         hessian_matrices[..., 0, 1] = hessian_matrices[..., 1, 0] = hxy
+         trace = hxx + hyy
+         det = hxx * hyy - hxy * hxy
 
-         # Reshape for batch eigenvalue computation
-         original_shape = hessian_matrices.shape[:-2] # (d0, d1)
-         batch_size = int(cp.prod(cp.array(original_shape)))
-         hessian_batch = hessian_matrices.reshape(batch_size, 2, 2)
+         # Calculate discriminant and ensure it's non-negative
+         discriminant = trace * trace - 4 * det
+         discriminant = cp.maximum(discriminant, 0) # Handle numerical errors
 
-         # Compute eigenvalues for all matrices at once using CuPy
-         # Since Hessian matrices are symmetric, we can use eigvalsh
-         eigenvalues_batch = cp.linalg.eigvalsh(hessian_batch)
+         sqrt_discriminant = cp.sqrt(discriminant)
 
-         # Get only the largest eigenvalue for each matrix
-         largest_eigenvalues = cp.max(eigenvalues_batch, axis=1)
+         # Calculate both eigenvalues
+         eigenval1 = (trace + sqrt_discriminant) / 2
+         eigenval2 = (trace - sqrt_discriminant) / 2
 
-         # Reshape back to original spatial dimensions
-         largest_eigenvalues = largest_eigenvalues.reshape(original_shape)
+         # Take the larger eigenvalue (most positive/least negative)
+         largest_eigenvalues = cp.maximum(eigenval1, eigenval2)
 
          # Add the largest eigenvalue as a feature
          features[..., feature_idx] = largest_eigenvalues
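The change above swaps the batched cp.linalg.eigvalsh call for the closed-form eigenvalues of the symmetric 2x2 Hessian, λ = (trace ± sqrt(trace² - 4·det)) / 2, which avoids building the (H, W, 2, 2) matrix stack. A quick equivalence check, a minimal sketch using NumPy as a stand-in for CuPy (array names here are illustrative):

import numpy as np

rng = np.random.default_rng(0)
hxx, hyy, hxy = rng.normal(size=(3, 64, 64))    # stand-ins for the Hessian components

# Closed-form largest eigenvalue, mirroring the new code path
trace = hxx + hyy
det = hxx * hyy - hxy * hxy
disc = np.maximum(trace * trace - 4 * det, 0)   # guard against round-off
largest_closed_form = (trace + np.sqrt(disc)) / 2

# Reference: build the (64, 64, 2, 2) stack and use eigvalsh, as the old code did
H = np.zeros((64, 64, 2, 2))
H[..., 0, 0], H[..., 1, 1] = hxx, hyy
H[..., 0, 1] = H[..., 1, 0] = hxy
largest_reference = np.linalg.eigvalsh(H)[..., -1]  # eigenvalues come back ascending

assert np.allclose(largest_closed_form, largest_reference)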
@@ -678,12 +776,12 @@ class InteractiveSegmenter:
 
      def extract_chunk_features_gpu(self, chunk_coords):
          """
-         GPU version of feature extraction without prediction
-         Returns CPU features and GPU coordinates for efficient batch processing
+         Updated GPU version of feature extraction with chunked 2D processing
+         Returns GPU features and GPU coordinates for efficient batch processing
          """
 
-         if self.previewing or not self.use_two:
-             # 3D processing
+         if not self.use_two:
+             # 3D processing (unchanged)
              if self.realtimechunks is None:
                  z_min, z_max = chunk_coords[0], chunk_coords[1]
                  y_min, y_max = chunk_coords[2], chunk_coords[3]
@@ -723,14 +821,10 @@ class InteractiveSegmenter:
 
              features_gpu = feature_map[local_coords[:, 0], local_coords[:, 1], local_coords[:, 2]]
 
-             # Convert features to CPU for sklearn, convert coordinates to NumPy for final assignment
-             #features_cpu = cp.asnumpy(features_gpu)
-             #coords_cpu = cp.asnumpy(chunk_coords_gpu)
-
              return features_gpu, chunk_coords_gpu
 
          else:
-             # 2D processing
+             # 2D processing - Updated to use chunked feature computation
              if len(chunk_coords) == 5:
                  z = chunk_coords[0]
                  y_start = chunk_coords[1]
@@ -738,20 +832,25 @@ class InteractiveSegmenter:
                  x_start = chunk_coords[3]
                  x_end = chunk_coords[4]
 
-                 # Generate coordinates for this slice
+                 # Generate coordinates for this chunk
                  coords_array = self.twodim_coords(z, y_start, y_end, x_start, x_end)
 
-                 # Get feature map for this z-slice
-                 feature_map = self.get_feature_map_slice(z, self.speed, True)
+                 # NEW: Compute features for just this chunk instead of full Z-slice
+                 # Extract 2D subarray for this chunk
+                 subarray_2d = self.image_3d[z, y_start:y_end, x_start:x_end]
 
-                 # Extract features using GPU operations
-                 y_indices = coords_array[:, 1]
-                 x_indices = coords_array[:, 2]
-                 features_gpu = feature_map[y_indices, x_indices]
+                 # Compute features for this chunk only
+                 if self.speed:
+                     feature_map = self.compute_feature_maps_gpu_2d(image_2d=subarray_2d)
+                 else:
+                     feature_map = self.compute_deep_feature_maps_gpu_2d(image_2d=subarray_2d)
+
+                 # Convert global coordinates to local chunk coordinates
+                 y_indices = coords_array[:, 1] - y_start # Local Y coordinates
+                 x_indices = coords_array[:, 2] - x_start # Local X coordinates
 
-                 # Convert features to CPU for sklearn, convert coordinates to NumPy for final assignment
-                 #features_cpu = cp.asnumpy(features_gpu)
-                 #coords_cpu = cp.asnumpy(coords_array)
+                 # Extract features using local coordinates
+                 features_gpu = feature_map[y_indices, x_indices]
 
                  return features_gpu, coords_array
 
@@ -790,51 +889,54 @@ class InteractiveSegmenter:
 
      def get_realtime_chunks_2d(self, chunk_size=None):
          """
-         Create square chunks with 1 z-thickness (2D chunks across XY planes)
+         Create chunks with 1 z-thickness (2D chunks across XY planes)
+         Now uses the same logic as create_2d_chunks for consistency
          """
 
-         if chunk_size is None:
-             chunk_size = int(cp.sqrt(self.twod_chunk_size))
-
-         # Determine if we need to chunk XY planes
-         small_dims = (self.image_3d.shape[1] <= chunk_size and
-                       self.image_3d.shape[2] <= chunk_size)
-         few_z = self.image_3d.shape[0] <= 100 # arbitrary threshold
-
-         # If small enough, each Z is one chunk
-         if small_dims and few_z:
-             chunk_size_xy = max(self.image_3d.shape[1], self.image_3d.shape[2])
-         else:
-             chunk_size_xy = chunk_size
-
-         # Calculate chunks for XY plane
-         y_chunks = (self.image_3d.shape[1] + chunk_size_xy - 1) // chunk_size_xy
-         x_chunks = (self.image_3d.shape[2] + chunk_size_xy - 1) // chunk_size_xy
+         MAX_CHUNK_SIZE = self.twod_chunk_size
 
          # Populate chunk dictionary
          chunk_dict = {}
 
-         # Create chunks for each Z plane (single Z thickness)
+         # Create chunks for each Z plane using the same logic as create_2d_chunks
          for z in range(self.image_3d.shape[0]):
-             if small_dims:
+             y_dim = self.image_3d.shape[1]
+             x_dim = self.image_3d.shape[2]
+             total_pixels = y_dim * x_dim
+
+             if total_pixels <= MAX_CHUNK_SIZE:
+                 # Single chunk for entire Z slice
                  chunk_dict[(z, 0, 0)] = {
-                     'coords': [0, self.image_3d.shape[1], 0, self.image_3d.shape[2]],
+                     'coords': [0, y_dim, 0, x_dim], # [y_start, y_end, x_start, x_end]
                      'processed': False,
-                     'z': z # Keep for backward compatibility
+                     'z': z
                  }
             else:
-                 # Multiple chunks per Z plane
-                 for y_chunk in range(y_chunks):
-                     for x_chunk in range(x_chunks):
-                         y_start = y_chunk * chunk_size_xy
-                         x_start = x_chunk * chunk_size_xy
-                         y_end = min(y_start + chunk_size_xy, self.image_3d.shape[1])
-                         x_end = min(x_start + chunk_size_xy, self.image_3d.shape[2])
-
-                         chunk_dict[(z, y_start, x_start)] = {
-                             'coords': [y_start, y_end, x_start, x_end],
+                 # Multiple chunks per Z plane - divide along largest dimension
+                 largest_dim = 'y' if y_dim >= x_dim else 'x'
+                 num_divisions = int(cp.ceil(total_pixels / MAX_CHUNK_SIZE))
+
+                 if largest_dim == 'y':
+                     # Divide along Y dimension
+                     div_size = int(cp.ceil(y_dim / num_divisions))
+                     for i in range(0, y_dim, div_size):
+                         end_i = min(i + div_size, y_dim)
+                         # Use (z, y_start, x_start) as key for consistency
+                         chunk_dict[(z, i, 0)] = {
+                             'coords': [i, end_i, 0, x_dim], # [y_start, y_end, x_start, x_end]
                              'processed': False,
-                             'z': z # Keep for backward compatibility
+                             'z': z
+                         }
+                 else:
+                     # Divide along X dimension
+                     div_size = int(cp.ceil(x_dim / num_divisions))
+                     for i in range(0, x_dim, div_size):
+                         end_i = min(i + div_size, x_dim)
+                         # Use (z, y_start, x_start) as key for consistency
+                         chunk_dict[(z, 0, i)] = {
+                             'coords': [0, y_dim, i, end_i], # [y_start, y_end, x_start, x_end]
+                             'processed': False,
+                             'z': z
                          }
 
          self.realtimechunks = chunk_dict
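For reference, with the division logic above, a hypothetical two-slice 2048x1024 volume and a twod_chunk_size of 1,048,576 pixels would produce a chunk dictionary shaped like this (illustrative values, not package output):

chunk_dict = {
    (0, 0, 0):    {'coords': [0, 1024, 0, 1024],    'processed': False, 'z': 0},
    (0, 1024, 0): {'coords': [1024, 2048, 0, 1024], 'processed': False, 'z': 0},
    (1, 0, 0):    {'coords': [0, 1024, 0, 1024],    'processed': False, 'z': 1},
    (1, 1024, 0): {'coords': [1024, 2048, 0, 1024], 'processed': False, 'z': 1},
}
# Keys are (z, y_start, x_start); 'coords' holds [y_start, y_end, x_start, x_end],
# and the Z index is duplicated under 'z'.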
@@ -890,24 +992,16 @@ class InteractiveSegmenter:
 
 
      def segment_volume_realtime(self, gpu=True):
-         """Segment volume in realtime using CuPy for GPU acceleration"""
+         """Updated realtime segmentation - no more feature map caching needed"""
          import cupy as cp
 
-         #try:
-             #from cuml.ensemble import RandomForestClassifier as cuRandomForestClassifier
-             #gpu_ml_available = True
-         #except:
-             #print("Cannot find cuML, using CPU to segment instead...")
-             #gpu_ml_available = False
-             #gpu = False
-
          if self.realtimechunks is None:
              if not self.use_two:
                  self.get_realtime_chunks()
             else:
                  self.get_realtime_chunks_2d()
          else:
-             for chunk_pos in self.realtimechunks: # chunk_pos is the (z, y_start, x_start) tuple
+             for chunk_pos in self.realtimechunks:
                  self.realtimechunks[chunk_pos]['processed'] = False
 
          chunk_dict = self.realtimechunks
@@ -923,7 +1017,6 @@ class InteractiveSegmenter:
                                   if pos[0] == curr_z and not info['processed']]
 
              if current_z_chunks:
-                 # Find nearest chunk in current Z plane using the chunk positions from the key
                  nearest = min(current_z_chunks,
                                key=lambda x: ((x[0][1] - curr_y) ** 2 +
                                               (x[0][2] - curr_x) ** 2))
@@ -938,7 +1031,6 @@ class InteractiveSegmenter:
 
              if available_z:
                  target_z = available_z[0][0]
-                 # Find nearest chunk in target Z plane
                  z_chunks = [(pos, info) for pos, info in chunk_dict.items()
                              if pos[0] == target_z and not info['processed']]
                  nearest = min(z_chunks,
@@ -949,31 +1041,24 @@ class InteractiveSegmenter:
              return None
 
          while True:
-             # Find nearest unprocessed chunk using class attributes
              chunk_idx = get_nearest_unprocessed_chunk(self)
              if chunk_idx is None:
                  break
 
-             # Process the chunk directly
              chunk = chunk_dict[chunk_idx]
             chunk['processed'] = True
-             coords = chunk['coords']
+             coords = chunk['coords'] # [y_start, y_end, x_start, x_end]
+             z = chunk['z']
 
-             # Use CuPy for meshgrid
-             coords_array = cp.stack(cp.meshgrid(
-                 cp.array([chunk['z']]),
-                 cp.arange(coords[0], coords[1]),
-                 cp.arange(coords[2], coords[3]),
-                 indexing='ij'
-             )).reshape(3, -1).T
-
-             # Convert to CPU for further processing - add cp.asnumpy() here
-             coords = list(map(tuple, cp.asnumpy(coords_array)))
+             # Generate coordinates for this chunk
+             coords_array = self.twodim_coords(z, coords[0], coords[1], coords[2], coords[3])
+
+             # Convert to CPU for processing
+             coords_list = list(map(tuple, cp.asnumpy(coords_array)))
 
-             # Process the chunk directly based on whether GPU is available
-             fore, back = self.process_chunk(coords)
+             # Process the chunk - now computes features only for this chunk
+             fore, back = self.process_chunk_updated(coords_list)
 
-             # Yield the results
              yield cp.asnumpy(fore), cp.asnumpy(back)
 
 
@@ -1028,42 +1113,73 @@ class InteractiveSegmenter:
              self.use_two = False
 
          if use_two:
-             if not self.use_two: #Clarifies if we need to redo feature cache for 2D
+             if not self.use_two:
                  self.use_two = True
                  self.two_slices = []
              foreground_array = cp.asarray(foreground_array)
 
              # Get foreground coordinates and features
              z_fore, y_fore, x_fore = cp.where(foreground_array == 1)
-             # Keep as CuPy arrays but convert to regular Python types for dictionary keys
              fore_coords = [(int(z), int(y), int(x)) for z, y, x in zip(z_fore, y_fore, x_fore)]
 
              # Get background coordinates and features
              z_back, y_back, x_back = cp.where(foreground_array == 2)
-             # Keep as CuPy arrays but convert to regular Python types for dictionary keys
              back_coords = [(int(z), int(y), int(x)) for z, y, x in zip(z_back, y_back, x_back)]
 
              foreground_features = []
              background_features = []
+
+             # Organize coordinates by Z
              z_fores = self.organize_by_z(fore_coords)
              z_backs = self.organize_by_z(back_coords)
-             slices = set(list(z_fores.keys()) + list(z_backs.keys()))
-
-             for z in slices:
-                 current_map = self.get_feature_map_slice(z, speed, use_gpu)
-                 if z in z_fores:
-                     for y, x in z_fores[z]:
-                         # Get the feature vector for this foreground point
-                         feature_vector = current_map[y, x]
-                         # Add to our collection
-                         foreground_features.append(cp.asnumpy(feature_vector))
-                 if z in z_backs:
-                     for y, x in z_backs[z]:
-                         # Get the feature vector for this background point
-                         feature_vector = current_map[y, x]
-                         # Add to our collection
-                         background_features.append(cp.asnumpy(feature_vector))
-
+
+             # Combine all Z-slices that have coordinates
+             all_z_coords = {}
+             for z in z_fores:
+                 if z not in all_z_coords:
+                     all_z_coords[z] = []
+                 all_z_coords[z].extend(z_fores[z])
+             for z in z_backs:
+                 if z not in all_z_coords:
+                     all_z_coords[z] = []
+                 all_z_coords[z].extend(z_backs[z])
+
+             # Get minimal chunks needed to cover all coordinates
+             needed_chunks = self.get_minimal_chunks_for_coordinates(all_z_coords)
+
+             # Process each chunk and extract features
+             for z in needed_chunks:
+                 for chunk_coords in needed_chunks[z]:
+                     # Compute features for this chunk
+                     feature_map, (y_offset, x_offset) = self.compute_features_for_chunk_2d(chunk_coords, speed)
+
+                     # Extract foreground features from this chunk
+                     if z in z_fores:
+                         for y, x in z_fores[z]:
+                             # Check if this coordinate is in the current chunk
+                             y_start, y_end = chunk_coords[1], chunk_coords[2]
+                             x_start, x_end = chunk_coords[3], chunk_coords[4]
+
+                             if y_start <= y < y_end and x_start <= x < x_end:
+                                 # Convert global coordinates to local chunk coordinates
+                                 local_y = y - y_offset
+                                 local_x = x - x_offset
+                                 feature_vector = feature_map[local_y, local_x]
+                                 foreground_features.append(cp.asnumpy(feature_vector))
+
+                     # Extract background features from this chunk
+                     if z in z_backs:
+                         for y, x in z_backs[z]:
+                             # Check if this coordinate is in the current chunk
+                             y_start, y_end = chunk_coords[1], chunk_coords[2]
+                             x_start, x_end = chunk_coords[3], chunk_coords[4]
+
+                             if y_start <= y < y_end and x_start <= x < x_end:
+                                 # Convert global coordinates to local chunk coordinates
+                                 local_y = y - y_offset
+                                 local_x = x - x_offset
+                                 feature_vector = feature_map[local_y, local_x]
+                                 background_features.append(cp.asnumpy(feature_vector))
          else:
 
              box_size = self.master_chunk
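The training path above first groups the labeled voxels by Z and then asks get_minimal_chunks_for_coordinates which chunks actually need feature computation. A minimal sketch of that grouping step, assuming organize_by_z maps (z, y, x) tuples to {z: [(y, x), ...]} as its usage here implies (the standalone function below is illustrative, not the package API):

from collections import defaultdict

def organize_by_z(coords):
    """Group (z, y, x) tuples into {z: [(y, x), ...]}."""
    by_z = defaultdict(list)
    for z, y, x in coords:
        by_z[z].append((y, x))
    return dict(by_z)

fore = [(2, 10, 15), (2, 900, 40), (5, 3, 3)]   # example foreground labels
back = [(2, 11, 16)]                            # example background labels

all_z_coords = organize_by_z(fore)
for z, pts in organize_by_z(back).items():
    all_z_coords.setdefault(z, []).extend(pts)
# all_z_coords == {2: [(10, 15), (900, 40), (11, 16)], 5: [(3, 3)]}
# Only the chunks of slices 2 and 5 that contain these points would then be
# passed to compute_features_for_chunk_2d.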
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: nettracer3d
- Version: 0.8.8
+ Version: 0.8.9
  Summary: Scripts for intializing and analyzing networks from segmentations of three dimensional images.
  Author-email: Liam McLaughlin <liamm@wustl.edu>
  Project-URL: Documentation, https://nettracer3d.readthedocs.io/en/latest/
@@ -110,6 +110,6 @@ McLaughlin, L., Zhang, B., Sharma, S. et al. Three dimensional multiscalar neuro
 
  NetTracer3D was developed by Liam McLaughlin while working under Dr. Sanjay Jain at Washington University School of Medicine.
 
- -- Version 0.8.8 Updates --
+ -- Version 0.8.9 Updates --
 
  * See Documentation Once Updated
@@ -6,20 +6,20 @@ nettracer3d/modularity.py,sha256=O9OeKbjD3v6gSFz9K2GzP6LsxlpQaPfeJbM1pyIEigw,217
  nettracer3d/morphology.py,sha256=jyDjYzrZ4LvI5jOyw8DLsxmo-i5lpqHsejYpW7Tq7Mo,19786
  nettracer3d/neighborhoods.py,sha256=VWubD5CBu9aNPhUea7FbAk9aTOq0FLKR9y-1VT7YkAc,39677
  nettracer3d/nettracer.py,sha256=TEV-nDmkcGP3UjWEor1LtEwm5mFBQu2nB0VRyz9Lt08,253649
- nettracer3d/nettracer_gui.py,sha256=fD03qJU3W5VHW4OeHZg0SnCu4k6km5_vxPoTbd1OAmg,540421
+ nettracer3d/nettracer_gui.py,sha256=aYU4r8HLLNiKPOpQIjdvRu2BRtvx18Z6OXa8N7g27Xg,546298
  nettracer3d/network_analysis.py,sha256=kBzsVaq4dZkMe0k-VGvQIUvM-tK0ZZ8bvb-wtsugZRQ,46150
  nettracer3d/network_draw.py,sha256=F7fw6Pcf4qWOhdKwLmhwqWdschbDlHzwCVolQC9imeU,14117
  nettracer3d/node_draw.py,sha256=kZcR1PekLg0riioNeGcALIXQyZ5PtHA_9MT6z7Zovdk,10401
  nettracer3d/painting.py,sha256=K_dwngivw80r-Yyg4btKMsWGn566ZE9PnrQl986uxJE,23497
  nettracer3d/proximity.py,sha256=bTaucn_InQ-v1GIk8ug-dXvDhIO59rnBMl5nIwAmNyw,35335
  nettracer3d/run.py,sha256=xYeaAc8FCx8MuzTGyL3NR3mK7WZzffAYAH23bNRZYO4,127
- nettracer3d/segmenter.py,sha256=O3xjCimPwoL8LM1w4cKVTB7saY-UptFuYC8qOIo3iWg,61637
- nettracer3d/segmenter_GPU.py,sha256=3CJLXCiySZP2dJbkpfBoXwAYbV4TnvIYAm6oQv-T-y4,63479
+ nettracer3d/segmenter.py,sha256=_ZBLe2eqV9c5yxuyfN69XR5mBoc_OD-CZjwdTCUV0YQ,67045
+ nettracer3d/segmenter_GPU.py,sha256=SGOsy8tSRlxAnD1OrJmjhiQ3k6CWiUFZZdrN6pkkgeU,69057
  nettracer3d/simple_network.py,sha256=dkG4jpc4zzdeuoaQobgGfL3PNo6N8dGKQ5hEEubFIvA,9947
  nettracer3d/smart_dilate.py,sha256=TvRUh6B4q4zIdCO1BWH-xgTdND5OUNmo99eyxG9oIAU,27145
- nettracer3d-0.8.8.dist-info/licenses/LICENSE,sha256=jnNT-yBeIAKAHpYthPvLeqCzJ6nSurgnKmloVnfsjCI,764
- nettracer3d-0.8.8.dist-info/METADATA,sha256=t1q1vmtqmJ1h1L67QdYCWDnGBB5XH6ZyQXhBc7gJN_o,7008
- nettracer3d-0.8.8.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
- nettracer3d-0.8.8.dist-info/entry_points.txt,sha256=Nx1rr_0QhJXDBHAQg2vcqCzLMKBzSHfwy3xwGkueVyc,53
- nettracer3d-0.8.8.dist-info/top_level.txt,sha256=zsYy9rZwirfCEOubolhee4TyzqBAL5gSUeFMzhFTX8c,12
- nettracer3d-0.8.8.dist-info/RECORD,,
+ nettracer3d-0.8.9.dist-info/licenses/LICENSE,sha256=jnNT-yBeIAKAHpYthPvLeqCzJ6nSurgnKmloVnfsjCI,764
+ nettracer3d-0.8.9.dist-info/METADATA,sha256=3aV3Pcj-2AmQzzRkI5crBVw_ZbsLW8I_kV5WiebYKws,7008
+ nettracer3d-0.8.9.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ nettracer3d-0.8.9.dist-info/entry_points.txt,sha256=Nx1rr_0QhJXDBHAQg2vcqCzLMKBzSHfwy3xwGkueVyc,53
+ nettracer3d-0.8.9.dist-info/top_level.txt,sha256=zsYy9rZwirfCEOubolhee4TyzqBAL5gSUeFMzhFTX8c,12
+ nettracer3d-0.8.9.dist-info/RECORD,,