nettracer3d 0.8.3__py3-none-any.whl → 0.8.5__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of nettracer3d might be problematic. Click here for more details.
- nettracer3d/community_extractor.py +3 -3
- nettracer3d/excelotron.py +21 -2
- nettracer3d/neighborhoods.py +140 -31
- nettracer3d/nettracer.py +516 -82
- nettracer3d/nettracer_gui.py +1072 -842
- nettracer3d/network_analysis.py +90 -29
- nettracer3d/node_draw.py +6 -2
- nettracer3d/painting.py +373 -0
- nettracer3d/proximity.py +52 -103
- nettracer3d/segmenter.py +849 -851
- nettracer3d/segmenter_GPU.py +806 -658
- nettracer3d/smart_dilate.py +44 -10
- {nettracer3d-0.8.3.dist-info → nettracer3d-0.8.5.dist-info}/METADATA +6 -3
- nettracer3d-0.8.5.dist-info/RECORD +25 -0
- {nettracer3d-0.8.3.dist-info → nettracer3d-0.8.5.dist-info}/licenses/LICENSE +2 -4
- nettracer3d-0.8.3.dist-info/RECORD +0 -24
- {nettracer3d-0.8.3.dist-info → nettracer3d-0.8.5.dist-info}/WHEEL +0 -0
- {nettracer3d-0.8.3.dist-info → nettracer3d-0.8.5.dist-info}/entry_points.txt +0 -0
- {nettracer3d-0.8.3.dist-info → nettracer3d-0.8.5.dist-info}/top_level.txt +0 -0
nettracer3d/segmenter_GPU.py
CHANGED
|
@@ -29,7 +29,6 @@ class InteractiveSegmenter:
|
|
|
29
29
|
max_depth=None
|
|
30
30
|
)
|
|
31
31
|
|
|
32
|
-
self.feature_cache = None
|
|
33
32
|
self.lock = threading.Lock()
|
|
34
33
|
self._currently_segmenting = None
|
|
35
34
|
self.use_gpu = True
|
|
@@ -47,9 +46,9 @@ class InteractiveSegmenter:
|
|
|
47
46
|
self.two_slices = []
|
|
48
47
|
self.speed = True
|
|
49
48
|
self.cur_gpu = False
|
|
50
|
-
self.map_slice = None
|
|
51
49
|
self.prev_z = None
|
|
52
50
|
self.previewing = False
|
|
51
|
+
self.batch_amplifier = 2 # Can raise this number to make SKLearn batches larger
|
|
53
52
|
|
|
54
53
|
# flags to track state
|
|
55
54
|
self._currently_processing = False
|
|
@@ -58,10 +57,11 @@ class InteractiveSegmenter:
|
|
|
58
57
|
self.mem_lock = False
|
|
59
58
|
|
|
60
59
|
#Adjustable feature map params:
|
|
61
|
-
self.
|
|
60
|
+
self.sigmas = [1,2,4,8]
|
|
62
61
|
self.windows = 10
|
|
63
62
|
self.dogs = [(1, 2), (2, 4), (4, 8)]
|
|
64
63
|
self.master_chunk = 49
|
|
64
|
+
self.twod_chunk_size = 262144
|
|
65
65
|
|
|
66
66
|
#Data when loading prev model:
|
|
67
67
|
self.previous_foreground = None
|
|
@@ -69,139 +69,6 @@ class InteractiveSegmenter:
|
|
|
69
69
|
self.previous_z_fore = None
|
|
70
70
|
self.previous_z_back = None
|
|
71
71
|
|
|
72
|
-
def segment_slice_chunked(self, slice_z, block_size=49):
|
|
73
|
-
"""
|
|
74
|
-
A completely standalone method to segment a single z-slice in chunks
|
|
75
|
-
with improved safeguards.
|
|
76
|
-
"""
|
|
77
|
-
# Check if we're already processing this slice
|
|
78
|
-
if self._currently_processing and self._currently_processing == slice_z:
|
|
79
|
-
return
|
|
80
|
-
|
|
81
|
-
# Set processing flag with the slice we're processing
|
|
82
|
-
self._currently_processing = slice_z
|
|
83
|
-
|
|
84
|
-
try:
|
|
85
|
-
# First attempt to get the feature map
|
|
86
|
-
feature_map = None
|
|
87
|
-
|
|
88
|
-
try:
|
|
89
|
-
if slice_z in self.feature_cache:
|
|
90
|
-
feature_map = self.feature_cache[slice_z]
|
|
91
|
-
elif hasattr(self, 'map_slice') and self.map_slice is not None and slice_z == self.current_z:
|
|
92
|
-
feature_map = self.map_slice
|
|
93
|
-
else:
|
|
94
|
-
# Generate new feature map
|
|
95
|
-
try:
|
|
96
|
-
feature_map = self.get_feature_map_slice(slice_z, self.current_speed, False)
|
|
97
|
-
self.map_slice = feature_map
|
|
98
|
-
except Exception as e:
|
|
99
|
-
print(f"Error generating feature map: {e}")
|
|
100
|
-
import traceback
|
|
101
|
-
traceback.print_exc()
|
|
102
|
-
return # Exit if we can't generate the feature map
|
|
103
|
-
except:
|
|
104
|
-
# Generate new feature map
|
|
105
|
-
try:
|
|
106
|
-
feature_map = self.get_feature_map_slice(slice_z, self.current_speed, False)
|
|
107
|
-
self.map_slice = feature_map
|
|
108
|
-
except Exception as e:
|
|
109
|
-
print(f"Error generating feature map: {e}")
|
|
110
|
-
import traceback
|
|
111
|
-
traceback.print_exc()
|
|
112
|
-
return # Exit if we can't generate the feature map
|
|
113
|
-
|
|
114
|
-
# Check that we have a valid feature map
|
|
115
|
-
if feature_map is None:
|
|
116
|
-
return
|
|
117
|
-
|
|
118
|
-
# Get dimensions of the slice
|
|
119
|
-
y_size, x_size = self.image_3d.shape[1], self.image_3d.shape[2]
|
|
120
|
-
chunk_count = 0
|
|
121
|
-
|
|
122
|
-
# Determine if feature_map is a CuPy array
|
|
123
|
-
is_cupy_array = hasattr(feature_map, 'get')
|
|
124
|
-
|
|
125
|
-
# Process in blocks for chunked feedback
|
|
126
|
-
for y_start in range(0, y_size, block_size):
|
|
127
|
-
if self._currently_processing != slice_z:
|
|
128
|
-
return
|
|
129
|
-
|
|
130
|
-
for x_start in range(0, x_size, block_size):
|
|
131
|
-
if self._currently_processing != slice_z:
|
|
132
|
-
return
|
|
133
|
-
|
|
134
|
-
y_end = min(y_start + block_size, y_size)
|
|
135
|
-
x_end = min(x_start + block_size, x_size)
|
|
136
|
-
|
|
137
|
-
# Create coordinates and features for this block
|
|
138
|
-
coords = []
|
|
139
|
-
features_list = []
|
|
140
|
-
|
|
141
|
-
for y in range(y_start, y_end):
|
|
142
|
-
for x in range(x_start, x_end):
|
|
143
|
-
coords.append((slice_z, y, x))
|
|
144
|
-
features_list.append(feature_map[y, x])
|
|
145
|
-
|
|
146
|
-
# Convert features to NumPy properly based on type
|
|
147
|
-
if is_cupy_array:
|
|
148
|
-
# If feature_map is a CuPy array, we need to extract a CuPy array
|
|
149
|
-
# from the list and then convert it to NumPy
|
|
150
|
-
try:
|
|
151
|
-
# Create a CuPy array from the list of feature vectors
|
|
152
|
-
features_array = cp.stack(features_list)
|
|
153
|
-
# Convert to NumPy explicitly using .get()
|
|
154
|
-
features = features_array.get()
|
|
155
|
-
except Exception as e:
|
|
156
|
-
print(f"Error converting features to NumPy: {e}")
|
|
157
|
-
# Fallback: convert each feature individually
|
|
158
|
-
features = []
|
|
159
|
-
for feat in features_list:
|
|
160
|
-
if hasattr(feat, 'get'):
|
|
161
|
-
features.append(feat.get())
|
|
162
|
-
else:
|
|
163
|
-
features.append(feat)
|
|
164
|
-
else:
|
|
165
|
-
# If it's already a NumPy array, we can use it directly
|
|
166
|
-
features = features_list
|
|
167
|
-
|
|
168
|
-
# Skip empty blocks
|
|
169
|
-
if not coords:
|
|
170
|
-
continue
|
|
171
|
-
|
|
172
|
-
# Predict
|
|
173
|
-
try:
|
|
174
|
-
try:
|
|
175
|
-
predictions = self.model.predict(features)
|
|
176
|
-
except ValueError:
|
|
177
|
-
self.feature_cache = None
|
|
178
|
-
self.map_slice = None
|
|
179
|
-
return None, None
|
|
180
|
-
|
|
181
|
-
# Split results
|
|
182
|
-
foreground = set()
|
|
183
|
-
background = set()
|
|
184
|
-
|
|
185
|
-
for coord, pred in zip(coords, predictions):
|
|
186
|
-
if pred:
|
|
187
|
-
foreground.add(coord)
|
|
188
|
-
else:
|
|
189
|
-
background.add(coord)
|
|
190
|
-
|
|
191
|
-
# Yield this chunk
|
|
192
|
-
chunk_count += 1
|
|
193
|
-
yield foreground, background
|
|
194
|
-
|
|
195
|
-
except Exception as e:
|
|
196
|
-
print(f"Error processing chunk: {e}")
|
|
197
|
-
import traceback
|
|
198
|
-
traceback.print_exc()
|
|
199
|
-
|
|
200
|
-
finally:
|
|
201
|
-
# Only clear if we're still processing the same slice
|
|
202
|
-
# (otherwise, another slice might have taken over)
|
|
203
|
-
if self._currently_processing == slice_z:
|
|
204
|
-
self._currently_processing = None
|
|
205
72
|
|
|
206
73
|
def process_chunk(self, chunk_coords):
|
|
207
74
|
"""Process a chunk staying in CuPy as much as possible"""
|
|
@@ -209,116 +76,61 @@ class InteractiveSegmenter:
|
|
|
209
76
|
foreground_coords = [] # Keep as list of CuPy coordinates
|
|
210
77
|
background_coords = []
|
|
211
78
|
|
|
212
|
-
if self.previewing or not self.use_two:
|
|
213
79
|
|
|
214
|
-
|
|
215
|
-
|
|
216
|
-
|
|
217
|
-
|
|
218
|
-
|
|
219
|
-
# Create meshgrid using CuPy - already good
|
|
220
|
-
z_range = cp.arange(z_min, z_max)
|
|
221
|
-
y_range = cp.arange(y_min, y_max)
|
|
222
|
-
x_range = cp.arange(x_min, x_max)
|
|
223
|
-
|
|
224
|
-
# More efficient way to create coordinates
|
|
225
|
-
chunk_coords_array = cp.stack(cp.meshgrid(
|
|
226
|
-
z_range, y_range, x_range, indexing='ij'
|
|
227
|
-
)).reshape(3, -1).T
|
|
228
|
-
|
|
229
|
-
# Keep as CuPy array instead of converting to list
|
|
230
|
-
chunk_coords_gpu = chunk_coords_array
|
|
231
|
-
else:
|
|
232
|
-
# Convert list to CuPy array once
|
|
233
|
-
chunk_coords_gpu = cp.array(chunk_coords)
|
|
234
|
-
z_coords = chunk_coords_gpu[:, 0]
|
|
235
|
-
y_coords = chunk_coords_gpu[:, 1]
|
|
236
|
-
x_coords = chunk_coords_gpu[:, 2]
|
|
237
|
-
|
|
238
|
-
z_min, z_max = cp.min(z_coords).item(), cp.max(z_coords).item()
|
|
239
|
-
y_min, y_max = cp.min(y_coords).item(), cp.max(y_coords).item()
|
|
240
|
-
x_min, x_max = cp.min(x_coords).item(), cp.max(x_coords).item()
|
|
241
|
-
|
|
242
|
-
# Extract subarray - already good
|
|
243
|
-
subarray = self.image_3d[z_min:z_max+1, y_min:y_max+1, x_min:x_max+1]
|
|
244
|
-
|
|
245
|
-
# Compute features
|
|
246
|
-
if self.speed:
|
|
247
|
-
feature_map = self.compute_feature_maps_gpu(subarray)
|
|
248
|
-
else:
|
|
249
|
-
feature_map = self.compute_deep_feature_maps_gpu(subarray)
|
|
250
|
-
|
|
251
|
-
# Extract features more efficiently
|
|
252
|
-
local_coords = chunk_coords_gpu.copy()
|
|
253
|
-
local_coords[:, 0] -= z_min
|
|
254
|
-
local_coords[:, 1] -= y_min
|
|
255
|
-
local_coords[:, 2] -= x_min
|
|
80
|
+
if self.realtimechunks is None:
|
|
81
|
+
z_min, z_max = chunk_coords[0], chunk_coords[1]
|
|
82
|
+
y_min, y_max = chunk_coords[2], chunk_coords[3]
|
|
83
|
+
x_min, x_max = chunk_coords[4], chunk_coords[5]
|
|
256
84
|
|
|
257
|
-
#
|
|
258
|
-
|
|
85
|
+
# Create meshgrid using CuPy - already good
|
|
86
|
+
z_range = cp.arange(z_min, z_max)
|
|
87
|
+
y_range = cp.arange(y_min, y_max)
|
|
88
|
+
x_range = cp.arange(x_min, x_max)
|
|
259
89
|
|
|
260
|
-
|
|
261
|
-
|
|
90
|
+
# More efficient way to create coordinates
|
|
91
|
+
chunk_coords_array = cp.stack(cp.meshgrid(
|
|
92
|
+
z_range, y_range, x_range, indexing='ij'
|
|
93
|
+
)).reshape(3, -1).T
|
|
262
94
|
|
|
263
|
-
# Keep
|
|
264
|
-
|
|
265
|
-
foreground_coords = chunk_coords_gpu[pred_mask]
|
|
266
|
-
background_coords = chunk_coords_gpu[~pred_mask]
|
|
267
|
-
|
|
95
|
+
# Keep as CuPy array instead of converting to list
|
|
96
|
+
chunk_coords_gpu = chunk_coords_array
|
|
268
97
|
else:
|
|
269
|
-
#
|
|
270
|
-
|
|
271
|
-
|
|
272
|
-
|
|
273
|
-
|
|
274
|
-
if len(chunk_coords) == 5:
|
|
275
|
-
z = chunk_coords[0]
|
|
276
|
-
y_start = chunk_coords[1]
|
|
277
|
-
y_end = chunk_coords[2]
|
|
278
|
-
x_start = chunk_coords[3]
|
|
279
|
-
x_end = chunk_coords[4]
|
|
280
|
-
|
|
281
|
-
# Generate coordinates for this slice or subchunk using the new function
|
|
282
|
-
coords_array = self.twodim_coords(z, y_start, y_end, x_start, x_end)
|
|
283
|
-
|
|
284
|
-
# Get the feature map for this z-slice
|
|
285
|
-
if self.feature_cache is None:
|
|
286
|
-
feature_map = self.get_feature_map_slice(z, self.speed, True) # Use GPU
|
|
287
|
-
elif z not in self.feature_cache and not self.previewing:
|
|
288
|
-
feature_map = self.get_feature_map_slice(z, self.speed, True) # Use GPU
|
|
289
|
-
elif (z not in self.feature_cache or self.feature_cache is None) and self.previewing:
|
|
290
|
-
feature_map = self.map_slice
|
|
291
|
-
if feature_map is None:
|
|
292
|
-
return [], []
|
|
293
|
-
else:
|
|
294
|
-
feature_map = self.feature_cache[z]
|
|
295
|
-
|
|
296
|
-
# Check if we have a valid feature map
|
|
297
|
-
if feature_map is None:
|
|
298
|
-
return [], []
|
|
299
|
-
|
|
300
|
-
# Extract y and x coordinates from the array
|
|
301
|
-
y_indices = coords_array[:, 1]
|
|
302
|
-
x_indices = coords_array[:, 2]
|
|
303
|
-
|
|
304
|
-
# Extract features using CuPy indexing
|
|
305
|
-
features_gpu = feature_map[y_indices, x_indices]
|
|
306
|
-
|
|
307
|
-
# Convert to NumPy for the model
|
|
308
|
-
features_cpu = features_gpu.get()
|
|
309
|
-
|
|
310
|
-
# Make predictions
|
|
311
|
-
predictions = self.model.predict(features_cpu)
|
|
312
|
-
|
|
313
|
-
# Create CuPy boolean mask from predictions
|
|
314
|
-
pred_mask = cp.array(predictions, dtype=bool)
|
|
315
|
-
|
|
316
|
-
# Split into foreground and background using the mask
|
|
317
|
-
fore_coords = coords_array[pred_mask]
|
|
318
|
-
back_coords = coords_array[~pred_mask]
|
|
319
|
-
|
|
320
|
-
return fore_coords, back_coords
|
|
98
|
+
# Convert list to CuPy array once
|
|
99
|
+
chunk_coords_gpu = cp.array(chunk_coords)
|
|
100
|
+
z_coords = chunk_coords_gpu[:, 0]
|
|
101
|
+
y_coords = chunk_coords_gpu[:, 1]
|
|
102
|
+
x_coords = chunk_coords_gpu[:, 2]
|
|
321
103
|
|
|
104
|
+
z_min, z_max = cp.min(z_coords).item(), cp.max(z_coords).item()
|
|
105
|
+
y_min, y_max = cp.min(y_coords).item(), cp.max(y_coords).item()
|
|
106
|
+
x_min, x_max = cp.min(x_coords).item(), cp.max(x_coords).item()
|
|
107
|
+
|
|
108
|
+
# Extract subarray - already good
|
|
109
|
+
subarray = self.image_3d[z_min:z_max+1, y_min:y_max+1, x_min:x_max+1]
|
|
110
|
+
|
|
111
|
+
# Compute features
|
|
112
|
+
if self.speed:
|
|
113
|
+
feature_map = self.compute_feature_maps_gpu(subarray)
|
|
114
|
+
else:
|
|
115
|
+
feature_map = self.compute_deep_feature_maps_gpu(subarray)
|
|
116
|
+
|
|
117
|
+
# Extract features more efficiently
|
|
118
|
+
local_coords = chunk_coords_gpu.copy()
|
|
119
|
+
local_coords[:, 0] -= z_min
|
|
120
|
+
local_coords[:, 1] -= y_min
|
|
121
|
+
local_coords[:, 2] -= x_min
|
|
122
|
+
|
|
123
|
+
# Vectorized feature extraction
|
|
124
|
+
features_gpu = feature_map[local_coords[:, 0], local_coords[:, 1], local_coords[:, 2]]
|
|
125
|
+
|
|
126
|
+
features_cpu = cp.asnumpy(features_gpu)
|
|
127
|
+
predictions = self.model.predict(features_cpu)
|
|
128
|
+
|
|
129
|
+
# Keep coordinates as CuPy arrays
|
|
130
|
+
pred_mask = cp.array(predictions, dtype=bool)
|
|
131
|
+
foreground_coords = chunk_coords_gpu[pred_mask]
|
|
132
|
+
background_coords = chunk_coords_gpu[~pred_mask]
|
|
133
|
+
|
|
322
134
|
return foreground_coords, background_coords
|
|
323
135
|
|
|
324
136
|
def twodim_coords(self, z, y_start, y_end, x_start, x_end):
|
|
@@ -356,337 +168,424 @@ class InteractiveSegmenter:
|
|
|
356
168
|
|
|
357
169
|
return slice_coords
|
|
358
170
|
|
|
171
|
+
|
|
359
172
|
def compute_feature_maps_gpu(self, image_3d=None):
|
|
360
|
-
"""
|
|
173
|
+
"""Optimized GPU version that caches Gaussian filters to avoid redundant computation"""
|
|
361
174
|
import cupy as cp
|
|
362
175
|
import cupyx.scipy.ndimage as cupy_ndimage
|
|
363
176
|
|
|
364
|
-
features = []
|
|
365
177
|
if image_3d is None:
|
|
366
178
|
image_3d = self.image_3d # Assuming this is already a cupy array
|
|
179
|
+
|
|
180
|
+
if image_3d.ndim == 4 and image_3d.shape[-1] == 3:
|
|
181
|
+
# RGB case - process each channel
|
|
182
|
+
features_per_channel = []
|
|
183
|
+
for channel in range(3):
|
|
184
|
+
channel_features = self.compute_feature_maps_gpu(image_3d[..., channel])
|
|
185
|
+
features_per_channel.append(channel_features)
|
|
186
|
+
|
|
187
|
+
# Stack all channel features
|
|
188
|
+
return cp.concatenate(features_per_channel, axis=-1)
|
|
367
189
|
|
|
368
|
-
|
|
190
|
+
# Pre-allocate result array
|
|
191
|
+
num_features = len(self.sigmas) + len(self.dogs) + 2
|
|
192
|
+
features = cp.empty(image_3d.shape + (num_features,), dtype=image_3d.dtype)
|
|
193
|
+
features[..., 0] = image_3d
|
|
369
194
|
|
|
370
|
-
|
|
371
|
-
for sigma in self.alphas:
|
|
372
|
-
smooth = cupy_ndimage.gaussian_filter(image_3d, sigma)
|
|
373
|
-
features.append(smooth)
|
|
195
|
+
feature_idx = 1
|
|
374
196
|
|
|
375
|
-
#
|
|
376
|
-
|
|
377
|
-
g1 = cupy_ndimage.gaussian_filter(image_3d, s1)
|
|
378
|
-
g2 = cupy_ndimage.gaussian_filter(image_3d, s2)
|
|
379
|
-
dog = g1 - g2
|
|
380
|
-
features.append(dog)
|
|
197
|
+
# Cache for Gaussian filters - only compute each sigma once
|
|
198
|
+
gaussian_cache = {}
|
|
381
199
|
|
|
382
|
-
#
|
|
383
|
-
|
|
384
|
-
|
|
385
|
-
|
|
200
|
+
# Compute all unique sigmas needed (from both sigmas and dogs)
|
|
201
|
+
all_sigmas = set(self.sigmas)
|
|
202
|
+
for s1, s2 in self.dogs:
|
|
203
|
+
all_sigmas.add(s1)
|
|
204
|
+
all_sigmas.add(s2)
|
|
386
205
|
|
|
387
|
-
#
|
|
388
|
-
|
|
389
|
-
|
|
206
|
+
# Pre-compute all Gaussian filters
|
|
207
|
+
for sigma in all_sigmas:
|
|
208
|
+
gaussian_cache[sigma] = cupy_ndimage.gaussian_filter(image_3d, sigma)
|
|
209
|
+
|
|
210
|
+
# Gaussian smoothing - use cached results
|
|
211
|
+
for sigma in self.sigmas:
|
|
212
|
+
features[..., feature_idx] = gaussian_cache[sigma]
|
|
213
|
+
feature_idx += 1
|
|
214
|
+
|
|
215
|
+
# Difference of Gaussians - use cached results
|
|
216
|
+
for s1, s2 in self.dogs:
|
|
217
|
+
features[..., feature_idx] = gaussian_cache[s1] - gaussian_cache[s2]
|
|
218
|
+
feature_idx += 1
|
|
390
219
|
|
|
391
|
-
#
|
|
392
|
-
|
|
393
|
-
|
|
394
|
-
|
|
395
|
-
|
|
396
|
-
raise ValueError(f"Feature {i} has shape {feat.shape}, expected {original_shape}")
|
|
397
|
-
features[i] = feat_adjusted
|
|
220
|
+
# Gradient magnitude
|
|
221
|
+
gx = cupy_ndimage.sobel(image_3d, axis=2, mode='reflect')
|
|
222
|
+
gy = cupy_ndimage.sobel(image_3d, axis=1, mode='reflect')
|
|
223
|
+
gz = cupy_ndimage.sobel(image_3d, axis=0, mode='reflect')
|
|
224
|
+
features[..., feature_idx] = cp.sqrt(gx**2 + gy**2 + gz**2)
|
|
398
225
|
|
|
399
|
-
return
|
|
226
|
+
return features
|
|
400
227
|
|
|
401
228
|
def compute_deep_feature_maps_gpu(self, image_3d=None):
|
|
402
|
-
"""
|
|
229
|
+
"""Vectorized detailed GPU version with Gaussian gradient magnitudes, Laplacians, and largest Hessian eigenvalue only"""
|
|
403
230
|
import cupy as cp
|
|
404
231
|
import cupyx.scipy.ndimage as cupy_ndimage
|
|
405
232
|
|
|
406
|
-
features = []
|
|
407
233
|
if image_3d is None:
|
|
408
234
|
image_3d = self.image_3d # Assuming this is already a cupy array
|
|
409
|
-
|
|
410
|
-
|
|
411
|
-
|
|
412
|
-
|
|
413
|
-
|
|
414
|
-
|
|
415
|
-
|
|
416
|
-
|
|
417
|
-
|
|
418
|
-
|
|
419
|
-
|
|
420
|
-
|
|
421
|
-
|
|
422
|
-
|
|
423
|
-
#
|
|
424
|
-
|
|
425
|
-
|
|
426
|
-
|
|
427
|
-
|
|
428
|
-
|
|
429
|
-
features.
|
|
430
|
-
|
|
431
|
-
|
|
432
|
-
|
|
433
|
-
|
|
434
|
-
|
|
435
|
-
|
|
436
|
-
|
|
437
|
-
|
|
438
|
-
|
|
439
|
-
|
|
440
|
-
|
|
441
|
-
|
|
442
|
-
|
|
443
|
-
|
|
444
|
-
|
|
445
|
-
|
|
446
|
-
|
|
447
|
-
|
|
448
|
-
|
|
449
|
-
|
|
450
|
-
|
|
451
|
-
|
|
452
|
-
|
|
453
|
-
|
|
454
|
-
|
|
455
|
-
|
|
456
|
-
|
|
235
|
+
|
|
236
|
+
if image_3d.ndim == 4 and image_3d.shape[-1] == 3:
|
|
237
|
+
# RGB case - process each channel
|
|
238
|
+
features_per_channel = []
|
|
239
|
+
for channel in range(3):
|
|
240
|
+
channel_features = self.compute_deep_feature_maps_gpu(image_3d[..., channel])
|
|
241
|
+
features_per_channel.append(channel_features)
|
|
242
|
+
|
|
243
|
+
# Stack all channel features
|
|
244
|
+
return cp.concatenate(features_per_channel, axis=-1)
|
|
245
|
+
|
|
246
|
+
# Calculate total number of features
|
|
247
|
+
num_basic_features = 1 + len(self.sigmas) + len(self.dogs) # original + gaussians + dogs
|
|
248
|
+
num_gradient_features = len(self.sigmas) # gradient magnitude for each sigma
|
|
249
|
+
num_laplacian_features = len(self.sigmas) # laplacian for each sigma
|
|
250
|
+
num_hessian_features = len(self.sigmas) * 1 # 1 eigenvalue (largest) for each sigma
|
|
251
|
+
|
|
252
|
+
total_features = num_basic_features + num_gradient_features + num_laplacian_features + num_hessian_features
|
|
253
|
+
|
|
254
|
+
# Pre-allocate result array
|
|
255
|
+
features = cp.empty(image_3d.shape + (total_features,), dtype=image_3d.dtype)
|
|
256
|
+
features[..., 0] = image_3d
|
|
257
|
+
|
|
258
|
+
feature_idx = 1
|
|
259
|
+
|
|
260
|
+
# Cache for Gaussian filters - only compute each sigma once
|
|
261
|
+
gaussian_cache = {}
|
|
262
|
+
|
|
263
|
+
# Compute all unique sigmas needed (from both sigmas and dogs)
|
|
264
|
+
all_sigmas = set(self.sigmas)
|
|
265
|
+
for s1, s2 in self.dogs:
|
|
266
|
+
all_sigmas.add(s1)
|
|
267
|
+
all_sigmas.add(s2)
|
|
268
|
+
|
|
269
|
+
# Pre-compute all Gaussian filters
|
|
270
|
+
for sigma in all_sigmas:
|
|
271
|
+
gaussian_cache[sigma] = cupy_ndimage.gaussian_filter(image_3d, sigma)
|
|
272
|
+
|
|
273
|
+
# Gaussian smoothing - use cached results
|
|
274
|
+
for sigma in self.sigmas:
|
|
275
|
+
features[..., feature_idx] = gaussian_cache[sigma]
|
|
276
|
+
feature_idx += 1
|
|
277
|
+
|
|
278
|
+
# Difference of Gaussians - use cached results
|
|
279
|
+
for s1, s2 in self.dogs:
|
|
280
|
+
features[..., feature_idx] = gaussian_cache[s1] - gaussian_cache[s2]
|
|
281
|
+
feature_idx += 1
|
|
282
|
+
|
|
283
|
+
# Gaussian gradient magnitudes for each sigma (vectorized)
|
|
284
|
+
for sigma in self.sigmas:
|
|
285
|
+
gaussian_img = gaussian_cache[sigma]
|
|
286
|
+
gx = cupy_ndimage.sobel(gaussian_img, axis=2, mode='reflect')
|
|
287
|
+
gy = cupy_ndimage.sobel(gaussian_img, axis=1, mode='reflect')
|
|
288
|
+
gz = cupy_ndimage.sobel(gaussian_img, axis=0, mode='reflect')
|
|
289
|
+
features[..., feature_idx] = cp.sqrt(gx**2 + gy**2 + gz**2)
|
|
290
|
+
feature_idx += 1
|
|
291
|
+
|
|
292
|
+
# Laplacian of Gaussian for each sigma (vectorized)
|
|
293
|
+
for sigma in self.sigmas:
|
|
294
|
+
gaussian_img = gaussian_cache[sigma]
|
|
295
|
+
features[..., feature_idx] = cupy_ndimage.laplace(gaussian_img, mode='reflect')
|
|
296
|
+
feature_idx += 1
|
|
297
|
+
|
|
298
|
+
# Largest Hessian eigenvalue for each sigma (fully vectorized)
|
|
299
|
+
for sigma in self.sigmas:
|
|
300
|
+
gaussian_img = gaussian_cache[sigma]
|
|
301
|
+
|
|
302
|
+
# Compute second derivatives (Hessian components) - all vectorized
|
|
303
|
+
hxx = cupy_ndimage.gaussian_filter(gaussian_img, sigma=0, order=[0, 0, 2], mode='reflect')
|
|
304
|
+
hyy = cupy_ndimage.gaussian_filter(gaussian_img, sigma=0, order=[0, 2, 0], mode='reflect')
|
|
305
|
+
hzz = cupy_ndimage.gaussian_filter(gaussian_img, sigma=0, order=[2, 0, 0], mode='reflect')
|
|
306
|
+
hxy = cupy_ndimage.gaussian_filter(gaussian_img, sigma=0, order=[0, 1, 1], mode='reflect')
|
|
307
|
+
hxz = cupy_ndimage.gaussian_filter(gaussian_img, sigma=0, order=[1, 0, 1], mode='reflect')
|
|
308
|
+
hyz = cupy_ndimage.gaussian_filter(gaussian_img, sigma=0, order=[1, 1, 0], mode='reflect')
|
|
309
|
+
|
|
310
|
+
# Vectorized eigenvalue computation using cupy broadcasting
|
|
311
|
+
# Create arrays with shape (d0, d1, d2, 3, 3) for all Hessian matrices
|
|
312
|
+
shape = image_3d.shape
|
|
313
|
+
hessian_matrices = cp.zeros(shape + (3, 3))
|
|
314
|
+
|
|
315
|
+
# Fill the symmetric Hessian matrices
|
|
316
|
+
hessian_matrices[..., 0, 0] = hxx
|
|
317
|
+
hessian_matrices[..., 1, 1] = hyy
|
|
318
|
+
hessian_matrices[..., 2, 2] = hzz
|
|
319
|
+
hessian_matrices[..., 0, 1] = hessian_matrices[..., 1, 0] = hxy
|
|
320
|
+
hessian_matrices[..., 0, 2] = hessian_matrices[..., 2, 0] = hxz
|
|
321
|
+
hessian_matrices[..., 1, 2] = hessian_matrices[..., 2, 1] = hyz
|
|
322
|
+
|
|
323
|
+
# Reshape for batch eigenvalue computation
|
|
324
|
+
original_shape = hessian_matrices.shape[:-2] # (d0, d1, d2)
|
|
325
|
+
batch_size = int(cp.prod(cp.array(original_shape)))
|
|
326
|
+
hessian_batch = hessian_matrices.reshape(batch_size, 3, 3)
|
|
327
|
+
|
|
328
|
+
# Compute eigenvalues for all matrices at once using CuPy
|
|
329
|
+
# Since Hessian matrices are symmetric, we can use eigvalsh
|
|
330
|
+
eigenvalues_batch = cp.linalg.eigvalsh(hessian_batch)
|
|
331
|
+
|
|
332
|
+
# Get only the largest eigenvalue for each matrix
|
|
333
|
+
largest_eigenvalues = cp.max(eigenvalues_batch, axis=1)
|
|
334
|
+
|
|
335
|
+
# Reshape back to original spatial dimensions
|
|
336
|
+
largest_eigenvalues = largest_eigenvalues.reshape(original_shape)
|
|
337
|
+
|
|
338
|
+
# Add the largest eigenvalue as a feature
|
|
339
|
+
features[..., feature_idx] = largest_eigenvalues
|
|
340
|
+
feature_idx += 1
|
|
457
341
|
|
|
458
|
-
#
|
|
459
|
-
|
|
460
|
-
|
|
461
|
-
|
|
462
|
-
|
|
463
|
-
|
|
464
|
-
|
|
342
|
+
# Normalize only morphological features, keep intensity features raw
|
|
343
|
+
intensity_features = features[..., :num_basic_features] # original + gaussians + DoGs
|
|
344
|
+
morphology_features = features[..., num_basic_features:] # gradients + laplacians + eigenvalues
|
|
345
|
+
|
|
346
|
+
# Normalize only morphological features using CuPy
|
|
347
|
+
morph_means = cp.mean(morphology_features, axis=(0, 1, 2), keepdims=True)
|
|
348
|
+
morph_stds = cp.std(morphology_features, axis=(0, 1, 2), keepdims=True)
|
|
349
|
+
morph_stds = cp.where(morph_stds == 0, 1, morph_stds)
|
|
350
|
+
morphology_features = (morphology_features - morph_means) / morph_stds
|
|
351
|
+
|
|
352
|
+
# Recombine
|
|
353
|
+
features = cp.concatenate([intensity_features, morphology_features], axis=-1)
|
|
465
354
|
|
|
466
|
-
return
|
|
355
|
+
return features
|
|
467
356
|
|
|
468
357
|
|
|
469
|
-
def compute_feature_maps_gpu_2d(self, z=None):
|
|
470
|
-
"""Compute feature maps for 2D images using GPU with
|
|
358
|
+
def compute_feature_maps_gpu_2d(self, z=None, image_2d = None):
|
|
359
|
+
"""Compute feature maps for 2D images using GPU with caching optimization"""
|
|
471
360
|
import cupy as cp
|
|
472
361
|
import cupyx.scipy.ndimage as cupy_ndimage
|
|
473
362
|
|
|
474
|
-
# Extract 2D slice
|
|
475
|
-
if
|
|
363
|
+
# Extract 2D slice - convert to CuPy array if needed
|
|
364
|
+
if image_2d is None:
|
|
476
365
|
image_2d = cp.asarray(self.image_3d[z, :, :])
|
|
477
|
-
|
|
478
|
-
|
|
479
|
-
|
|
366
|
+
|
|
367
|
+
if image_2d.ndim == 3 and image_2d.shape[-1] == 3:
|
|
368
|
+
# RGB case - process each channel
|
|
369
|
+
features_per_channel = []
|
|
370
|
+
for channel in range(3):
|
|
371
|
+
channel_features = self.compute_feature_maps_gpu_2d(image_2d = image_2d[..., channel])
|
|
372
|
+
features_per_channel.append(channel_features)
|
|
373
|
+
|
|
374
|
+
# Stack all channel features
|
|
375
|
+
return cp.concatenate(features_per_channel, axis=-1)
|
|
480
376
|
|
|
481
|
-
|
|
482
|
-
|
|
377
|
+
# Pre-allocate result array
|
|
378
|
+
num_features = len(self.sigmas) + len(self.dogs) + 2 # +2 for original image + gradient
|
|
379
|
+
features = cp.empty(image_2d.shape + (num_features,), dtype=image_2d.dtype)
|
|
483
380
|
|
|
484
|
-
#
|
|
485
|
-
|
|
486
|
-
|
|
487
|
-
features.append(smooth)
|
|
381
|
+
# Include original image as first feature
|
|
382
|
+
features[..., 0] = image_2d
|
|
383
|
+
feature_idx = 1
|
|
488
384
|
|
|
489
|
-
#
|
|
490
|
-
|
|
491
|
-
g1 = cupy_ndimage.gaussian_filter(image_2d, s1)
|
|
492
|
-
g2 = cupy_ndimage.gaussian_filter(image_2d, s2)
|
|
493
|
-
dog = g1 - g2
|
|
494
|
-
features.append(dog)
|
|
385
|
+
# Cache for Gaussian filters - only compute each sigma once
|
|
386
|
+
gaussian_cache = {}
|
|
495
387
|
|
|
496
|
-
#
|
|
497
|
-
|
|
498
|
-
|
|
388
|
+
# Compute all unique sigmas needed (from both sigmas and dogs)
|
|
389
|
+
all_sigmas = set(self.sigmas)
|
|
390
|
+
for s1, s2 in self.dogs:
|
|
391
|
+
all_sigmas.add(s1)
|
|
392
|
+
all_sigmas.add(s2)
|
|
499
393
|
|
|
500
|
-
#
|
|
501
|
-
|
|
502
|
-
|
|
503
|
-
|
|
504
|
-
#
|
|
505
|
-
for
|
|
506
|
-
|
|
507
|
-
|
|
508
|
-
if len(feat.shape) < len(original_shape):
|
|
509
|
-
feat_adjusted = feat
|
|
510
|
-
missing_dims = len(original_shape) - len(feat.shape)
|
|
511
|
-
for _ in range(missing_dims):
|
|
512
|
-
feat_adjusted = cp.expand_dims(feat_adjusted, axis=0)
|
|
513
|
-
|
|
514
|
-
if feat_adjusted.shape != original_shape:
|
|
515
|
-
raise ValueError(f"Feature {i} has shape {feat.shape}, expected {original_shape}")
|
|
516
|
-
|
|
517
|
-
features[i] = feat_adjusted
|
|
394
|
+
# Pre-compute all Gaussian filters
|
|
395
|
+
for sigma in all_sigmas:
|
|
396
|
+
gaussian_cache[sigma] = cupy_ndimage.gaussian_filter(image_2d, sigma)
|
|
397
|
+
|
|
398
|
+
# Gaussian smoothing - use cached results
|
|
399
|
+
for sigma in self.sigmas:
|
|
400
|
+
features[..., feature_idx] = gaussian_cache[sigma]
|
|
401
|
+
feature_idx += 1
|
|
518
402
|
|
|
519
|
-
#
|
|
520
|
-
|
|
403
|
+
# Difference of Gaussians - use cached results
|
|
404
|
+
for s1, s2 in self.dogs:
|
|
405
|
+
features[..., feature_idx] = gaussian_cache[s1] - gaussian_cache[s2]
|
|
406
|
+
feature_idx += 1
|
|
521
407
|
|
|
522
|
-
#
|
|
523
|
-
|
|
408
|
+
# Gradient magnitude (2D version)
|
|
409
|
+
gx = cupy_ndimage.sobel(image_2d, axis=1, mode='reflect') # x direction
|
|
410
|
+
gy = cupy_ndimage.sobel(image_2d, axis=0, mode='reflect') # y direction
|
|
411
|
+
features[..., feature_idx] = cp.sqrt(gx**2 + gy**2)
|
|
524
412
|
|
|
525
|
-
return
|
|
413
|
+
return features
|
|
526
414
|
|
|
527
|
-
def compute_deep_feature_maps_gpu_2d(self, z=None):
|
|
528
|
-
"""
|
|
415
|
+
def compute_deep_feature_maps_gpu_2d(self, z=None, image_2d = None):
|
|
416
|
+
"""Vectorized detailed GPU version with Gaussian gradient magnitudes, Laplacians, and largest Hessian eigenvalue for 2D images"""
|
|
529
417
|
import cupy as cp
|
|
530
418
|
import cupyx.scipy.ndimage as cupy_ndimage
|
|
531
419
|
|
|
532
|
-
|
|
533
|
-
|
|
420
|
+
if z is None:
|
|
421
|
+
z = self.image_3d.shape[0] // 2 # Use middle slice if not specified
|
|
422
|
+
|
|
423
|
+
# Extract 2D slice - convert to CuPy array if needed
|
|
424
|
+
if image_2d is None:
|
|
534
425
|
image_2d = cp.asarray(self.image_3d[z, :, :])
|
|
535
|
-
|
|
536
|
-
|
|
537
|
-
|
|
538
|
-
|
|
539
|
-
|
|
540
|
-
|
|
541
|
-
|
|
542
|
-
|
|
543
|
-
|
|
544
|
-
|
|
545
|
-
|
|
546
|
-
|
|
547
|
-
|
|
548
|
-
|
|
549
|
-
|
|
550
|
-
|
|
551
|
-
#
|
|
552
|
-
|
|
553
|
-
|
|
554
|
-
|
|
555
|
-
|
|
556
|
-
|
|
557
|
-
|
|
558
|
-
|
|
559
|
-
|
|
560
|
-
|
|
561
|
-
|
|
562
|
-
|
|
563
|
-
|
|
564
|
-
|
|
565
|
-
|
|
566
|
-
|
|
567
|
-
|
|
568
|
-
|
|
569
|
-
|
|
570
|
-
|
|
571
|
-
|
|
572
|
-
|
|
573
|
-
|
|
426
|
+
|
|
427
|
+
if image_2d.ndim == 3 and image_2d.shape[-1] == 3:
|
|
428
|
+
# RGB case - process each channel
|
|
429
|
+
features_per_channel = []
|
|
430
|
+
for channel in range(3):
|
|
431
|
+
channel_features = self.compute_deep_feature_maps_gpu_2d(image_2d = image_2d[..., channel])
|
|
432
|
+
features_per_channel.append(channel_features)
|
|
433
|
+
|
|
434
|
+
# Stack all channel features
|
|
435
|
+
return cp.concatenate(features_per_channel, axis=-1)
|
|
436
|
+
|
|
437
|
+
|
|
438
|
+
# Calculate total number of features
|
|
439
|
+
num_basic_features = 1 + len(self.sigmas) + len(self.dogs) # original + gaussians + dogs
|
|
440
|
+
num_gradient_features = len(self.sigmas) # gradient magnitude for each sigma
|
|
441
|
+
num_laplacian_features = len(self.sigmas) # laplacian for each sigma
|
|
442
|
+
num_hessian_features = len(self.sigmas) * 1 # 1 eigenvalue (largest) for each sigma
|
|
443
|
+
|
|
444
|
+
total_features = num_basic_features + num_gradient_features + num_laplacian_features + num_hessian_features
|
|
445
|
+
|
|
446
|
+
# Pre-allocate result array
|
|
447
|
+
features = cp.empty(image_2d.shape + (total_features,), dtype=image_2d.dtype)
|
|
448
|
+
features[..., 0] = image_2d
|
|
449
|
+
|
|
450
|
+
feature_idx = 1
|
|
451
|
+
|
|
452
|
+
# Cache for Gaussian filters - only compute each sigma once
|
|
453
|
+
gaussian_cache = {}
|
|
454
|
+
|
|
455
|
+
# Compute all unique sigmas needed (from both sigmas and dogs)
|
|
456
|
+
all_sigmas = set(self.sigmas)
|
|
457
|
+
for s1, s2 in self.dogs:
|
|
458
|
+
all_sigmas.add(s1)
|
|
459
|
+
all_sigmas.add(s2)
|
|
460
|
+
|
|
461
|
+
# Pre-compute all Gaussian filters
|
|
462
|
+
for sigma in all_sigmas:
|
|
463
|
+
gaussian_cache[sigma] = cupy_ndimage.gaussian_filter(image_2d, sigma)
|
|
464
|
+
|
|
465
|
+
# Gaussian smoothing - use cached results
|
|
466
|
+
for sigma in self.sigmas:
|
|
467
|
+
features[..., feature_idx] = gaussian_cache[sigma]
|
|
468
|
+
feature_idx += 1
|
|
469
|
+
|
|
470
|
+
# Difference of Gaussians - use cached results
|
|
471
|
+
for s1, s2 in self.dogs:
|
|
472
|
+
features[..., feature_idx] = gaussian_cache[s1] - gaussian_cache[s2]
|
|
473
|
+
feature_idx += 1
|
|
474
|
+
|
|
475
|
+
# Gaussian gradient magnitudes for each sigma (vectorized, 2D version)
|
|
476
|
+
for sigma in self.sigmas:
|
|
477
|
+
gaussian_img = gaussian_cache[sigma]
|
|
478
|
+
gx = cupy_ndimage.sobel(gaussian_img, axis=1, mode='reflect') # x direction
|
|
479
|
+
gy = cupy_ndimage.sobel(gaussian_img, axis=0, mode='reflect') # y direction
|
|
480
|
+
features[..., feature_idx] = cp.sqrt(gx**2 + gy**2)
|
|
481
|
+
feature_idx += 1
|
|
482
|
+
|
|
483
|
+
# Laplacian of Gaussian for each sigma (vectorized, 2D version)
|
|
484
|
+
for sigma in self.sigmas:
|
|
485
|
+
gaussian_img = gaussian_cache[sigma]
|
|
486
|
+
features[..., feature_idx] = cupy_ndimage.laplace(gaussian_img, mode='reflect')
|
|
487
|
+
feature_idx += 1
|
|
488
|
+
|
|
489
|
+
# Largest Hessian eigenvalue for each sigma (fully vectorized, 2D version)
|
|
490
|
+
for sigma in self.sigmas:
|
|
491
|
+
gaussian_img = gaussian_cache[sigma]
|
|
492
|
+
|
|
493
|
+
# Compute second derivatives (Hessian components) - all vectorized for 2D
|
|
494
|
+
hxx = cupy_ndimage.gaussian_filter(gaussian_img, sigma=0, order=[0, 2], mode='reflect')
|
|
495
|
+
hyy = cupy_ndimage.gaussian_filter(gaussian_img, sigma=0, order=[2, 0], mode='reflect')
|
|
496
|
+
hxy = cupy_ndimage.gaussian_filter(gaussian_img, sigma=0, order=[1, 1], mode='reflect')
|
|
497
|
+
|
|
498
|
+
# Vectorized eigenvalue computation using cupy broadcasting
|
|
499
|
+
# Create arrays with shape (d0, d1, 2, 2) for all 2D Hessian matrices
|
|
500
|
+
shape = image_2d.shape
|
|
501
|
+
hessian_matrices = cp.zeros(shape + (2, 2))
|
|
502
|
+
|
|
503
|
+
# Fill the symmetric 2D Hessian matrices
|
|
504
|
+
hessian_matrices[..., 0, 0] = hxx
|
|
505
|
+
hessian_matrices[..., 1, 1] = hyy
|
|
506
|
+
hessian_matrices[..., 0, 1] = hessian_matrices[..., 1, 0] = hxy
|
|
507
|
+
|
|
508
|
+
# Reshape for batch eigenvalue computation
|
|
509
|
+
original_shape = hessian_matrices.shape[:-2] # (d0, d1)
|
|
510
|
+
batch_size = int(cp.prod(cp.array(original_shape)))
|
|
511
|
+
hessian_batch = hessian_matrices.reshape(batch_size, 2, 2)
|
|
512
|
+
|
|
513
|
+
# Compute eigenvalues for all matrices at once using CuPy
|
|
514
|
+
# Since Hessian matrices are symmetric, we can use eigvalsh
|
|
515
|
+
eigenvalues_batch = cp.linalg.eigvalsh(hessian_batch)
|
|
516
|
+
|
|
517
|
+
# Get only the largest eigenvalue for each matrix
|
|
518
|
+
largest_eigenvalues = cp.max(eigenvalues_batch, axis=1)
|
|
519
|
+
|
|
520
|
+
# Reshape back to original spatial dimensions
|
|
521
|
+
largest_eigenvalues = largest_eigenvalues.reshape(original_shape)
|
|
522
|
+
|
|
523
|
+
# Add the largest eigenvalue as a feature
|
|
524
|
+
features[..., feature_idx] = largest_eigenvalues
|
|
525
|
+
feature_idx += 1
|
|
574
526
|
|
|
575
|
-
#
|
|
576
|
-
|
|
577
|
-
features
|
|
578
|
-
|
|
579
|
-
#
|
|
580
|
-
|
|
581
|
-
|
|
582
|
-
|
|
583
|
-
|
|
584
|
-
|
|
585
|
-
#
|
|
586
|
-
|
|
587
|
-
gyx = cupy_ndimage.sobel(gy, axis=1, mode='reflect')
|
|
588
|
-
|
|
589
|
-
# Laplacian (sum of second derivatives)
|
|
590
|
-
laplacian = gxx + gyy
|
|
591
|
-
features.append(laplacian)
|
|
592
|
-
|
|
593
|
-
# Hessian determinant
|
|
594
|
-
hessian_det = gxx * gyy - gxy * gyx
|
|
595
|
-
features.append(hessian_det)
|
|
596
|
-
|
|
597
|
-
# Verify shapes
|
|
598
|
-
for i, feat in enumerate(features):
|
|
599
|
-
if feat.shape != original_shape:
|
|
600
|
-
# Check dimensionality and expand if needed
|
|
601
|
-
if len(feat.shape) < len(original_shape):
|
|
602
|
-
feat_adjusted = feat
|
|
603
|
-
missing_dims = len(original_shape) - len(feat.shape)
|
|
604
|
-
for _ in range(missing_dims):
|
|
605
|
-
feat_adjusted = cp.expand_dims(feat_adjusted, axis=0)
|
|
606
|
-
|
|
607
|
-
if feat_adjusted.shape != original_shape:
|
|
608
|
-
raise ValueError(f"Feature {i} has shape {feat.shape}, expected {original_shape}")
|
|
609
|
-
|
|
610
|
-
features[i] = feat_adjusted
|
|
527
|
+
# Normalize only morphological features, keep intensity features raw
|
|
528
|
+
intensity_features = features[..., :num_basic_features] # original + gaussians + DoGs
|
|
529
|
+
morphology_features = features[..., num_basic_features:] # gradients + laplacians + eigenvalues
|
|
530
|
+
|
|
531
|
+
# Normalize only morphological features using CuPy
|
|
532
|
+
morph_means = cp.mean(morphology_features, axis=(0, 1), keepdims=True)
|
|
533
|
+
morph_stds = cp.std(morphology_features, axis=(0, 1), keepdims=True)
|
|
534
|
+
morph_stds = cp.where(morph_stds == 0, 1, morph_stds)
|
|
535
|
+
morphology_features = (morphology_features - morph_means) / morph_stds
|
|
536
|
+
|
|
537
|
+
# Recombine
|
|
538
|
+
features = cp.concatenate([intensity_features, morphology_features], axis=-1)
|
|
611
539
|
|
|
612
|
-
|
|
613
|
-
|
|
540
|
+
return features
|
|
541
|
+
|
|
542
|
+
def create_2d_chunks(self):
|
|
543
|
+
"""Same 2D chunking logic"""
|
|
544
|
+
MAX_CHUNK_SIZE = self.twod_chunk_size
|
|
545
|
+
chunks = []
|
|
614
546
|
|
|
615
|
-
|
|
616
|
-
|
|
547
|
+
for z in range(self.image_3d.shape[0]):
|
|
548
|
+
y_dim = self.image_3d.shape[1]
|
|
549
|
+
x_dim = self.image_3d.shape[2]
|
|
550
|
+
total_pixels = y_dim * x_dim
|
|
551
|
+
|
|
552
|
+
if total_pixels <= MAX_CHUNK_SIZE:
|
|
553
|
+
chunks.append([z, 0, y_dim, 0, x_dim])
|
|
554
|
+
else:
|
|
555
|
+
largest_dim = 'y' if y_dim >= x_dim else 'x'
|
|
556
|
+
num_divisions = int(cp.ceil(total_pixels / MAX_CHUNK_SIZE))
|
|
557
|
+
|
|
558
|
+
if largest_dim == 'y':
|
|
559
|
+
div_size = int(cp.ceil(y_dim / num_divisions))
|
|
560
|
+
for i in range(0, y_dim, div_size):
|
|
561
|
+
end_i = min(i + div_size, y_dim)
|
|
562
|
+
chunks.append([z, i, end_i, 0, x_dim])
|
|
563
|
+
else:
|
|
564
|
+
div_size = int(cp.ceil(x_dim / num_divisions))
|
|
565
|
+
for i in range(0, x_dim, div_size):
|
|
566
|
+
end_i = min(i + div_size, x_dim)
|
|
567
|
+
chunks.append([z, 0, y_dim, i, end_i])
|
|
617
568
|
|
|
618
|
-
return
|
|
619
|
-
|
|
569
|
+
return chunks
|
|
570
|
+
|
|
620
571
|
def segment_volume(self, array, chunk_size=None, gpu=True):
|
|
621
|
-
"""
|
|
622
|
-
|
|
623
|
-
array = cp.asarray(array) # Ensure CuPy array
|
|
572
|
+
"""Optimized GPU version with sequential GPU processing and batched sklearn prediction"""
|
|
624
573
|
|
|
574
|
+
array = cp.asarray(array)
|
|
625
575
|
self.realtimechunks = None
|
|
626
|
-
self.map_slice = None
|
|
627
576
|
chunk_size = self.master_chunk
|
|
628
577
|
|
|
629
|
-
def create_2d_chunks():
|
|
630
|
-
"""
|
|
631
|
-
Create chunks by z-slices for 2D processing.
|
|
632
|
-
Each chunk is a complete z-slice with all y,x coordinates,
|
|
633
|
-
unless the slice exceeds 262144 pixels, in which case it's divided into subchunks.
|
|
634
|
-
|
|
635
|
-
Returns:
|
|
636
|
-
List of chunks where each chunk contains the parameters needed for processing.
|
|
637
|
-
Format depends on subchunking:
|
|
638
|
-
- No subchunking: [y_dim, x_dim, z, total_pixels, None]
|
|
639
|
-
- Y subchunking: [y_dim, x_dim, z, None, ['y', start_y, end_y]]
|
|
640
|
-
- X subchunking: [y_dim, x_dim, z, None, ['x', start_x, end_x]]
|
|
641
|
-
"""
|
|
642
|
-
MAX_CHUNK_SIZE = 262144
|
|
643
|
-
chunks = []
|
|
644
|
-
|
|
645
|
-
for z in range(self.image_3d.shape[0]):
|
|
646
|
-
# Get the dimensions of this z-slice
|
|
647
|
-
y_dim = self.image_3d.shape[1]
|
|
648
|
-
x_dim = self.image_3d.shape[2]
|
|
649
|
-
total_pixels = y_dim * x_dim
|
|
650
|
-
|
|
651
|
-
# If the slice is small enough, do not subchunk
|
|
652
|
-
if total_pixels <= MAX_CHUNK_SIZE:
|
|
653
|
-
chunks.append([z, 0, y_dim, 0, x_dim]) # [z_start, y_start, y_end, x_start, x_end]
|
|
654
|
-
else:
|
|
655
|
-
# Determine which dimension to divide (the largest one)
|
|
656
|
-
largest_dim = 'y' if y_dim >= x_dim else 'x'
|
|
657
|
-
|
|
658
|
-
# Calculate how many divisions we need
|
|
659
|
-
num_divisions = int(cp.ceil(total_pixels / MAX_CHUNK_SIZE))
|
|
660
|
-
|
|
661
|
-
# Calculate the approx size of each division along the largest dimension
|
|
662
|
-
if largest_dim == 'y':
|
|
663
|
-
div_size = int(cp.ceil(y_dim / num_divisions))
|
|
664
|
-
# Create subchunks by dividing the y-dimension
|
|
665
|
-
for i in range(0, y_dim, div_size):
|
|
666
|
-
end_i = min(i + div_size, y_dim)
|
|
667
|
-
chunks.append([z, i, end_i, 0, x_dim]) # [z, y_start, y_end, x_start, x_end]
|
|
668
|
-
else: # largest_dim == 'x'
|
|
669
|
-
div_size = int(cp.ceil(x_dim / num_divisions))
|
|
670
|
-
# Create subchunks by dividing the x-dimension
|
|
671
|
-
for i in range(0, x_dim, div_size):
|
|
672
|
-
end_i = min(i + div_size, x_dim)
|
|
673
|
-
chunks.append([z, 0, y_dim, i, end_i]) # [z, y_start, y_end, x_start, x_end]
|
|
674
|
-
|
|
675
|
-
return chunks
|
|
676
578
|
|
|
677
579
|
print("Chunking data...")
|
|
678
580
|
|
|
679
581
|
if not self.use_two:
|
|
680
|
-
# 3D Processing
|
|
681
|
-
# Round to nearest multiple of 32 for better memory alignment
|
|
582
|
+
# 3D Processing
|
|
682
583
|
chunk_size = ((chunk_size + 15) // 32) * 32
|
|
683
584
|
|
|
684
|
-
# Calculate number of chunks in each dimension
|
|
685
585
|
z_chunks = (self.image_3d.shape[0] + chunk_size - 1) // chunk_size
|
|
686
586
|
y_chunks = (self.image_3d.shape[1] + chunk_size - 1) // chunk_size
|
|
687
587
|
x_chunks = (self.image_3d.shape[2] + chunk_size - 1) // chunk_size
|
|
688
588
|
|
|
689
|
-
# Create start indices for all chunks at once using CuPy
|
|
690
589
|
chunk_starts = cp.array(cp.meshgrid(
|
|
691
590
|
cp.arange(z_chunks) * chunk_size,
|
|
692
591
|
cp.arange(y_chunks) * chunk_size,
|
|
@@ -696,8 +595,7 @@ class InteractiveSegmenter:
|
|
|
696
595
|
|
|
697
596
|
chunks = []
|
|
698
597
|
for chunk_start_gpu in chunk_starts:
|
|
699
|
-
|
|
700
|
-
z_start = int(chunk_start_gpu[0]) # Convert to regular Python int
|
|
598
|
+
z_start = int(chunk_start_gpu[0])
|
|
701
599
|
y_start = int(chunk_start_gpu[1])
|
|
702
600
|
x_start = int(chunk_start_gpu[2])
|
|
703
601
|
|
|
@@ -708,47 +606,155 @@ class InteractiveSegmenter:
|
|
|
708
606
|
coords = [z_start, z_end, y_start, y_end, x_start, x_end]
|
|
709
607
|
chunks.append(coords)
|
|
710
608
|
else:
|
|
711
|
-
|
|
712
|
-
|
|
713
|
-
|
|
714
|
-
|
|
715
|
-
#
|
|
716
|
-
|
|
717
|
-
|
|
718
|
-
|
|
719
|
-
|
|
720
|
-
|
|
721
|
-
|
|
722
|
-
|
|
723
|
-
|
|
724
|
-
|
|
725
|
-
|
|
726
|
-
|
|
727
|
-
|
|
728
|
-
|
|
729
|
-
|
|
730
|
-
|
|
731
|
-
|
|
732
|
-
|
|
609
|
+
chunks = self.create_2d_chunks()
|
|
610
|
+
|
|
611
|
+
print("Processing chunks with optimized GPU batching...")
|
|
612
|
+
|
|
613
|
+
# Optimal batch size - balance memory usage vs sklearn efficiency
|
|
614
|
+
max_workers = multiprocessing.cpu_count()
|
|
615
|
+
batch_size = max_workers * self.batch_amplifier # Process more chunks per batch for better sklearn utilization
|
|
616
|
+
total_processed = 0
|
|
617
|
+
|
|
618
|
+
# Configure sklearn for maximum parallelism
|
|
619
|
+
if hasattr(self.model, 'n_jobs'):
|
|
620
|
+
original_n_jobs = self.model.n_jobs
|
|
621
|
+
self.model.n_jobs = -1
|
|
622
|
+
|
|
623
|
+
try:
|
|
624
|
+
for batch_start in range(0, len(chunks), batch_size):
|
|
625
|
+
batch_end = min(batch_start + batch_size, len(chunks))
|
|
626
|
+
chunk_batch = chunks[batch_start:batch_end]
|
|
627
|
+
|
|
628
|
+
print(f"Processing batch {batch_start//batch_size + 1}/{(len(chunks) + batch_size - 1)//batch_size}")
|
|
629
|
+
|
|
630
|
+
# PHASE 1: Sequential GPU feature extraction (much faster than threading)
|
|
631
|
+
batch_results = []
|
|
632
|
+
|
|
633
|
+
for chunk in chunk_batch:
|
|
634
|
+
features_cpu, coords_gpu = self.extract_chunk_features_gpu(chunk)
|
|
635
|
+
if len(features_cpu) > 0:
|
|
636
|
+
batch_results.append((features_cpu, coords_gpu))
|
|
637
|
+
|
|
638
|
+
# PHASE 2: Batch predict with sklearn's parallelism
|
|
639
|
+
if batch_results:
|
|
640
|
+
# Combine all CPU features from this batch
|
|
641
|
+
all_batch_features = cp.vstack([result[0] for result in batch_results])
|
|
642
|
+
all_batch_coords = cp.vstack([result[1] for result in batch_results])
|
|
643
|
+
all_batch_features = cp.asnumpy(all_batch_features)
|
|
644
|
+
|
|
645
|
+
# Single prediction call using sklearn's internal parallelism
|
|
646
|
+
predictions = self.model.predict(all_batch_features)
|
|
647
|
+
predictions = cp.array(predictions, dtype=bool)
|
|
648
|
+
|
|
649
|
+
# Apply predictions to array
|
|
650
|
+
foreground_coords = all_batch_coords[predictions]
|
|
651
|
+
if len(foreground_coords) > 0:
|
|
733
652
|
try:
|
|
734
|
-
|
|
735
|
-
|
|
736
|
-
|
|
737
|
-
|
|
738
|
-
|
|
653
|
+
array[foreground_coords[:, 0], foreground_coords[:, 1], foreground_coords[:, 2]] = 255
|
|
654
|
+
except IndexError as e:
|
|
655
|
+
print(f"Index error when updating array: {e}")
|
|
656
|
+
# Fallback approach
|
|
657
|
+
for coord in foreground_coords:
|
|
658
|
+
z, y, x = int(coord[0]), int(coord[1]), int(coord[2])
|
|
659
|
+
if 0 <= z < array.shape[0] and 0 <= y < array.shape[1] and 0 <= x < array.shape[2]:
|
|
660
|
+
array[z, y, x] = 255
|
|
661
|
+
|
|
662
|
+
# Memory cleanup for this batch
|
|
663
|
+
del all_batch_features, all_batch_coords, predictions, foreground_coords
|
|
664
|
+
cp.get_default_memory_pool().free_all_blocks()
|
|
665
|
+
|
|
666
|
+
total_processed += len(chunk_batch)
|
|
667
|
+
print(f"Completed {total_processed}/{len(chunks)} chunks")
|
|
668
|
+
|
|
669
|
+
finally:
|
|
670
|
+
# Restore sklearn settings
|
|
671
|
+
if hasattr(self.model, 'n_jobs'):
|
|
672
|
+
self.model.n_jobs = original_n_jobs
|
|
739
673
|
|
|
740
|
-
#
|
|
741
|
-
|
|
742
|
-
|
|
674
|
+
# Final GPU memory cleanup
|
|
675
|
+
cp.get_default_memory_pool().free_all_blocks()
|
|
676
|
+
|
|
677
|
+
return cp.asnumpy(array)
|
|
743
678
|
|
|
744
|
-
|
|
679
|
+
def extract_chunk_features_gpu(self, chunk_coords):
|
|
680
|
+
"""
|
|
681
|
+
GPU version of feature extraction without prediction
|
|
682
|
+
Returns CPU features and GPU coordinates for efficient batch processing
|
|
683
|
+
"""
|
|
745
684
|
|
|
746
|
-
|
|
747
|
-
|
|
685
|
+
if self.previewing or not self.use_two:
|
|
686
|
+
# 3D processing
|
|
687
|
+
if self.realtimechunks is None:
|
|
688
|
+
z_min, z_max = chunk_coords[0], chunk_coords[1]
|
|
689
|
+
y_min, y_max = chunk_coords[2], chunk_coords[3]
|
|
690
|
+
x_min, x_max = chunk_coords[4], chunk_coords[5]
|
|
691
|
+
|
|
692
|
+
# Create coordinates using CuPy (GPU operations)
|
|
693
|
+
z_range = cp.arange(z_min, z_max)
|
|
694
|
+
y_range = cp.arange(y_min, y_max)
|
|
695
|
+
x_range = cp.arange(x_min, x_max)
|
|
696
|
+
|
|
697
|
+
chunk_coords_gpu = cp.stack(cp.meshgrid(
|
|
698
|
+
z_range, y_range, x_range, indexing='ij'
|
|
699
|
+
)).reshape(3, -1).T
|
|
700
|
+
else:
|
|
701
|
+
chunk_coords_gpu = cp.array(chunk_coords)
|
|
702
|
+
z_coords = chunk_coords_gpu[:, 0]
|
|
703
|
+
y_coords = chunk_coords_gpu[:, 1]
|
|
704
|
+
x_coords = chunk_coords_gpu[:, 2]
|
|
705
|
+
|
|
706
|
+
z_min, z_max = cp.min(z_coords).item(), cp.max(z_coords).item()
|
|
707
|
+
y_min, y_max = cp.min(y_coords).item(), cp.max(y_coords).item()
|
|
708
|
+
x_min, x_max = cp.min(x_coords).item(), cp.max(x_coords).item()
|
|
709
|
+
|
|
710
|
+
# Extract subarray and compute features (GPU operations)
|
|
711
|
+
subarray = self.image_3d[z_min:z_max+1, y_min:y_max+1, x_min:x_max+1]
|
|
712
|
+
|
|
713
|
+
if self.speed:
|
|
714
|
+
feature_map = self.compute_feature_maps_gpu(subarray)
|
|
715
|
+
else:
|
|
716
|
+
feature_map = self.compute_deep_feature_maps_gpu(subarray)
|
|
717
|
+
|
|
718
|
+
# Extract features using GPU operations
|
|
719
|
+
local_coords = chunk_coords_gpu.copy()
|
|
720
|
+
local_coords[:, 0] -= z_min
|
|
721
|
+
local_coords[:, 1] -= y_min
|
|
722
|
+
local_coords[:, 2] -= x_min
|
|
723
|
+
|
|
724
|
+
features_gpu = feature_map[local_coords[:, 0], local_coords[:, 1], local_coords[:, 2]]
|
|
725
|
+
|
|
726
|
+
# Convert features to CPU for sklearn, convert coordinates to NumPy for final assignment
|
|
727
|
+
#features_cpu = cp.asnumpy(features_gpu)
|
|
728
|
+
#coords_cpu = cp.asnumpy(chunk_coords_gpu)
|
|
729
|
+
|
|
730
|
+
return features_gpu, chunk_coords_gpu
|
|
731
|
+
|
|
732
|
+
else:
|
|
733
|
+
# 2D processing
|
|
734
|
+
if len(chunk_coords) == 5:
|
|
735
|
+
z = chunk_coords[0]
|
|
736
|
+
y_start = chunk_coords[1]
|
|
737
|
+
y_end = chunk_coords[2]
|
|
738
|
+
x_start = chunk_coords[3]
|
|
739
|
+
x_end = chunk_coords[4]
|
|
740
|
+
|
|
741
|
+
# Generate coordinates for this slice
|
|
742
|
+
coords_array = self.twodim_coords(z, y_start, y_end, x_start, x_end)
|
|
743
|
+
|
|
744
|
+
# Get feature map for this z-slice
|
|
745
|
+
feature_map = self.get_feature_map_slice(z, self.speed, True)
|
|
746
|
+
|
|
747
|
+
# Extract features using GPU operations
|
|
748
|
+
y_indices = coords_array[:, 1]
|
|
749
|
+
x_indices = coords_array[:, 2]
|
|
750
|
+
features_gpu = feature_map[y_indices, x_indices]
|
|
751
|
+
|
|
752
|
+
# Convert features to CPU for sklearn, convert coordinates to NumPy for final assignment
|
|
753
|
+
#features_cpu = cp.asnumpy(features_gpu)
|
|
754
|
+
#coords_cpu = cp.asnumpy(coords_array)
|
|
755
|
+
|
|
756
|
+
return features_gpu, coords_array
|
|
748
757
|
|
|
749
|
-
# Convert to NumPy at the very end for return
|
|
750
|
-
return cp.asnumpy(array)
|
|
751
|
-
|
|
752
758
|
def update_position(self, z=None, x=None, y=None):
|
|
753
759
|
"""Update current position for chunk prioritization with safeguards"""
|
|
754
760
|
|
|
@@ -776,15 +782,63 @@ class InteractiveSegmenter:
|
|
|
776
782
|
|
|
777
783
|
# Only clear map_slice if z changes and we're not already generating a new one
|
|
778
784
|
if self.current_z != self.prev_z:
|
|
779
|
-
|
|
780
|
-
if hasattr(self, 'feature_cache') and self.feature_cache is not None:
|
|
781
|
-
if self.current_z not in self.feature_cache:
|
|
782
|
-
self.map_slice = None
|
|
785
|
+
|
|
783
786
|
self._currently_segmenting = None
|
|
784
787
|
|
|
785
788
|
# Update previous z
|
|
786
789
|
self.prev_z = z
|
|
787
790
|
|
|
791
|
+
def get_realtime_chunks_2d(self, chunk_size=None):
|
|
792
|
+
"""
|
|
793
|
+
Create square chunks with 1 z-thickness (2D chunks across XY planes)
|
|
794
|
+
"""
|
|
795
|
+
|
|
796
|
+
if chunk_size is None:
|
|
797
|
+
chunk_size = int(cp.sqrt(self.twod_chunk_size))
|
|
798
|
+
|
|
799
|
+
# Determine if we need to chunk XY planes
|
|
800
|
+
small_dims = (self.image_3d.shape[1] <= chunk_size and
|
|
801
|
+
self.image_3d.shape[2] <= chunk_size)
|
|
802
|
+
few_z = self.image_3d.shape[0] <= 100 # arbitrary threshold
|
|
803
|
+
|
|
804
|
+
# If small enough, each Z is one chunk
|
|
805
|
+
if small_dims and few_z:
|
|
806
|
+
chunk_size_xy = max(self.image_3d.shape[1], self.image_3d.shape[2])
|
|
807
|
+
else:
|
|
808
|
+
chunk_size_xy = chunk_size
|
|
809
|
+
|
|
810
|
+
# Calculate chunks for XY plane
|
|
811
|
+
y_chunks = (self.image_3d.shape[1] + chunk_size_xy - 1) // chunk_size_xy
|
|
812
|
+
x_chunks = (self.image_3d.shape[2] + chunk_size_xy - 1) // chunk_size_xy
|
|
813
|
+
|
|
814
|
+
# Populate chunk dictionary
|
|
815
|
+
chunk_dict = {}
|
|
816
|
+
|
|
817
|
+
# Create chunks for each Z plane (single Z thickness)
|
|
818
|
+
for z in range(self.image_3d.shape[0]):
|
|
819
|
+
if small_dims:
|
|
820
|
+
chunk_dict[(z, 0, 0)] = {
|
|
821
|
+
'coords': [0, self.image_3d.shape[1], 0, self.image_3d.shape[2]],
|
|
822
|
+
'processed': False,
|
|
823
|
+
'z': z # Keep for backward compatibility
|
|
824
|
+
}
|
|
825
|
+
else:
|
|
826
|
+
# Multiple chunks per Z plane
|
|
827
|
+
for y_chunk in range(y_chunks):
|
|
828
|
+
for x_chunk in range(x_chunks):
|
|
829
|
+
y_start = y_chunk * chunk_size_xy
|
|
830
|
+
x_start = x_chunk * chunk_size_xy
|
|
831
|
+
y_end = min(y_start + chunk_size_xy, self.image_3d.shape[1])
|
|
832
|
+
x_end = min(x_start + chunk_size_xy, self.image_3d.shape[2])
|
|
833
|
+
|
|
834
|
+
chunk_dict[(z, y_start, x_start)] = {
|
|
835
|
+
'coords': [y_start, y_end, x_start, x_end],
|
|
836
|
+
'processed': False,
|
|
837
|
+
'z': z # Keep for backward compatibility
|
|
838
|
+
}
|
|
839
|
+
|
|
840
|
+
self.realtimechunks = chunk_dict
|
|
841
|
+
print("Ready!")
|
|
788
842
|
|
|
789
843
|
def get_realtime_chunks(self, chunk_size=49):
|
|
790
844
|
|
|
@@ -848,7 +902,10 @@ class InteractiveSegmenter:
|
|
|
848
902
|
#gpu = False
|
|
849
903
|
|
|
850
904
|
if self.realtimechunks is None:
|
|
851
|
-
self.
|
|
905
|
+
if not self.use_two:
|
|
906
|
+
self.get_realtime_chunks()
|
|
907
|
+
else:
|
|
908
|
+
self.get_realtime_chunks_2d()
|
|
852
909
|
else:
|
|
853
910
|
for chunk_pos in self.realtimechunks: # chunk_pos is the (z, y_start, x_start) tuple
|
|
854
911
|
self.realtimechunks[chunk_pos]['processed'] = False
|
|
@@ -952,80 +1009,58 @@ class InteractiveSegmenter:
|
|
|
952
1009
|
|
|
953
1010
|
if not saving:
|
|
954
1011
         print("Training model...")
+        self.model = RandomForestClassifier(
+            n_estimators=100,
+            n_jobs=-1,
+            max_depth=None
+        )
 
         self.speed = speed
         self.cur_gpu = use_gpu
-        self.realtimechunks = None # dump ram
+        #self.realtimechunks = None # dump ram
 
         self.mem_lock = mem_lock
-
-        self.model = RandomForestClassifier(
-            n_estimators=100,
-            n_jobs=-1,
-            max_depth=None
-        )
 
-        if use_two:
+        if use_two != self.use_two:
+            self.realtimechunks = None
 
-
+        if not use_two:
+            self.use_two = False
 
+        if use_two:
             if not self.use_two: #Clarifies if we need to redo feature cache for 2D
-                self.feature_cache = None
                 self.use_two = True
-
-            self.feature_cache = None #Decided this should reset, can remove this line to have it retain prev feature maps
             self.two_slices = []
-
-            if self.feature_cache == None:
-                self.feature_cache = {}
-
             foreground_array = cp.asarray(foreground_array)
-
+
             # Get foreground coordinates and features
             z_fore, y_fore, x_fore = cp.where(foreground_array == 1)
-
-
-            y_fore_cpu = cp.asnumpy(y_fore)
-            x_fore_cpu = cp.asnumpy(x_fore)
-
-            fore_coords = list(zip(z_fore_cpu, y_fore_cpu, x_fore_cpu))
+            # Keep as CuPy arrays but convert to regular Python types for dictionary keys
+            fore_coords = [(int(z), int(y), int(x)) for z, y, x in zip(z_fore, y_fore, x_fore)]
 
             # Get background coordinates and features
             z_back, y_back, x_back = cp.where(foreground_array == 2)
-
-
-
-            x_back_cpu = cp.asnumpy(x_back)
-
-            back_coords = list(zip(z_back_cpu, y_back_cpu, x_back_cpu))
-
+            # Keep as CuPy arrays but convert to regular Python types for dictionary keys
+            back_coords = [(int(z), int(y), int(x)) for z, y, x in zip(z_back, y_back, x_back)]
+
             foreground_features = []
             background_features = []
-
             z_fores = self.organize_by_z(fore_coords)
             z_backs = self.organize_by_z(back_coords)
             slices = set(list(z_fores.keys()) + list(z_backs.keys()))
-
+
             for z in slices:
-
-
                 current_map = self.get_feature_map_slice(z, speed, use_gpu)
-
                 if z in z_fores:
-
                     for y, x in z_fores[z]:
                         # Get the feature vector for this foreground point
                         feature_vector = current_map[y, x]
-
                         # Add to our collection
                         foreground_features.append(cp.asnumpy(feature_vector))
-
                 if z in z_backs:
-
                     for y, x in z_backs[z]:
-                        # Get the feature vector for this
+                        # Get the feature vector for this background point
                         feature_vector = current_map[y, x]
-
                         # Add to our collection
                         background_features.append(cp.asnumpy(feature_vector))
 
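Note: in the 2D (use_two) path above, scribble coordinates are grouped by z-slice and a per-slice feature map is indexed at each labeled pixel. A minimal illustrative sketch of that pattern, using hypothetical stand-ins (group_by_z and a toy feature_map) rather than the package's organize_by_z / get_feature_map_slice:

    import numpy as np
    from collections import defaultdict

    def group_by_z(coords):
        # coords: iterable of (z, y, x) tuples -> {z: [(y, x), ...]}
        grouped = defaultdict(list)
        for z, y, x in coords:
            grouped[z].append((y, x))
        return dict(grouped)

    # Toy per-slice "feature map": shape (H, W, n_features)
    feature_map = np.random.default_rng(0).random((4, 4, 3))

    coords = [(0, 1, 2), (0, 3, 3), (1, 0, 0)]
    per_slice = group_by_z(coords)
    features = [feature_map[y, x] for (y, x) in per_slice.get(0, [])]
    print(len(features), features[0].shape)  # 2 (3,)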
@@ -1045,11 +1080,11 @@ class InteractiveSegmenter:
         z_back = cp.argwhere(foreground_array_gpu == 2)
 
         # Convert back to NumPy for compatibility with the rest of the code
-        z_fore_cpu = cp.asnumpy(z_fore)
-        z_back_cpu = cp.asnumpy(z_back)
+        #z_fore_cpu = cp.asnumpy(z_fore)
+        #z_back_cpu = cp.asnumpy(z_back)
 
         # If no scribbles, return empty lists
-        if len(
+        if len(z_fore) == 0 and len(z_back) == 0:
             return foreground_features, background_features
 
         # Get dimensions of the input array
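Note: the edit above keeps the scribble coordinates as CuPy arrays instead of copying them to the host up front; len() works on device arrays without a transfer, and cp.asnumpy (or .get()) is only needed once NumPy is actually required. A small sketch of that pattern, assuming CuPy and a CUDA device are available:

    import cupy as cp

    labels = cp.zeros((2, 4, 4), dtype=cp.uint8)
    labels[0, 1, 2] = 1                       # one foreground scribble
    z_fore = cp.argwhere(labels == 1)         # stays on the GPU
    z_back = cp.argwhere(labels == 2)

    if len(z_fore) == 0 and len(z_back) == 0:     # no host transfer needed
        print("no scribbles")
    else:
        z_fore_cpu = cp.asnumpy(z_fore)           # transfer only when required
        print(z_fore_cpu)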
@@ -1070,7 +1105,7 @@ class InteractiveSegmenter:
         grid_cells_with_scribbles = set()
 
         # Map original coordinates to grid cells
-        for z, y, x in cp.vstack((
+        for z, y, x in cp.vstack((z_fore, z_back)) if len(z_back) > 0 else z_fore:
             grid_z = int(z // box_size)
             grid_y = int(y // box_size)
             grid_x = int(x // box_size)
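Note: the loop above assigns each labeled voxel to a coarse grid cell by integer division. A standalone sketch with made-up values (box_size and the coordinates are illustrative only):

    import numpy as np

    box_size = 49
    coords = np.array([[0, 10, 120], [0, 60, 5], [3, 100, 100]])   # (z, y, x) scribbles
    cells = {tuple(int(c) for c in coord // box_size) for coord in coords}
    print(cells)   # e.g. {(0, 0, 2), (0, 1, 0), (0, 2, 2)} (set order may vary)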
@@ -1111,96 +1146,207 @@ class InteractiveSegmenter:
 
         if self.previous_foreground is not None:
             failed = True
+
             try:
-                #
+                # Handle foreground features
                 if isinstance(foreground_features, list):
-
+                    if len(foreground_features) > 0:
+                        # Check if first element is CuPy or NumPy
+                        if hasattr(foreground_features[0], 'get'): # CuPy array
+                            foreground_features = cp.stack(foreground_features)
+                        else: # NumPy array
+                            import numpy as np
+                            foreground_features = cp.asarray(np.stack(foreground_features))
+                    else:
+                        foreground_features = cp.array([])
 
-                # Convert CuPy arrays to NumPy if necessary
+                # Convert CuPy arrays to NumPy if necessary for consistent handling
                 if hasattr(foreground_features, 'get'):
                     foreground_features = foreground_features.get()
 
-
+                # Combine with previous foreground features
+                if len(foreground_features) > 0:
+                    foreground_features = np.vstack([self.previous_foreground, foreground_features])
+                else:
+                    foreground_features = self.previous_foreground
+
                 failed = False
             except Exception as e:
-
+                print(f"Error combining foreground features: {e}")
+                # Keep only new features if combination fails
+                if isinstance(foreground_features, list):
+                    if len(foreground_features) > 0:
+                        # Check if first element is CuPy or NumPy
+                        if hasattr(foreground_features[0], 'get'): # CuPy array
+                            foreground_features = cp.stack(foreground_features)
+                        else: # NumPy array
+                            import numpy as np
+                            foreground_features = cp.asarray(np.stack(foreground_features))
+                    else:
+                        foreground_features = cp.array([])
+                if hasattr(foreground_features, 'get'):
+                    foreground_features = foreground_features.get()
 
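Note: the block above normalizes the freshly gathered foreground features (which may arrive as a Python list, a CuPy array, or a NumPy array) into one host-side array and stacks them on top of the features restored from a loaded model; the same pattern is repeated for the background features below. A condensed sketch under those assumptions, with to_host() as a hypothetical helper:

    import numpy as np

    def to_host(features):
        # Lists are stacked into one array; CuPy objects expose .get() for a host copy.
        if isinstance(features, list):
            features = np.stack(features) if len(features) > 0 else np.empty((0,))
        if hasattr(features, 'get'):
            features = features.get()
        return features

    previous = np.ones((3, 5))                   # features restored from a saved model
    new = to_host([np.zeros(5), np.zeros(5)])    # features from fresh scribbles
    combined = np.vstack([previous, new]) if len(new) > 0 else previous
    print(combined.shape)   # (5, 5)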
             try:
-                #
+                # Handle background features
                 if isinstance(background_features, list):
-
+                    if len(background_features) > 0:
+                        # Check if first element is CuPy or NumPy
+                        if hasattr(background_features[0], 'get'): # CuPy array
+                            background_features = cp.stack(background_features)
+                        else: # NumPy array
+                            import numpy as np
+                            background_features = cp.asarray(np.stack(background_features))
+                    else:
+                        background_features = cp.array([])
 
-                # Convert CuPy arrays to NumPy if necessary
+                # Convert CuPy arrays to NumPy if necessary for consistent handling
                 if hasattr(background_features, 'get'):
                     background_features = background_features.get()
 
-
+                # Combine with previous background features
+                if len(background_features) > 0:
+                    background_features = np.vstack([self.previous_background, background_features])
+                else:
+                    background_features = self.previous_background
+
                 failed = False
             except Exception as e:
-
+                print(f"Error combining background features: {e}")
+                # Keep only new features if combination fails
+                if isinstance(background_features, list):
+                    if len(background_features) > 0:
+                        # Check if first element is CuPy or NumPy
+                        if hasattr(background_features[0], 'get'): # CuPy array
+                            background_features = cp.stack(background_features)
+                        else: # NumPy array
+                            import numpy as np
+                            background_features = cp.asarray(np.stack(background_features))
+                    else:
+                        background_features = cp.array([])
+                if hasattr(background_features, 'get'):
+                    background_features = background_features.get()
+
             try:
-                #
-                if hasattr(
-
+                # Handle foreground coordinates - always combine when we have new ones
+                if hasattr(z_fore, 'get'):
+                    z_fore_numpy = z_fore.get()
+                else:
+                    z_fore_numpy = z_fore
+
                 if hasattr(self.previous_z_fore, 'get'):
-
+                    prev_z_fore_numpy = self.previous_z_fore.get()
+                else:
+                    prev_z_fore_numpy = self.previous_z_fore
 
-
+                # Always combine coordinates when we have new ones
+                if len(z_fore_numpy) > 0: # We have new coordinates
+                    z_fore = np.concatenate([prev_z_fore_numpy, z_fore_numpy])
+                else: # No new coordinates, keep old ones
+                    z_fore = prev_z_fore_numpy
+
             except Exception as e:
-
+                print(f"Error combining foreground coordinates: {e}")
+                # Fallback: keep new coordinates if combination fails
+                if hasattr(z_fore, 'get'):
+                    z_fore = z_fore.get()
+
             try:
-                #
-                if hasattr(
-
+                # Handle background coordinates - always combine when we have new ones
+                if hasattr(z_back, 'get'):
+                    z_back_numpy = z_back.get()
+                else:
+                    z_back_numpy = z_back
+
                 if hasattr(self.previous_z_back, 'get'):
-
+                    prev_z_back_numpy = self.previous_z_back.get()
+                else:
+                    prev_z_back_numpy = self.previous_z_back
 
-
+                # Always combine coordinates when we have new ones
+                if len(z_back_numpy) > 0: # We have new coordinates
+                    z_back = np.concatenate([prev_z_back_numpy, z_back_numpy])
+                else: # No new coordinates, keep old ones
+                    z_back = prev_z_back_numpy
+
             except Exception as e:
-
+                print(f"Error combining background coordinates: {e}")
+                # Fallback: keep new coordinates if combination fails
+                if hasattr(z_back, 'get'):
+                    z_back = z_back.get()
+
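Note: the two try blocks above merge coordinate arrays the same way for foreground and background: any CuPy array is moved to the host, then new scribble coordinates are appended to the previously saved ones. A minimal sketch with placeholder values:

    import numpy as np

    def as_numpy(arr):
        return arr.get() if hasattr(arr, 'get') else arr

    previous_z_fore = np.array([[0, 4, 4], [1, 2, 7]])   # loaded (z, y, x) coordinates
    new_z_fore = np.array([[2, 9, 1]])                   # freshly drawn scribbles

    prev, new = as_numpy(previous_z_fore), as_numpy(new_z_fore)
    z_fore = np.concatenate([prev, new]) if len(new) > 0 else prev
    print(z_fore.shape)   # (3, 3)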
             if failed:
                 print("Could not combine new model with old loaded model. Perhaps you are trying to combine a quick model with a deep model? I cannot combine these...")
 
         if saving:
-
-
-
-            if hasattr(background_features, 'get'):
-                background_features = background_features.get()
-            if hasattr(z_fore_cpu, 'get'):
-                z_fore_cpu = z_fore_cpu.get()
-            if hasattr(z_back_cpu, 'get'):
-                z_back_cpu = z_back_cpu.get()
-
-            return foreground_features, background_features, z_fore_cpu, z_back_cpu
-
-        # Make sure foreground_features and background_features are NumPy arrays
+            return foreground_features, background_features, z_fore, z_back
+
+        # Ensure features are proper arrays for training
         if isinstance(foreground_features, list):
-
-
-
-
+            if len(foreground_features) > 0:
+                # Check if first element is CuPy or NumPy
+                if hasattr(foreground_features[0], 'get'): # CuPy array
+                    foreground_features = cp.stack(foreground_features)
+                else: # NumPy array
+                    import numpy as np
+                    foreground_features = cp.asarray(np.stack(foreground_features))
+            else:
+                foreground_features = cp.array([])
+
         if isinstance(background_features, list):
-
-
+            if len(background_features) > 0:
+                # Check if first element is CuPy or NumPy
+                if hasattr(background_features[0], 'get'): # CuPy array
+                    background_features = cp.stack(background_features)
+                else: # NumPy array
+                    import numpy as np
+                    background_features = cp.asarray(np.stack(background_features))
+            else:
+                background_features = cp.array([])
+
+        # Convert to NumPy for sklearn
+        if hasattr(foreground_features, 'get'):
+            foreground_features = foreground_features.get()
+        if hasattr(background_features, 'get'):
             background_features = background_features.get()
-
+
+        # Validate dimensions before training
+
+        # Ensure we have matching numbers of features and coordinates
+        if len(foreground_features) != len(z_fore):
+            print(f"Warning: Foreground features ({len(foreground_features)}) and coordinates ({len(z_fore)}) don't match!")
+            # Trim to the smaller size
+            min_len = min(len(foreground_features), len(z_fore))
+            foreground_features = foreground_features[:min_len]
+            z_fore = z_fore[:min_len]
+
+        if len(background_features) != len(z_back):
+            print(f"Warning: Background features ({len(background_features)}) and coordinates ({len(z_back)}) don't match!")
+            # Trim to the smaller size
+            min_len = min(len(background_features), len(z_back))
+            background_features = background_features[:min_len]
+            z_back = z_back[:min_len]
+
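Note: the guard above keeps features and coordinates aligned by trimming both to the shorter length when their counts disagree. Sketch with placeholder shapes:

    import numpy as np

    features = np.zeros((5, 8))
    coords = np.zeros((4, 3))
    if len(features) != len(coords):
        n = min(len(features), len(coords))
        features, coords = features[:n], coords[:n]
    print(features.shape, coords.shape)   # (4, 8) (4, 3)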
         # Combine features and labels for training
-
-
-
-
-
-
-
-
-
-
+        if len(foreground_features) > 0 and len(background_features) > 0:
+            X = np.vstack([foreground_features, background_features])
+            y = np.hstack([np.ones(len(z_fore)), np.zeros(len(z_back))])
+
+            # Train the model
+            try:
+                self.model.fit(X, y)
+            except Exception as e:
+                print(f"Error during model training: {e}")
+                import traceback
+                traceback.print_exc()
+        else:
+            print("Not enough features to train the model")
+
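Note: the new training block labels foreground rows 1 and background rows 0, then fits the scikit-learn random forest on the host-side arrays. A self-contained sketch of that step with random placeholder features:

    import numpy as np
    from sklearn.ensemble import RandomForestClassifier

    fore = np.random.rand(20, 10)    # placeholder foreground feature vectors
    back = np.random.rand(30, 10)    # placeholder background feature vectors

    X = np.vstack([fore, back])
    y = np.hstack([np.ones(len(fore)), np.zeros(len(back))])

    model = RandomForestClassifier(n_estimators=100, n_jobs=-1, max_depth=None)
    model.fit(X, y)
    print(model.predict(X[:3]))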
         self.current_speed = speed
-
+
         # Clean up GPU memory
         cp.get_default_memory_pool().free_all_blocks()
-
+
         print("Done")
 
 
@@ -1211,7 +1357,7 @@ class InteractiveSegmenter:
         foreground_features, background_features, z_fore, z_back = self.train_batch(foreground_array, speed = self.speed, use_gpu = self.use_gpu, use_two = self.use_two, mem_lock = self.mem_lock, saving = True)
 
 
-
+        cp.savez(file_name,
                  foreground_features=foreground_features,
                  background_features=background_features,
                  z_fore=z_fore,
@@ -1221,14 +1367,14 @@ class InteractiveSegmenter:
                  use_two=self.use_two,
                  mem_lock=self.mem_lock)
 
-        print(f"Model data saved to {file_name}.
+        print(f"Model data saved to {file_name}.")
 
 
     def load_model(self, file_name):
 
         print("Loading model data")
 
-        data =
+        data = cp.load(file_name)
 
         # Unpack the arrays
         self.previous_foreground = data['foreground_features']
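Note: save_model and load_model now round-trip the training data through an .npz archive via cp.savez / cp.load, indexed by the saved key names. A minimal sketch of that round trip, assuming CuPy is available and using only two of the saved keys:

    import cupy as cp

    fore = cp.random.rand(4, 10)
    back = cp.random.rand(6, 10)
    cp.savez("model_data.npz", foreground_features=fore, background_features=back)

    data = cp.load("model_data.npz")
    print(data['foreground_features'].shape)   # (4, 10)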
@@ -1240,8 +1386,10 @@ class InteractiveSegmenter:
         self.use_two = bool(data['use_two'])
         self.mem_lock = bool(data['mem_lock'])
 
-        X =
-        y =
+        X = cp.vstack([self.previous_foreground, self.previous_background])
+        y = cp.hstack([cp.ones(len(self.previous_z_fore)), cp.zeros(len(self.previous_z_back))])
+        X = cp.asnumpy(X)
+        y = cp.asnumpy(y)
 
         try:
             self.model.fit(X, y)