nettracer3d 0.8.4__py3-none-any.whl → 0.8.5__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of nettracer3d might be problematic.
- nettracer3d/community_extractor.py +3 -2
- nettracer3d/neighborhoods.py +140 -31
- nettracer3d/nettracer.py +10 -3
- nettracer3d/nettracer_gui.py +467 -703
- nettracer3d/painting.py +373 -0
- nettracer3d/proximity.py +2 -2
- nettracer3d/segmenter.py +849 -851
- nettracer3d/segmenter_GPU.py +806 -658
- nettracer3d/smart_dilate.py +2 -2
- {nettracer3d-0.8.4.dist-info → nettracer3d-0.8.5.dist-info}/METADATA +5 -2
- nettracer3d-0.8.5.dist-info/RECORD +25 -0
- {nettracer3d-0.8.4.dist-info → nettracer3d-0.8.5.dist-info}/licenses/LICENSE +2 -4
- nettracer3d-0.8.4.dist-info/RECORD +0 -24
- {nettracer3d-0.8.4.dist-info → nettracer3d-0.8.5.dist-info}/WHEEL +0 -0
- {nettracer3d-0.8.4.dist-info → nettracer3d-0.8.5.dist-info}/entry_points.txt +0 -0
- {nettracer3d-0.8.4.dist-info → nettracer3d-0.8.5.dist-info}/top_level.txt +0 -0
nettracer3d/segmenter.py
CHANGED
@@ -6,6 +6,7 @@ import threading
 from scipy import ndimage
 import multiprocessing
 from collections import defaultdict
+from typing import List, Dict, Tuple, Any
 
 class InteractiveSegmenter:
     def __init__(self, image_3d, use_gpu=False):
@@ -37,7 +38,6 @@ class InteractiveSegmenter:
         self.two_slices = []
         self.speed = True
         self.cur_gpu = False
-        self.map_slice = None
         self.prev_z = None
         self.previewing = False
 
@@ -48,10 +48,12 @@
         self.mem_lock = False
 
         #Adjustable feature map params:
-        self.
+        self.sigmas = [1,2,4,8]
         self.windows = 10
         self.dogs = [(1, 2), (2, 4), (4, 8)]
         self.master_chunk = 49
+        self.twod_chunk_size = 262144
+        self.batch_amplifier = 1
 
         #Data when loading prev model:
         self.previous_foreground = None
@@ -59,618 +61,473 @@
         self.previous_z_fore = None
         self.previous_z_back = None
 
-    def 
-        """
-
-
-        """
-        # Check if we're already processing this slice
-        if self._currently_processing and self._currently_processing == slice_z:
-            return
+    def compute_deep_feature_maps_cpu_2d(self, z=None, image_2d = None):
+        """Vectorized detailed version with Gaussian gradient magnitudes, Laplacians, and largest Hessian eigenvalue for 2D images"""
+        if z is None:
+            z = self.image_3d.shape[0] // 2  # Use middle slice if not specified
 
-
-
+        if image_2d is None:
+            image_2d = self.image_3d[z, :, :]
+
+        if image_2d.ndim == 3 and image_2d.shape[-1] == 3:
+            # RGB case - process each channel
+            features_per_channel = []
+            for channel in range(3):
+                channel_features = self.compute_deep_feature_maps_cpu_2d(image_2d = image_2d[..., channel])
+                features_per_channel.append(channel_features)
+
+            # Stack all channel features
+            return np.concatenate(features_per_channel, axis=-1)
 
-        try:
-
-            # First attempt to get the feature map
-            feature_map = None
-
-            try:
-                if slice_z in self.feature_cache:
-                    feature_map = self.feature_cache[slice_z]
-                elif hasattr(self, 'map_slice') and self.map_slice is not None and slice_z == self.current_z:
-                    feature_map = self.map_slice
-                else:
-                    # Generate new feature map
-                    try:
-                        feature_map = self.get_feature_map_slice(slice_z, self.current_speed, False)
-                        self.map_slice = feature_map
-                        # Cache the feature map for future use
-                        #if not hasattr(self, 'feature_cache'):
-                            #self.feature_cache = {}
-                        #self.feature_cache[slice_z] = feature_map
-                    except Exception as e:
-                        print(f"Error generating feature map: {e}")
-                        import traceback
-                        traceback.print_exc()
-                        return  # Exit if we can't generate the feature map
-            except:
-                # Generate new feature map
-                #self.feature_cache = {}
-                try:
-                    feature_map = self.get_feature_map_slice(slice_z, self.current_speed, False)
-                    self.map_slice = feature_map
-                    # Cache the feature map for future use
-                    #if not hasattr(self, 'feature_cache'):
-                        #self.feature_cache = {}
-                    #self.feature_cache[slice_z] = feature_map
-                except Exception as e:
-                    print(f"Error generating feature map: {e}")
-                    import traceback
-                    traceback.print_exc()
-                    return  # Exit if we can't generate the feature map
-
-
-        # Check that we have a valid feature map
-        if feature_map is None:
-            return
-
-        # Get dimensions of the slice
-        y_size, x_size = self.image_3d.shape[1], self.image_3d.shape[2]
-        chunk_count = 0
-
-        # Process in blocks for chunked feedback
-        for y_start in range(0, y_size, block_size):
-            if self._currently_processing != slice_z:
-                return
-
-            for x_start in range(0, x_size, block_size):
-                if self._currently_processing != slice_z:
-                    return
-
-                y_end = min(y_start + block_size, y_size)
-                x_end = min(x_start + block_size, x_size)
-
-                # Create coordinates and features for this block
-                coords = []
-                features = []
-
-                for y in range(y_start, y_end):
-                    for x in range(x_start, x_end):
-                        coords.append((slice_z, y, x))
-                        features.append(feature_map[y, x])
-
-                # Skip empty blocks
-                if not coords:
-                    continue
-
-                # Predict
-                try:
-                    try:
-                        predictions = self.model.predict(features)
-                    except ValueError:
-                        self.feature_cache = None
-                        self.map_slice = None
-                        return None, None
-
-                    # Split results
-                    foreground = set()
-                    background = set()
-
-                    for coord, pred in zip(coords, predictions):
-                        if pred:
-                            foreground.add(coord)
-                        else:
-                            background.add(coord)
-
-                    # Yield this chunk
-                    chunk_count += 1
-                    yield foreground, background
-
-                except Exception as e:
-                    print(f"Error processing chunk: {e}")
-                    import traceback
-                    traceback.print_exc()
-
 
[… deleted lines not captured by the page extraction …]
+        # Calculate total number of features
+        num_basic_features = 1 + len(self.sigmas) + len(self.dogs)  # original + gaussians + dogs
+        num_gradient_features = len(self.sigmas)  # gradient magnitude for each sigma
+        num_laplacian_features = len(self.sigmas)  # laplacian for each sigma
+        num_hessian_features = len(self.sigmas) * 1  # 1 eigenvalue (largest) for each sigma
+
+        total_features = num_basic_features + num_gradient_features + num_laplacian_features + num_hessian_features
+
+        # Pre-allocate result array
+        features = np.empty(image_2d.shape + (total_features,), dtype=image_2d.dtype)
+        features[..., 0] = image_2d
+
+        feature_idx = 1
+
+        # Cache for Gaussian filters - only compute each sigma once
+        gaussian_cache = {}
+
+        # Compute all unique sigmas needed (from both sigmas and dogs)
+        all_sigmas = set(self.sigmas)
+        for s1, s2 in self.dogs:
+            all_sigmas.add(s1)
+            all_sigmas.add(s2)
+
+        # Pre-compute all Gaussian filters
+        for sigma in all_sigmas:
+            gaussian_cache[sigma] = ndimage.gaussian_filter(image_2d, sigma)
+
+        # Gaussian smoothing - use cached results
+        for sigma in self.sigmas:
+            features[..., feature_idx] = gaussian_cache[sigma]
+            feature_idx += 1
+
+        # Difference of Gaussians - use cached results
+        for s1, s2 in self.dogs:
+            features[..., feature_idx] = gaussian_cache[s1] - gaussian_cache[s2]
+            feature_idx += 1
+
+        # Gaussian gradient magnitudes for each sigma (vectorized, 2D version)
+        for sigma in self.sigmas:
+            gaussian_img = gaussian_cache[sigma]
+            gx = ndimage.sobel(gaussian_img, axis=1, mode='reflect')  # x direction
+            gy = ndimage.sobel(gaussian_img, axis=0, mode='reflect')  # y direction
+            features[..., feature_idx] = np.sqrt(gx**2 + gy**2)
+            feature_idx += 1
+
+        # Laplacian of Gaussian for each sigma (vectorized, 2D version)
+        for sigma in self.sigmas:
+            gaussian_img = gaussian_cache[sigma]
+            features[..., feature_idx] = ndimage.laplace(gaussian_img, mode='reflect')
+            feature_idx += 1
+
+        # Largest Hessian eigenvalue for each sigma (fully vectorized, 2D version)
+        for sigma in self.sigmas:
+            gaussian_img = gaussian_cache[sigma]
+
+            # Compute second derivatives (Hessian components) - all vectorized for 2D
+            hxx = ndimage.gaussian_filter(gaussian_img, sigma=0, order=[0, 2], mode='reflect')
+            hyy = ndimage.gaussian_filter(gaussian_img, sigma=0, order=[2, 0], mode='reflect')
+            hxy = ndimage.gaussian_filter(gaussian_img, sigma=0, order=[1, 1], mode='reflect')
+
+            # Vectorized eigenvalue computation using numpy broadcasting
+            # Create arrays with shape (d0, d1, 2, 2) for all 2D Hessian matrices
+            shape = image_2d.shape
+            hessian_matrices = np.zeros(shape + (2, 2))
+
+            # Fill the symmetric 2D Hessian matrices
+            hessian_matrices[..., 0, 0] = hxx
+            hessian_matrices[..., 1, 1] = hyy
+            hessian_matrices[..., 0, 1] = hessian_matrices[..., 1, 0] = hxy
+
+            # Reshape for batch eigenvalue computation
+            original_shape = hessian_matrices.shape[:-2]  # (d0, d1)
+            batch_size = np.prod(original_shape)
+            hessian_batch = hessian_matrices.reshape(batch_size, 2, 2)
+
+            # Compute eigenvalues for all matrices at once
+            eigenvalues_batch = np.real(np.linalg.eigvals(hessian_batch))
+
+            # Get only the largest eigenvalue for each matrix
+            largest_eigenvalues = np.max(eigenvalues_batch, axis=1)
+
+            # Reshape back to original spatial dimensions
+            largest_eigenvalues = largest_eigenvalues.reshape(original_shape)
+
+            # Add the largest eigenvalue as a feature
+            features[..., feature_idx] = largest_eigenvalues
+            feature_idx += 1
+
+        # Normalize only morphological features, keep intensity features raw
+        intensity_features = features[..., :num_basic_features]  # original + gaussians + DoGs
+        morphology_features = features[..., num_basic_features:]  # gradients + laplacians + eigenvalues
+
+        # Normalize only morphological features
+        morph_means = np.mean(morphology_features, axis=(0, 1), keepdims=True)
+        morph_stds = np.std(morphology_features, axis=(0, 1), keepdims=True)
+        morph_stds = np.where(morph_stds == 0, 1, morph_stds)
+        morphology_features = (morphology_features - morph_means) / morph_stds
+
+        # Recombine
+        features = np.concatenate([intensity_features, morphology_features], axis=-1)
+
+        return features
 
 
+    def compute_feature_maps_cpu_2d(self, z=None, image_2d = None):
+        """Compute feature maps for 2D images using CPU with caching optimization"""
+        if image_2d is None:
+            image_2d = self.image_3d[z, :, :]
 
[… deleted lines not captured by the page extraction …]
+        if image_2d.ndim == 3 and image_2d.shape[-1] == 3:
+            # RGB case - process each channel
+            features_per_channel = []
+            for channel in range(3):
+                channel_features = self.compute_feature_maps_cpu_2d(image_2d = image_2d[..., channel])
+                features_per_channel.append(channel_features)
+
+            # Stack all channel features
+            return np.concatenate(features_per_channel, axis=-1)
 
-
+        # Pre-allocate result array
+        num_features = len(self.sigmas) + len(self.dogs) + 2  # +2 for original image + gradient
+        features = np.empty(image_2d.shape + (num_features,), dtype=image_2d.dtype)
 
-        #
[… deleted lines not captured by the page extraction …]
+        # Include original image as first feature
+        features[..., 0] = image_2d
+        feature_idx = 1
+
+        # Cache for Gaussian filters - only compute each sigma once
+        gaussian_cache = {}
+
+        # Compute all unique sigmas needed (from both sigmas and dogs)
+        all_sigmas = set(self.sigmas)
+        for s1, s2 in self.dogs:
+            all_sigmas.add(s1)
+            all_sigmas.add(s2)
+
+        # Pre-compute all Gaussian filters
+        for sigma in all_sigmas:
+            gaussian_cache[sigma] = ndimage.gaussian_filter(image_2d, sigma)
+
+        # Gaussian smoothing - use cached results
+        for sigma in self.sigmas:
+            features[..., feature_idx] = gaussian_cache[sigma]
+            feature_idx += 1
+
+        # Difference of Gaussians - use cached results
+        for s1, s2 in self.dogs:
+            features[..., feature_idx] = gaussian_cache[s1] - gaussian_cache[s2]
+            feature_idx += 1
+
+        # Gradient magnitude (2D version)
+        gx = ndimage.sobel(image_2d, axis=1, mode='reflect')  # x direction
+        gy = ndimage.sobel(image_2d, axis=0, mode='reflect')  # y direction
+        features[..., feature_idx] = np.sqrt(gx**2 + gy**2)
+
+        return features
 
[… deleted lines not captured by the page extraction …]
+    def compute_deep_feature_maps_cpu(self, image_3d=None):
+        """Vectorized detailed version with Gaussian gradient magnitudes, Laplacians, and largest Hessian eigenvalue only"""
+        if image_3d is None:
+            image_3d = self.image_3d
 
[… deleted lines not captured by the page extraction …]
-        # Local statistics computation
-        def compute_local_mean():
-            window_size = self.windows
-            kernel = np.ones((window_size, window_size, window_size)) / (window_size**3)
-            return ndimage.convolve(image_3d, kernel, mode='reflect')
-
-        future = executor.submit(compute_local_mean)
-        futures.append(('local_mean', None, future))
-
-        def compute_local_variance():
-            window_size = self.windows
-            kernel = np.ones((window_size, window_size, window_size)) / (window_size**3)
-            mean = np.mean(image_3d)
-            return ndimage.convolve((image_3d - mean)**2, kernel, mode='reflect')
-
-        future = executor.submit(compute_local_variance)
-        futures.append(('local_var', None, future))
-
-        # Gradient computation
-        def compute_gradients():
-            gx = ndimage.sobel(image_3d, axis=2, mode='reflect')
-            gy = ndimage.sobel(image_3d, axis=1, mode='reflect')
-            gz = ndimage.sobel(image_3d, axis=0, mode='reflect')
-            return gx, gy, gz
-
-        future = executor.submit(compute_gradients)
-        futures.append(('gradients', None, future))
-
-        # Collect results for the independent computations
-        results = {}
-        for task_type, params, future in futures:
-            try:
-                result = future.result()
-                if task_type == 'gradients':
-                    # Store the gradient components separately
-                    gx, gy, gz = result
-                    results['gx'] = gx
-                    results['gy'] = gy
-                    results['gz'] = gz
-                else:
-                    results[f"{task_type}_{params}" if params is not None else task_type] = result
-            except Exception as e:
-                raise RuntimeError(f"Error in task {task_type}: {str(e)}")
-
-        # Stage 2: Dependent computations that need results from Stage 1
-        futures = []
-
-        # Gradient magnitude (depends on gradients)
-        def compute_gradient_magnitude(gx, gy, gz):
-            return np.sqrt(gx**2 + gy**2 + gz**2)
-
-        future = executor.submit(compute_gradient_magnitude,
-                                 results['gx'], results['gy'], results['gz'])
-        futures.append(('gradient_magnitude', None, future))
-
-        # Second-order gradients (depend on first gradients)
-        def compute_second_derivatives(gx, gy, gz):
-            gxx = ndimage.sobel(gx, axis=2, mode='reflect')
-            gyy = ndimage.sobel(gy, axis=1, mode='reflect')
-            gzz = ndimage.sobel(gz, axis=0, mode='reflect')
-            return gxx, gyy, gzz
-
-        future = executor.submit(compute_second_derivatives,
-                                 results['gx'], results['gy'], results['gz'])
-        futures.append(('second_derivatives', None, future))
-
-        # Collect results for the dependent computations
-        for task_type, params, future in futures:
-            try:
-                result = future.result()
-                if task_type == 'second_derivatives':
-                    # Store the second derivative components separately
-                    gxx, gyy, gzz = result
-                    results['gxx'] = gxx
-                    results['gyy'] = gyy
-                    results['gzz'] = gzz
-                else:
-                    results[task_type] = result
-            except Exception as e:
-                raise RuntimeError(f"Error in task {task_type}: {str(e)}")
-
-        # Stage 3: Final computations that depend on Stage 2 results
-        futures = []
-
-        # Laplacian and Hessian determinant (depend on second derivatives)
-        def compute_laplacian(gxx, gyy, gzz):
-            return gxx + gyy + gzz
+        if image_3d.ndim == 4 and image_3d.shape[-1] == 3:
+            # RGB case - process each channel
+            features_per_channel = []
+            for channel in range(3):
+                channel_features = self.compute_deep_feature_maps_cpu(image_3d[..., channel])
+                features_per_channel.append(channel_features)
 
-
-
-        futures.append(('laplacian', None, future))
-
-        def compute_hessian_det(gxx, gyy, gzz):
-            return gxx * gyy * gzz
-
-        future = executor.submit(compute_hessian_det,
-                                 results['gxx'], results['gyy'], results['gzz'])
-        futures.append(('hessian_det', None, future))
-
-        # Collect final results
-        for task_type, params, future in futures:
-            try:
-                result = future.result()
-                results[task_type] = result
-            except Exception as e:
-                raise RuntimeError(f"Error in task {task_type}: {str(e)}")
+            # Stack all channel features
+            return np.concatenate(features_per_channel, axis=-1)
 
-        # Organize results in the expected order
-        features = []
 
-        #
[… deleted lines not captured by the page extraction …]
-            features.append(results[f'dog_{sigma[0]}'])
+        # Calculate total number of features
+        num_basic_features = 1 + len(self.sigmas) + len(self.dogs)  # original + gaussians + dogs
+        num_gradient_features = len(self.sigmas)  # gradient magnitude for each sigma
+        num_laplacian_features = len(self.sigmas)  # laplacian for each sigma
+        num_hessian_features = len(self.sigmas) * 1  # 1 eigenvalue (largest) for each sigma
 
-
-        features.append(results['local_mean'])
-        features.append(results['local_var'])
+        total_features = num_basic_features + num_gradient_features + num_laplacian_features + num_hessian_features
 
-        #
-        features.
+        # Pre-allocate result array
+        features = np.empty(image_3d.shape + (total_features,), dtype=image_3d.dtype)
+        features[..., 0] = image_3d
 
-
-        features.append(results['laplacian'])
-        features.append(results['hessian_det'])
+        feature_idx = 1
 
-        #
-
-        if feat.shape != original_shape:
-            feat_adjusted = np.expand_dims(feat, axis=0)
-            if feat_adjusted.shape != original_shape:
-                raise ValueError(f"Feature {i} has shape {feat.shape}, expected {original_shape}")
-            features[i] = feat_adjusted
+        # Cache for Gaussian filters - only compute each sigma once
+        gaussian_cache = {}
 
[… deleted lines not captured by the page extraction …]
-        image_2d = self.image_3d[z, :, :]
-        original_shape = image_2d.shape
+        # Compute all unique sigmas needed (from both sigmas and dogs)
+        all_sigmas = set(self.sigmas)
+        for s1, s2 in self.dogs:
+            all_sigmas.add(s1)
+            all_sigmas.add(s2)
 
-        #
-
-
-        futures = []
-
-        # Gaussian smoothing
-        def compute_gaussian(sigma):
-            return ndimage.gaussian_filter(image_2d, sigma)
-
-        for sigma in self.alphas:
-            future = executor.submit(compute_gaussian, sigma)
-            futures.append(('gaussian', sigma, future))
-
-        # Difference of Gaussians
-        def compute_dog(s1, s2):
-            g1 = ndimage.gaussian_filter(image_2d, s1)
-            g2 = ndimage.gaussian_filter(image_2d, s2)
-            return g1 - g2
-
-        dog_pairs = self.dogs
-        for (s1, s2) in dog_pairs:
-            future = executor.submit(compute_dog, s1, s2)
-            futures.append(('dog', s1, future))
-
-        # Local statistics computation
-        def compute_local_mean():
-            window_size = self.windows
-            kernel = np.ones((window_size, window_size)) / (window_size**2)
-            return ndimage.convolve(image_2d, kernel, mode='reflect')
-
-        future = executor.submit(compute_local_mean)
-        futures.append(('local_mean', None, future))
-
-        def compute_local_variance():
-            window_size = self.windows
-            kernel = np.ones((window_size, window_size)) / (window_size**2)
-            mean = np.mean(image_2d)
-            return ndimage.convolve((image_2d - mean)**2, kernel, mode='reflect')
-
-        future = executor.submit(compute_local_variance)
-        futures.append(('local_var', None, future))
-
-        # Gradient computation
-        def compute_gradients():
-            gx = ndimage.sobel(image_2d, axis=1, mode='reflect')  # x direction
-            gy = ndimage.sobel(image_2d, axis=0, mode='reflect')  # y direction
-            return gx, gy
-
-        future = executor.submit(compute_gradients)
-        futures.append(('gradients', None, future))
-
-        # Collect results for the independent computations
-        results = {}
-        for task_type, params, future in futures:
-            try:
-                result = future.result()
-                if task_type == 'gradients':
-                    # Store the gradient components separately
-                    gx, gy = result
-                    results['gx'] = gx
-                    results['gy'] = gy
-                else:
-                    results[f"{task_type}_{params}" if params is not None else task_type] = result
-            except Exception as e:
-                raise RuntimeError(f"Error in task {task_type}: {str(e)}")
-
-        # Stage 2: Dependent computations that need results from Stage 1
-        futures = []
-
-        # Gradient magnitude (depends on gradients)
-        def compute_gradient_magnitude(gx, gy):
-            return np.sqrt(gx**2 + gy**2)
-
-        future = executor.submit(compute_gradient_magnitude, results['gx'], results['gy'])
-        futures.append(('gradient_magnitude', None, future))
-
-        # Second-order gradients (depend on first gradients)
-        def compute_second_derivatives(gx, gy):
-            gxx = ndimage.sobel(gx, axis=1, mode='reflect')
-            gyy = ndimage.sobel(gy, axis=0, mode='reflect')
-            # Cross derivatives for Hessian determinant
-            gxy = ndimage.sobel(gx, axis=0, mode='reflect')
-            gyx = ndimage.sobel(gy, axis=1, mode='reflect')
-            return gxx, gyy, gxy, gyx
-
-        future = executor.submit(compute_second_derivatives, results['gx'], results['gy'])
-        futures.append(('second_derivatives', None, future))
-
-        # Collect results for the dependent computations
-        for task_type, params, future in futures:
-            try:
-                result = future.result()
-                if task_type == 'second_derivatives':
-                    # Store the second derivative components separately
-                    gxx, gyy, gxy, gyx = result
-                    results['gxx'] = gxx
-                    results['gyy'] = gyy
-                    results['gxy'] = gxy
-                    results['gyx'] = gyx
-                else:
-                    results[task_type] = result
-            except Exception as e:
-                raise RuntimeError(f"Error in task {task_type}: {str(e)}")
-
-        # Stage 3: Final computations that depend on Stage 2 results
-        futures = []
-
-        # Laplacian and Hessian determinant (depend on second derivatives)
-        def compute_laplacian(gxx, gyy):
-            return gxx + gyy
-
-        future = executor.submit(compute_laplacian, results['gxx'], results['gyy'])
-        futures.append(('laplacian', None, future))
-
-        def compute_hessian_det(gxx, gyy, gxy, gyx):
-            return gxx * gyy - gxy * gyx
-
-        future = executor.submit(compute_hessian_det,
-                                 results['gxx'], results['gyy'],
-                                 results['gxy'], results['gyx'])
-        futures.append(('hessian_det', None, future))
-
-        # Collect final results
-        for task_type, params, future in futures:
-            try:
-                result = future.result()
-                results[task_type] = result
-            except Exception as e:
-                raise RuntimeError(f"Error in task {task_type}: {str(e)}")
-
-        # Organize results in the expected order
-        features = []
-
-        # Add Gaussian features
-        for sigma in self.alphas:
-            features.append(results[f'gaussian_{sigma}'])
-
-        for sigma in self.dogs:
-            features.append(results[f'dog_{sigma[0]}'])
-
-        # Add local statistics
-        features.append(results['local_mean'])
-        features.append(results['local_var'])
-
-        # Add gradient magnitude
-        features.append(results['gradient_magnitude'])
-
-        # Add Laplacian and Hessian determinant
-        features.append(results['laplacian'])
-        features.append(results['hessian_det'])
-
-        # Verify shapes
-        for i, feat in enumerate(features):
-            if feat.shape != original_shape:
-                # Check dimensionality and expand if needed
-                if len(feat.shape) < len(original_shape):
-                    feat_adjusted = feat
-                    missing_dims = len(original_shape) - len(feat.shape)
-                    for _ in range(missing_dims):
-                        feat_adjusted = np.expand_dims(feat_adjusted, axis=0)
-
-                    if feat_adjusted.shape != original_shape:
-                        raise ValueError(f"Feature {i} has shape {feat.shape}, expected {original_shape}")
-
-                    features[i] = feat_adjusted
+        # Pre-compute all Gaussian filters
+        for sigma in all_sigmas:
+            gaussian_cache[sigma] = ndimage.gaussian_filter(image_3d, sigma)
 
[… deleted lines not captured by the page extraction …]
-        """Compute feature maps for 2D images using CPU with thread-based parallelism"""
-        image_2d = self.image_3d[z, :, :]
-        original_shape = image_2d.shape
+        # Gaussian smoothing - use cached results
+        for sigma in self.sigmas:
+            features[..., feature_idx] = gaussian_cache[sigma]
+            feature_idx += 1
 
-        #
[… deleted lines not captured by the page extraction …]
-        features.append(results['gradient_magnitude'])
-
-        # Verify shapes
-        for i, feat in enumerate(features):
-            if feat.shape != original_shape:
-                # Check dimensionality and expand if needed
-                if len(feat.shape) < len(original_shape):
-                    feat_adjusted = feat
-                    missing_dims = len(original_shape) - len(feat.shape)
-                    for _ in range(missing_dims):
-                        feat_adjusted = np.expand_dims(feat_adjusted, axis=0)
-
-                    if feat_adjusted.shape != original_shape:
-                        raise ValueError(f"Feature {i} has shape {feat.shape}, expected {original_shape}")
-
-                    features[i] = feat_adjusted
+        # Difference of Gaussians - use cached results
+        for s1, s2 in self.dogs:
+            features[..., feature_idx] = gaussian_cache[s1] - gaussian_cache[s2]
+            feature_idx += 1
+
+        # Gaussian gradient magnitudes for each sigma (vectorized)
+        for sigma in self.sigmas:
+            gaussian_img = gaussian_cache[sigma]
+            gx = ndimage.sobel(gaussian_img, axis=2, mode='reflect')
+            gy = ndimage.sobel(gaussian_img, axis=1, mode='reflect')
+            gz = ndimage.sobel(gaussian_img, axis=0, mode='reflect')
+            features[..., feature_idx] = np.sqrt(gx**2 + gy**2 + gz**2)
+            feature_idx += 1
+
+        # Laplacian of Gaussian for each sigma (vectorized)
+        for sigma in self.sigmas:
+            gaussian_img = gaussian_cache[sigma]
+            features[..., feature_idx] = ndimage.laplace(gaussian_img, mode='reflect')
+            feature_idx += 1
+
+        # Largest Hessian eigenvalue for each sigma (fully vectorized)
+        for sigma in self.sigmas:
+            gaussian_img = gaussian_cache[sigma]
+
+            # Compute second derivatives (Hessian components) - all vectorized
+            hxx = ndimage.gaussian_filter(gaussian_img, sigma=0, order=[0, 0, 2], mode='reflect')
+            hyy = ndimage.gaussian_filter(gaussian_img, sigma=0, order=[0, 2, 0], mode='reflect')
+            hzz = ndimage.gaussian_filter(gaussian_img, sigma=0, order=[2, 0, 0], mode='reflect')
+            hxy = ndimage.gaussian_filter(gaussian_img, sigma=0, order=[0, 1, 1], mode='reflect')
+            hxz = ndimage.gaussian_filter(gaussian_img, sigma=0, order=[1, 0, 1], mode='reflect')
+            hyz = ndimage.gaussian_filter(gaussian_img, sigma=0, order=[1, 1, 0], mode='reflect')
+
+            # Vectorized eigenvalue computation using numpy broadcasting
+            # Create arrays with shape (d0, d1, d2, 3, 3) for all Hessian matrices
+            shape = image_3d.shape
+            hessian_matrices = np.zeros(shape + (3, 3))
+
+            # Fill the symmetric Hessian matrices
+            hessian_matrices[..., 0, 0] = hxx
+            hessian_matrices[..., 1, 1] = hyy
+            hessian_matrices[..., 2, 2] = hzz
+            hessian_matrices[..., 0, 1] = hessian_matrices[..., 1, 0] = hxy
+            hessian_matrices[..., 0, 2] = hessian_matrices[..., 2, 0] = hxz
+            hessian_matrices[..., 1, 2] = hessian_matrices[..., 2, 1] = hyz
+
+            # Reshape for batch eigenvalue computation
+            original_shape = hessian_matrices.shape[:-2]  # (d0, d1, d2)
+            batch_size = np.prod(original_shape)
+            hessian_batch = hessian_matrices.reshape(batch_size, 3, 3)
+
+            # Compute eigenvalues for all matrices at once
+            eigenvalues_batch = np.real(np.linalg.eigvals(hessian_batch))
+
+            # Get only the largest eigenvalue for each matrix
+            largest_eigenvalues = np.max(eigenvalues_batch, axis=1)
+
+            # Reshape back to original spatial dimensions
+            largest_eigenvalues = largest_eigenvalues.reshape(original_shape)
+
+            # Add the largest eigenvalue as a feature
+            features[..., feature_idx] = largest_eigenvalues
+            feature_idx += 1
 
-        return np.stack(features, axis=-1)
 
+        # Normalize only morphological features, keep intensity features raw
+        intensity_features = features[..., :num_basic_features]  # original + gaussians + DoGs
+        morphology_features = features[..., num_basic_features:]  # gradients + laplacians + eigenvalues
 
-
+        # Normalize only morphological features
+        morph_means = np.mean(morphology_features, axis=(0,1,2), keepdims=True)
+        morph_stds = np.std(morphology_features, axis=(0,1,2), keepdims=True)
+        morph_stds = np.where(morph_stds == 0, 1, morph_stds)
+        morphology_features = (morphology_features - morph_means) / morph_stds
+
+        # Recombine
+        features = np.concatenate([intensity_features, morphology_features], axis=-1)
 
-
-
-
+        return features
+
+    def compute_deep_feature_maps_cpu_smaller(self, image_3d=None):  #smaller
+        """Optimized version using determinant instead of full eigenvalue computation. Currently not in use anywhere"""
         if image_3d is None:
             image_3d = self.image_3d
-        original_shape = image_3d.shape
 
-        features
+        # Calculate total number of features (using determinant instead of 3 eigenvalues)
+        num_basic_features = 1 + len(self.sigmas) + len(self.dogs)
+        num_gradient_features = len(self.sigmas)
+        num_laplacian_features = len(self.sigmas)
+        num_hessian_features = len(self.sigmas) * 3  # determinant + trace + frobenius norm
 
[… deleted lines not captured by the page extraction …]
+        total_features = num_basic_features + num_gradient_features + num_laplacian_features + num_hessian_features
+
+        # Pre-allocate result array
+        features = np.empty(image_3d.shape + (total_features,), dtype=image_3d.dtype)
+        features[..., 0] = image_3d
+
+        feature_idx = 1
+
+        # Cache for Gaussian filters
+        gaussian_cache = {}
+        all_sigmas = set(self.sigmas)
+        for s1, s2 in self.dogs:
+            all_sigmas.add(s1)
+            all_sigmas.add(s2)
+
+        # Pre-compute all Gaussian filters
+        for sigma in all_sigmas:
+            gaussian_cache[sigma] = ndimage.gaussian_filter(image_3d, sigma)
+
+        # Gaussian smoothing
+        for sigma in self.sigmas:
+            features[..., feature_idx] = gaussian_cache[sigma]
+            feature_idx += 1
+
+        # Difference of Gaussians
+        for s1, s2 in self.dogs:
+            features[..., feature_idx] = gaussian_cache[s1] - gaussian_cache[s2]
+            feature_idx += 1
+
+        # Gaussian gradient magnitudes
+        for sigma in self.sigmas:
+            gaussian_img = gaussian_cache[sigma]
+            gx = ndimage.sobel(gaussian_img, axis=2, mode='reflect')
+            gy = ndimage.sobel(gaussian_img, axis=1, mode='reflect')
+            gz = ndimage.sobel(gaussian_img, axis=0, mode='reflect')
+            features[..., feature_idx] = np.sqrt(gx**2 + gy**2 + gz**2)
+            feature_idx += 1
+
+        # Laplacian of Gaussian
+        for sigma in self.sigmas:
+            gaussian_img = gaussian_cache[sigma]
+            features[..., feature_idx] = ndimage.laplace(gaussian_img, mode='reflect')
+            feature_idx += 1
+
+        # Hessian-based features (much faster than full eigenvalue computation)
+        for sigma in self.sigmas:
+            gaussian_img = gaussian_cache[sigma]
+
+            # Compute second derivatives
+            hxx = ndimage.gaussian_filter(gaussian_img, sigma=0, order=[0, 0, 2], mode='reflect')
+            hyy = ndimage.gaussian_filter(gaussian_img, sigma=0, order=[0, 2, 0], mode='reflect')
+            hzz = ndimage.gaussian_filter(gaussian_img, sigma=0, order=[2, 0, 0], mode='reflect')
+            hxy = ndimage.gaussian_filter(gaussian_img, sigma=0, order=[0, 1, 1], mode='reflect')
+            hxz = ndimage.gaussian_filter(gaussian_img, sigma=0, order=[1, 0, 1], mode='reflect')
+            hyz = ndimage.gaussian_filter(gaussian_img, sigma=0, order=[1, 1, 0], mode='reflect')
+
+            # Hessian determinant (captures overall curvature)
+            determinant = (hxx * (hyy * hzz - hyz**2) -
+                           hxy * (hxy * hzz - hxz * hyz) +
+                           hxz * (hxy * hyz - hyy * hxz))
+            features[..., feature_idx] = determinant
+            feature_idx += 1
+
+            # Hessian trace (sum of eigenvalues)
+            trace = hxx + hyy + hzz
+            features[..., feature_idx] = trace
+            feature_idx += 1
+
+            # Frobenius norm (overall curvature magnitude)
+            frobenius_norm = np.sqrt(hxx**2 + hyy**2 + hzz**2 + 2*(hxy**2 + hxz**2 + hyz**2))
+            features[..., feature_idx] = frobenius_norm
+            feature_idx += 1
 
[… deleted lines not captured by the page extraction …]
+        """
+        # Normalize features: zero-mean, unit variance per feature band
+        # Compute mean and std across spatial dimensions (0,1,2), keeping feature dimension
+        feature_means = np.mean(features, axis=(0, 1, 2), keepdims=True)
+        feature_stds = np.std(features, axis=(0, 1, 2), keepdims=True)
+
+        # Avoid division by zero for constant features
+        feature_stds = np.where(feature_stds == 0, 1, feature_stds)
+
+        # Normalize in-place for memory efficiency
+        features = (features - feature_means) / feature_stds
+        """
+        # Normalize only morphological features, keep intensity features raw
+        intensity_features = features[..., :num_basic_features]  # original + gaussians + DoGs
+        morphology_features = features[..., num_basic_features:]  # gradients + laplacians + eigenvalues
+
+        # Normalize only morphological features
+        morph_means = np.mean(morphology_features, axis=(0,1,2), keepdims=True)
+        morph_stds = np.std(morphology_features, axis=(0,1,2), keepdims=True)
+        morph_stds = np.where(morph_stds == 0, 1, morph_stds)
+        morphology_features = (morphology_features - morph_means) / morph_stds
+
+        # Recombine
+        features = np.concatenate([intensity_features, morphology_features], axis=-1)
+
+        return features
+
+
+    def compute_feature_maps_cpu(self, image_3d=None):  #lil
+        """Optimized version that caches Gaussian filters to avoid redundant computation"""
+        if image_3d is None:
+            image_3d = self.image_3d
+
+        if image_3d.ndim == 4 and image_3d.shape[-1] == 3:
+            # RGB case - process each channel
+            features_per_channel = []
+            for channel in range(3):
+                channel_features = self.compute_feature_maps_cpu(image_3d[..., channel])
+                features_per_channel.append(channel_features)
 
-        #
-
-
+            # Stack all channel features
+            return np.concatenate(features_per_channel, axis=-1)
+
+        # Pre-allocate result array
+        num_features = len(self.sigmas) + len(self.dogs) + 2
+        features = np.empty(image_3d.shape + (num_features,), dtype=image_3d.dtype)
+        features[..., 0] = image_3d
+
+        feature_idx = 1
+
+        # Cache for Gaussian filters - only compute each sigma once
+        gaussian_cache = {}
 
-        #
-
-
-
-
-            raise ValueError(f"Feature {i} has shape {feat.shape}, expected {original_shape}")
-            features[i] = feat_adjusted
+        # Compute all unique sigmas needed (from both sigmas and dogs)
+        all_sigmas = set(self.sigmas)
+        for s1, s2 in self.dogs:
+            all_sigmas.add(s1)
+            all_sigmas.add(s2)
 
-
+        # Pre-compute all Gaussian filters
+        for sigma in all_sigmas:
+            gaussian_cache[sigma] = ndimage.gaussian_filter(image_3d, sigma)
+
+        # Gaussian smoothing - use cached results
+        for sigma in self.sigmas:
+            features[..., feature_idx] = gaussian_cache[sigma]
+            feature_idx += 1
+
+        # Difference of Gaussians - use cached results
+        for s1, s2 in self.dogs:
+            features[..., feature_idx] = gaussian_cache[s1] - gaussian_cache[s2]
+            feature_idx += 1
+
+        # Gradient magnitude
+        gx = ndimage.sobel(image_3d, axis=2, mode='reflect')
+        gy = ndimage.sobel(image_3d, axis=1, mode='reflect')
+        gz = ndimage.sobel(image_3d, axis=0, mode='reflect')
+        features[..., feature_idx] = np.sqrt(gx**2 + gy**2 + gz**2)
 
+        return features
 
     def organize_by_z(self, coordinates):
         """
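Aside, not part of the diff: the new deep 2D feature method materialises an (H, W, 2, 2) array of Hessians and batches it through np.linalg.eigvals. For a symmetric 2x2 matrix the largest eigenvalue also has a closed form, so the same per-pixel feature can be sketched without the matrix stack. A minimal illustration, assuming a single-channel float image (the function name here is hypothetical, not part of the package):

import numpy as np
from scipy import ndimage

def largest_hessian_eigenvalue_2d(image, sigma):
    # Second derivatives via derivative-of-Gaussian filters (order is per-axis).
    hxx = ndimage.gaussian_filter(image, sigma=sigma, order=[0, 2], mode='reflect')
    hyy = ndimage.gaussian_filter(image, sigma=sigma, order=[2, 0], mode='reflect')
    hxy = ndimage.gaussian_filter(image, sigma=sigma, order=[1, 1], mode='reflect')
    # For a symmetric 2x2 matrix [[a, b], [b, c]]:
    #   lambda_max = (a + c)/2 + sqrt(((a - c)/2)**2 + b**2)
    mean_curv = 0.5 * (hxx + hyy)
    radius = np.sqrt((0.5 * (hxx - hyy)) ** 2 + hxy ** 2)
    return mean_curv + radius

This avoids allocating the (H, W, 2, 2) intermediate and the general (non-symmetric) eigensolver; the diff's eigvals route has the advantage of working unchanged for the 3x3 case in the 3D method.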
@@ -692,113 +549,71 @@
 
     def process_chunk(self, chunk_coords):
         """
-
-
+        Vectorized process_chunk that releases GIL more effectively
+        """
+        if self.realtimechunks is None:
+            # Generate coordinates using vectorized operations
+            z_min, z_max = chunk_coords[0], chunk_coords[1]
+            y_min, y_max = chunk_coords[2], chunk_coords[3]
+            x_min, x_max = chunk_coords[4], chunk_coords[5]
+
+            # More efficient coordinate generation
+            z_range = np.arange(z_min, z_max)
+            y_range = np.arange(y_min, y_max)
+            x_range = np.arange(x_min, x_max)
+
+            # Create coordinate grid efficiently
+            z_grid, y_grid, x_grid = np.meshgrid(z_range, y_range, x_range, indexing='ij')
+            chunk_coords_array = np.column_stack([
+                z_grid.ravel(),
+                y_grid.ravel(),
+                x_grid.ravel()
+            ])
+        else:
+            # Convert to numpy array for vectorized operations
+            chunk_coords_array = np.array(chunk_coords)
+            z_coords, y_coords, x_coords = chunk_coords_array[:, 0], chunk_coords_array[:, 1], chunk_coords_array[:, 2]
+            z_min, z_max = z_coords.min(), z_coords.max()
+            y_min, y_max = y_coords.min(), y_coords.max()
+            x_min, x_max = x_coords.min(), x_coords.max()
+
+        # Extract subarray
+        subarray = self.image_3d[z_min:z_max+1, y_min:y_max+1, x_min:x_max+1]
 
-
-
-
-
+        # Compute features for entire subarray at once
+        if self.speed:
+            feature_map = self.compute_feature_maps_cpu(subarray)
+        else:
+            feature_map = self.compute_deep_feature_maps_cpu(subarray)
 
-
-
-
-        Sets of coordinates classified as foreground or background
-        """
-        foreground = set()
-        background = set()
+        # Vectorized feature extraction
+        # Convert global coordinates to local coordinates in one operation
+        local_coords = chunk_coords_array - np.array([z_min, y_min, x_min])
 
[… deleted lines not captured by the page extraction …]
-            # Consider moving this to process chunk ??
-            chunk_coords = np.stack(np.meshgrid(
-                np.arange(z_min, z_max),
-                np.arange(y_min, y_max),
-                np.arange(x_min, x_max),
-                indexing='ij'
-            )).reshape(3, -1).T
-
-            chunk_coords = (list(map(tuple, chunk_coords)))
-        else:  #Presumes we're not segmenting all
-            # Find min/max bounds of the coordinates to get the smallest containing subarray
-            z_coords = [z for z, y, x in chunk_coords]
-            y_coords = [y for z, y, x in chunk_coords]
-            x_coords = [x for z, y, x in chunk_coords]
-
-            z_min, z_max = min(z_coords), max(z_coords)
-            y_min, y_max = min(y_coords), max(y_coords)
-            x_min, x_max = min(x_coords), max(x_coords)
-
-
-            # Extract the subarray
-            subarray = self.image_3d[z_min:z_max+1, y_min:y_max+1, x_min:x_max+1]
-
-            # Compute features for this subarray
-            if self.speed:
-                feature_map = self.compute_feature_maps_cpu_parallel(subarray)  #If the interactive segmenter is slow
-            else:  #Due to the parallel, consider singleton implementation for it specifically
-                feature_map = self.compute_deep_feature_maps_cpu_parallel(subarray)
-
-            # Extract features for each coordinate, adjusting for subarray offset
-            features = []
-            for z, y, x in chunk_coords:
-                # Transform global coordinates to local subarray coordinates
-                local_z = z - z_min
-                local_y = y - y_min
-                local_x = x - x_min
-
-                # Get feature at this position
-                feature = feature_map[local_z, local_y, local_x]
-                features.append(feature)
-
-
-            # Make predictions
+        # Extract all features at once using advanced indexing
+        features = feature_map[local_coords[:, 0], local_coords[:, 1], local_coords[:, 2]]
+
+        # Vectorized predictions (assuming your model can handle batch predictions)
+        if hasattr(self.model, 'predict_batch') or features.ndim > 1:
+            # If model supports batch prediction
             predictions = self.model.predict(features)
-
-            # Assign coordinates based on predictions
-            for coord, pred in zip(chunk_coords, predictions):
-                if pred:
-                    foreground.add(coord)
-                else:
-                    background.add(coord)
-
         else:
-
[… deleted lines not captured by the page extraction …]
-            if self.feature_cache is None:
-                features = self.get_feature_map_slice(z, self.speed, self.cur_gpu)
-                features = [features[y, x] for y, x in coords]
-            elif z not in self.feature_cache and not self.previewing:
-                features = self.get_feature_map_slice(z, self.speed, self.cur_gpu)
-                features = [features[y, x] for y, x in coords]
-            elif z not in self.feature_cache or self.feature_cache is None and self.previewing:
-                features = self.map_slice
-                try:
-                    features = [features[y, x] for y, x in coords]
-                except:
-                    return [], []
-            else:
-                features = [self.feature_cache[z][y, x] for y, x in coords]
-
-            predictions = self.model.predict(features)
-
-            for (y, x), pred in zip(coords, predictions):
-                coord = (z, y, x)  # Reconstruct the 3D coordinate as a tuple
-                if pred:
-                    foreground.add(coord)
-                else:
-                    background.add(coord)
+            # Fallback to individual predictions but still vectorized preparation
+            predictions = np.array([self.model.predict([feat]) for feat in features])
+
+        # Vectorized coordinate assignment
+        predictions = np.array(predictions, dtype=bool)
+        foreground_mask = predictions
+        background_mask = ~predictions
 
+        # Use boolean indexing to separate coordinates
+        foreground_coords = chunk_coords_array[foreground_mask]
+        background_coords = chunk_coords_array[background_mask]
+
+        # Convert to sets (still needed for your return format)
+        foreground = set(map(tuple, foreground_coords))
+        background = set(map(tuple, background_coords))
+
         return foreground, background
 
     def twodim_coords(self, y_dim, x_dim, z, chunk_size = None, subrange = None):
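Aside, not part of the diff: the reworked process_chunk replaces the old per-voxel Python loops with a coordinate grid plus boolean-mask partitioning. A self-contained toy sketch of that pattern (shapes and the stand-in mask are hypothetical):

import numpy as np

# Build every (z, y, x) in a small block, as the new code does with meshgrid.
z_grid, y_grid, x_grid = np.meshgrid(np.arange(2), np.arange(3), np.arange(3),
                                     indexing='ij')
coords = np.column_stack([z_grid.ravel(), y_grid.ravel(), x_grid.ravel()])

# Stand-in for model.predict(features): one boolean per coordinate.
predictions = np.zeros(len(coords), dtype=bool)
predictions[::2] = True

# Split coordinates with boolean indexing instead of a Python loop.
foreground = set(map(tuple, coords[predictions]))
background = set(map(tuple, coords[~predictions]))
assert len(foreground) + len(background) == len(coords)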
@@ -861,62 +676,38 @@
 
 
     def segment_volume(self, array, chunk_size=None, gpu=False):
-        """
-
-
-        self.
-
-        chunk_size = self.master_chunk  #memory efficient chunk
-
+        """
+        Process chunks in batches equal to CPU cores for optimal GIL handling
+        """
+
+        self.realtimechunks = None
+        chunk_size = self.master_chunk
 
         def create_2d_chunks():
-            """
-
-            Each chunk is a complete z-slice with all y,x coordinates,
-            unless the slice exceeds 262144 pixels, in which case it's divided into subchunks.
-
-            Returns:
-                List of chunks, where each chunk contains the coordinates for one z-slice or subchunk
-            """
-            MAX_CHUNK_SIZE = 262144
-
+            """Same as your existing implementation"""
+            MAX_CHUNK_SIZE = self.twod_chunk_size
             chunks = []
 
             for z in range(self.image_3d.shape[0]):
-                # Get the dimensions of this z-slice
                 y_dim = self.image_3d.shape[1]
                 x_dim = self.image_3d.shape[2]
                 total_pixels = y_dim * x_dim
 
-                # If the slice is small enough, do not subchunk
                 if total_pixels <= MAX_CHUNK_SIZE:
-
                     chunks.append([y_dim, x_dim, z, total_pixels, None])
-
                 else:
-                    # Determine which dimension to divide (the largest one)
                     largest_dim = 'y' if y_dim >= x_dim else 'x'
-
-                    # Calculate how many divisions we need
                     num_divisions = int(np.ceil(total_pixels / MAX_CHUNK_SIZE))
 
-                    # Calculate the approx size of each division along the largest dimension
                     if largest_dim == 'y':
                         div_size = int(np.ceil(y_dim / num_divisions))
-                        # Create subchunks by dividing the y-dimension
                         for i in range(0, y_dim, div_size):
                             end_i = min(i + div_size, y_dim)
-
-
                             chunks.append([y_dim, x_dim, z, None, ['y', i, end_i]])
-
-                    else:  # largest_dim == 'x'
+                    else:
                         div_size = int(np.ceil(x_dim / num_divisions))
-                        # Create subchunks by dividing the x-dimension
                         for i in range(0, x_dim, div_size):
                             end_i = min(i + div_size, x_dim)
-
                             chunks.append([y_dim, x_dim, z, None, ['x', i, end_i]])
 
             return chunks
@@ -924,30 +715,20 @@
         print("Chunking data...")
 
         if not self.use_two:
-            #
+            # Create smaller chunks for better load balancing
             if chunk_size is None:
                 total_cores = multiprocessing.cpu_count()
-
-                # Calculate total volume and target volume per core
                 total_volume = np.prod(self.image_3d.shape)
-                target_volume_per_chunk = total_volume / total_cores
+                target_volume_per_chunk = total_volume / (total_cores * 4)  # 4x more chunks
 
-                # Calculate chunk size that would give us roughly one chunk per core
-                # Using cube root since we want roughly equal sizes in all dimensions
                 chunk_size = int(np.cbrt(target_volume_per_chunk))
-
-
-                chunk_size = max(32, min(chunk_size, min(self.image_3d.shape)))
-
-                # Round to nearest multiple of 32 for better memory alignment
-                chunk_size = ((chunk_size + 15) // 32) * 32
+                chunk_size = max(16, min(chunk_size, min(self.image_3d.shape) // 2))
+                chunk_size = ((chunk_size + 7) // 16) * 16
 
-            # Calculate number of chunks in each dimension
             z_chunks = (self.image_3d.shape[0] + chunk_size - 1) // chunk_size
             y_chunks = (self.image_3d.shape[1] + chunk_size - 1) // chunk_size
             x_chunks = (self.image_3d.shape[2] + chunk_size - 1) // chunk_size
 
-            # Create start indices for all chunks at once
             chunk_starts = np.array(np.meshgrid(
                 np.arange(z_chunks) * chunk_size,
                 np.arange(y_chunks) * chunk_size,
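Aside, not part of the diff: a worked example of the revised chunk-size heuristic under assumed inputs (a 512-cubed volume on an 8-core machine; both values are hypothetical):

import numpy as np

total_cores = 8
total_volume = 512 ** 3                            # 134_217_728 voxels
target = total_volume / (total_cores * 4)          # 4_194_304 voxels per chunk
chunk_size = int(np.cbrt(target))                  # 161
chunk_size = max(16, min(chunk_size, 512 // 2))    # still 161
chunk_size = ((chunk_size + 7) // 16) * 16         # 160, a multiple of 16
print(chunk_size)  # -> 160

Compared with 0.8.4 (one chunk per core, rounded to multiples of 32), this yields roughly four times as many, smaller chunks, which matches the "better load balancing" comment in the hunk above.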
@@ -960,38 +741,148 @@
             z_end = min(z_start + chunk_size, self.image_3d.shape[0])
             y_end = min(y_start + chunk_size, self.image_3d.shape[1])
             x_end = min(x_start + chunk_size, self.image_3d.shape[2])
-
             coords = [z_start, z_end, y_start, y_end, x_start, x_end]
             chunks.append(coords)
-
-
         else:
             chunks = create_2d_chunks()
[… deleted lines not captured by the page extraction …]
+
+        print("Processing chunks in batches...")
+
+        # Process chunks in batches equal to CPU count
+        max_workers = self.batch_amplifier * multiprocessing.cpu_count()
+        batch_size = max_workers  # One batch per core
+        total_processed = 0
+
+        # Configure sklearn for maximum parallelism
+        if hasattr(self.model, 'n_jobs'):
+            original_n_jobs = self.model.n_jobs
+            self.model.n_jobs = -1  # Use all cores for sklearn prediction
+
+        try:
+            for batch_start in range(0, len(chunks), batch_size):
+                batch_end = min(batch_start + batch_size, len(chunks))
+                chunk_batch = chunks[batch_start:batch_end]
+
+                print(f"Processing batch {batch_start//batch_size + 1}/{(len(chunks) + batch_size - 1)//batch_size}")
+
+                # PHASE 1: Extract features in parallel (releases GIL)
+                batch_results = []
+
+                with ThreadPoolExecutor(max_workers=len(chunk_batch)) as executor:
+                    futures = []
+                    for chunk in chunk_batch:
+                        future = executor.submit(self.extract_chunk_features, chunk)
+                        futures.append(future)
+
+                    # Collect feature results
+                    for future in futures:
+                        features, coords = future.result()
+                        if len(features) > 0:
+                            batch_results.append((features, coords))
+
+                # PHASE 2: Batch predict with sklearn's parallelism (no GIL issues)
+                if batch_results:
+                    # Combine all features from this batch
+                    all_batch_features = np.vstack([result[0] for result in batch_results])
+                    all_batch_coords = np.vstack([result[1] for result in batch_results])
+
+                    # Single prediction call using sklearn's internal parallelism
+                    predictions = self.model.predict(all_batch_features)
+                    predictions = np.array(predictions, dtype=bool)
+
+                    # Apply predictions to array
+                    foreground_coords = all_batch_coords[predictions]
+                    if len(foreground_coords) > 0:
+                        z_coords, y_coords, x_coords = foreground_coords[:, 0], foreground_coords[:, 1], foreground_coords[:, 2]
+                        array[z_coords, y_coords, x_coords] = 255
+
+                    # Clean up batch data for memory management
+                    del all_batch_features, all_batch_coords, predictions, foreground_coords
+
+                total_processed += len(chunk_batch)
+                print(f"Completed {total_processed}/{len(chunks)} chunks")
+
+        finally:
+            # Restore original sklearn settings
+            if hasattr(self.model, 'n_jobs'):
+                self.model.n_jobs = original_n_jobs
 
-        #Ok so this should be returned one chunk at a time I presume.
         return array
 
+    def extract_chunk_features(self, chunk_coords):
+        """
+        Extract features for a single chunk without prediction
+        Designed to release GIL effectively
+        """
+
+        if self.previewing or not self.use_two:
+            if self.realtimechunks is None:
+                z_min, z_max = chunk_coords[0], chunk_coords[1]
+                y_min, y_max = chunk_coords[2], chunk_coords[3]
+                x_min, x_max = chunk_coords[4], chunk_coords[5]
+
+                # Vectorized coordinate generation (releases GIL)
+                z_range = np.arange(z_min, z_max)
+                y_range = np.arange(y_min, y_max)
+                x_range = np.arange(x_min, x_max)
+
+                z_grid, y_grid, x_grid = np.meshgrid(z_range, y_range, x_range, indexing='ij')
+                chunk_coords_array = np.column_stack([
+                    z_grid.ravel(), y_grid.ravel(), x_grid.ravel()
|
|
832
|
+
])
|
|
833
|
+
else:
|
|
834
|
+
chunk_coords_array = np.array(chunk_coords)
|
|
835
|
+
z_coords, y_coords, x_coords = chunk_coords_array[:, 0], chunk_coords_array[:, 1], chunk_coords_array[:, 2]
|
|
836
|
+
z_min, z_max = z_coords.min(), z_coords.max()
|
|
837
|
+
y_min, y_max = y_coords.min(), y_coords.max()
|
|
838
|
+
x_min, x_max = x_coords.min(), x_coords.max()
|
|
839
|
+
|
|
840
|
+
# Extract subarray and compute features (releases GIL)
|
|
841
|
+
subarray = self.image_3d[z_min:z_max+1, y_min:y_max+1, x_min:x_max+1]
|
|
842
|
+
|
|
843
|
+
if self.speed:
|
|
844
|
+
feature_map = self.compute_feature_maps_cpu(subarray)
|
|
845
|
+
else:
|
|
846
|
+
feature_map = self.compute_deep_feature_maps_cpu(subarray)
|
|
847
|
+
|
|
848
|
+
# Vectorized feature extraction (releases GIL)
|
|
849
|
+
local_coords = chunk_coords_array - np.array([z_min, y_min, x_min])
|
|
850
|
+
features = feature_map[local_coords[:, 0], local_coords[:, 1], local_coords[:, 2]]
|
|
851
|
+
|
|
852
|
+
return features, chunk_coords_array
|
|
853
|
+
|
|
854
|
+
else:
|
|
855
|
+
# Handle 2D case
|
|
856
|
+
chunk_coords_list = self.twodim_coords(chunk_coords[0], chunk_coords[1],
|
|
857
|
+
chunk_coords[2], chunk_coords[3], chunk_coords[4])
|
|
858
|
+
chunk_coords_by_z = self.organize_by_z(chunk_coords_list)
|
|
859
|
+
|
|
860
|
+
all_features = []
|
|
861
|
+
all_coords = []
|
|
862
|
+
|
|
863
|
+
for z, coords in chunk_coords_by_z.items():
|
|
864
|
+
coords_array = np.array(coords)
|
|
865
|
+
|
|
866
|
+
# Get features for this z-slice
|
|
867
|
+
features_slice = self.get_feature_map_slice(z, self.speed, self.cur_gpu)
|
|
868
|
+
features = features_slice[coords_array[:, 0], coords_array[:, 1]]
|
|
869
|
+
|
|
870
|
+
|
|
871
|
+
# Convert to 3D coordinates
|
|
872
|
+
coords_3d = np.column_stack([
|
|
873
|
+
np.full(len(coords_array), z),
|
|
874
|
+
coords_array[:, 0],
|
|
875
|
+
coords_array[:, 1]
|
|
876
|
+
])
|
|
877
|
+
|
|
878
|
+
all_features.append(features)
|
|
879
|
+
all_coords.append(coords_3d)
|
|
880
|
+
|
|
881
|
+
if all_features:
|
|
882
|
+
return np.vstack(all_features), np.vstack(all_coords)
|
|
883
|
+
else:
|
|
884
|
+
return np.array([]), np.array([])
|
|
885
|
+
|
|
995
886
|
def update_position(self, z=None, x=None, y=None):
|
|
996
887
|
"""Update current position for chunk prioritization with safeguards"""
 
@@ -1019,10 +910,7 @@ class InteractiveSegmenter:
 
         # Only clear map_slice if z changes and we're not already generating a new one
         if self.current_z != self.prev_z:
-
-            if hasattr(self, 'feature_cache') and self.feature_cache is not None:
-                if self.current_z not in self.feature_cache:
-                    self.map_slice = None
+
             self._currently_segmenting = None
 
         # Update previous z
@@ -1077,12 +965,115 @@ class InteractiveSegmenter:
 
         print("Ready!")
 
+    def get_realtime_chunks_2d(self, chunk_size=None):
+        """
+        Create square chunks with 1 z-thickness (2D chunks across XY planes)
+        """
+
+        if chunk_size is None:
+            chunk_size = int(np.sqrt(self.twod_chunk_size))
+
+        # Determine if we need to chunk XY planes
+        small_dims = (self.image_3d.shape[1] <= chunk_size and
+                      self.image_3d.shape[2] <= chunk_size)
+        few_z = self.image_3d.shape[0] <= 100  # arbitrary threshold
+
+        # If small enough, each Z is one chunk
+        if small_dims and few_z:
+            chunk_size_xy = max(self.image_3d.shape[1], self.image_3d.shape[2])
+        else:
+            chunk_size_xy = chunk_size
+
+        # Calculate chunks for XY plane
+        y_chunks = (self.image_3d.shape[1] + chunk_size_xy - 1) // chunk_size_xy
+        x_chunks = (self.image_3d.shape[2] + chunk_size_xy - 1) // chunk_size_xy
+
+        # Populate chunk dictionary
+        chunk_dict = {}
+
+        # Create chunks for each Z plane (single Z thickness)
+        for z in range(self.image_3d.shape[0]):
+            if small_dims:
+                chunk_dict[(z, 0, 0)] = {
+                    'coords': [0, self.image_3d.shape[1], 0, self.image_3d.shape[2]],
+                    'processed': False,
+                    'z': z  # Keep for backward compatibility
+                }
+            else:
+                # Multiple chunks per Z plane
+                for y_chunk in range(y_chunks):
+                    for x_chunk in range(x_chunks):
+                        y_start = y_chunk * chunk_size_xy
+                        x_start = x_chunk * chunk_size_xy
+                        y_end = min(y_start + chunk_size_xy, self.image_3d.shape[1])
+                        x_end = min(x_start + chunk_size_xy, self.image_3d.shape[2])
+
+                        chunk_dict[(z, y_start, x_start)] = {
+                            'coords': [y_start, y_end, x_start, x_end],
+                            'processed': False,
+                            'z': z  # Keep for backward compatibility
+                        }
+
+        self.realtimechunks = chunk_dict
+        print("Ready!")
+
+    def process_slice_features(self, z: int, speed: Any, use_gpu: bool,
+                               z_fores: Dict[int, List[Tuple[int, int]]],
+                               z_backs: Dict[int, List[Tuple[int, int]]]) -> Tuple[List[Any], List[Any]]:
+        """
+        Helper function to process a single slice and extract features.
+        Returns tuple of (foreground_features, background_features) for this slice.
+        """
+        slice_foreground_features = []
+        slice_background_features = []
+
+        current_map = self.get_feature_map_slice(z, speed, use_gpu)
+
+        if z in z_fores:
+            for y, x in z_fores[z]:
+                feature_vector = current_map[y, x]
+                slice_foreground_features.append(feature_vector)
+
+        if z in z_backs:
+            for y, x in z_backs[z]:
+                feature_vector = current_map[y, x]
+                slice_background_features.append(feature_vector)
+
+        return slice_foreground_features, slice_background_features
+
+    def extract_features_parallel(self, slices: List[int], speed: Any, use_gpu: bool,
+                                  z_fores: Dict[int, List[Tuple[int, int]]],
+                                  z_backs: Dict[int, List[Tuple[int, int]]]) -> Tuple[List[Any], List[Any]]:
+        """
+        Process feature extraction using ThreadPoolExecutor for parallel execution.
+        """
+        max_cores = multiprocessing.cpu_count()
+        foreground_features = []
+        background_features = []
+
+        with ThreadPoolExecutor(max_workers=max_cores) as executor:
+            # Submit all slice processing tasks
+            future_to_slice = {
+                executor.submit(self.process_slice_features, z, speed, use_gpu, z_fores, z_backs): z
+                for z in slices
+            }
+
+            # Collect results as they complete
+            for future in future_to_slice:
+                slice_foreground, slice_background = future.result()
+                foreground_features.extend(slice_foreground)
+                background_features.extend(slice_background)
+
+        return foreground_features, background_features
 
     def segment_volume_realtime(self, gpu = False):
 
 
         if self.realtimechunks is None:
-            self.get_realtime_chunks()
+            if not self.use_two:
+                self.get_realtime_chunks()
+            else:
+                self.get_realtime_chunks_2d()
         else:
             for chunk_pos in self.realtimechunks: # chunk_pos is the (z, y_start, x_start) tuple
                 self.realtimechunks[chunk_pos]['processed'] = False
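The new `get_realtime_chunks_2d` registers one single-z entry per `(z, y_start, x_start)` key so realtime segmentation can walk XY tiles plane by plane, and `segment_volume_realtime` now dispatches to it whenever 2D mode is active. A hypothetical illustration of the dictionary it builds (the shape and tile size below are made up):

```python
chunk_size_xy = 512
shape = (3, 1024, 768)          # (z, y, x), illustrative

chunk_dict = {}
for z in range(shape[0]):
    for y_start in range(0, shape[1], chunk_size_xy):
        for x_start in range(0, shape[2], chunk_size_xy):
            chunk_dict[(z, y_start, x_start)] = {
                'coords': [y_start, min(y_start + chunk_size_xy, shape[1]),
                           x_start, min(x_start + chunk_size_xy, shape[2])],
                'processed': False,
                'z': z,          # kept for backward compatibility
            }

print(len(chunk_dict))           # 3 planes x 2 y-tiles x 2 x-tiles = 12 chunks
```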
@@ -1170,45 +1161,121 @@ class InteractiveSegmenter:
         except:
             pass
 
+    def process_grid_cell(self, grid_cell_info):
+        """
+        Process a single grid cell and return foreground and background features.
+
+        Args:
+            grid_cell_info: tuple of (grid_z, grid_y, grid_x, box_size, depth, height, width, foreground_array)
+
+        Returns:
+            tuple: (foreground_features, background_features)
+        """
+        grid_z, grid_y, grid_x, box_size, depth, height, width, foreground_array = grid_cell_info
+
+        # Calculate the boundaries of this grid cell
+        z_min = grid_z * box_size
+        y_min = grid_y * box_size
+        x_min = grid_x * box_size
+
+        z_max = min(z_min + box_size, depth)
+        y_max = min(y_min + box_size, height)
+        x_max = min(x_min + box_size, width)
+
+        # Extract the subarray
+        subarray = self.image_3d[z_min:z_max, y_min:y_max, x_min:x_max]
+        subarray2 = foreground_array[z_min:z_max, y_min:y_max, x_min:x_max]
+
+        # Compute features for this subarray
+        if self.speed:
+            subarray_features = self.compute_feature_maps_cpu(subarray)
+        else:
+            subarray_features = self.compute_deep_feature_maps_cpu(subarray)
+
+        # Extract foreground features
+        local_fore_coords = np.argwhere(subarray2 == 1)
+        foreground_features = []
+        for local_z, local_y, local_x in local_fore_coords:
+            feature = subarray_features[local_z, local_y, local_x]
+            foreground_features.append(feature)
+
+        # Extract background features
+        local_back_coords = np.argwhere(subarray2 == 2)
+        background_features = []
+        for local_z, local_y, local_x in local_back_coords:
+            feature = subarray_features[local_z, local_y, local_x]
+            background_features.append(feature)
+
+        return foreground_features, background_features
+
+    # Modified main processing code
+    def process_grid_cells_parallel(self, grid_cells_with_scribbles, box_size, depth, height, width, foreground_array, max_workers=None):
+        """
+        Process grid cells in parallel using ThreadPoolExecutor.
+
+        Args:
+            grid_cells_with_scribbles: List of grid cell coordinates
+            box_size: Size of each grid cell
+            depth, height, width: Dimensions of the 3D image
+            foreground_array: Array marking foreground/background points
+            max_workers: Maximum number of threads (None for default)
+
+        Returns:
+            tuple: (foreground_features, background_features)
+        """
+        # Prepare data for each grid cell
+        grid_cell_data = [
+            (grid_z, grid_y, grid_x, box_size, depth, height, width, foreground_array)
+            for grid_z, grid_y, grid_x in grid_cells_with_scribbles
+        ]
+
+        foreground_features = []
+        background_features = []
+
+        # Process grid cells in parallel
+        with ThreadPoolExecutor(max_workers=max_workers) as executor:
+            # Submit all tasks
+            futures = [executor.submit(self.process_grid_cell, cell_data) for cell_data in grid_cell_data]
+
+            # Collect results as they complete
+            for future in futures:
+                fore_features, back_features = future.result()
+                foreground_features.extend(fore_features)
+                background_features.extend(back_features)
+
+        return foreground_features, background_features
+
     def train_batch(self, foreground_array, speed = True, use_gpu = False, use_two = False, mem_lock = False, saving = False):
         """Train directly on foreground and background arrays"""
 
         if not saving:
             print("Training model...")
+        self.model = RandomForestClassifier(
+            n_estimators=100,
+            n_jobs=-1,
+            max_depth=None
+        )
+
         self.speed = speed
         self.cur_gpu = use_gpu
-
-
-        self.realtimechunks = None
+
+        if use_two != self.use_two:
+            self.realtimechunks = None
 
         if not use_two:
             self.use_two = False
 
         self.mem_lock = mem_lock
 
-        if self.current_speed != speed:
-            self.feature_cache = None
-
-        self.model = RandomForestClassifier(
-            n_estimators=100,
-            n_jobs=-1,
-            max_depth=None
-        )
-
-
         if use_two:
 
             #changed = [] #Track which slices need feature maps
 
             if not self.use_two: #Clarifies if we need to redo feature cache for 2D
-                self.feature_cache = None
                 self.use_two = True
 
-            self.feature_cache = None #Decided this should reset, can remove this line to have it retain prev feature maps
             self.two_slices = []
 
-            if self.feature_cache == None:
-                self.feature_cache = {}
 
             # Get foreground coordinates and features
             z_fore, y_fore, x_fore = np.where(foreground_array == 1)
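The `process_grid_cell` / `process_grid_cells_parallel` pair added in the hunk above lifts the old inline grid loop (removed further down) into reusable methods, so scribble-containing cells can be farmed out to a thread pool. A condensed sketch of the fan-out, with raw intensities standing in for the real feature maps and all names illustrative:

```python
from concurrent.futures import ThreadPoolExecutor
import numpy as np

image = np.random.rand(32, 32, 32)
labels = np.zeros(image.shape, dtype=np.uint8)
labels[10, 10, 10] = 1            # one foreground scribble
labels[20, 20, 20] = 2            # one background scribble
box_size = 16

def process_cell(cell):
    gz, gy, gx = cell
    sl = np.s_[gz*box_size:(gz+1)*box_size,
               gy*box_size:(gy+1)*box_size,
               gx*box_size:(gx+1)*box_size]
    sub, marks = image[sl], labels[sl]
    fore = [sub[tuple(c)] for c in np.argwhere(marks == 1)]
    back = [sub[tuple(c)] for c in np.argwhere(marks == 2)]
    return fore, back

cells = [(0, 0, 0), (1, 1, 1)]    # cells known to contain scribbles
with ThreadPoolExecutor() as ex:
    results = [f.result() for f in [ex.submit(process_cell, c) for c in cells]]

foreground = [v for r in results for v in r[0]]
background = [v for r in results for v in r[1]]
print(len(foreground), len(background))   # 1 1
```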
@@ -1221,14 +1288,6 @@ class InteractiveSegmenter:
 
             back_coords = list(zip(z_back, y_back, x_back))
 
-
-            #slices = set(list(z_back) + list(z_fore))
-
-            #for z in slices:
-                #if z not in self.two_slices:
-                    #changed.append(z)
-                    #self.two_slices.append(z) #Tracks assigning coords to feature map slices
-
             foreground_features = []
             background_features = []
 
@@ -1236,28 +1295,9 @@ class InteractiveSegmenter:
             z_backs = self.organize_by_z(back_coords)
             slices = set(list(z_fores.keys()) + list(z_backs.keys()))
 
-
-
-
-                current_map = self.get_feature_map_slice(z, speed, use_gpu)
-
-                if z in z_fores:
-
-                    for y, x in z_fores[z]:
-                        # Get the feature vector for this foreground point
-                        feature_vector = current_map[y, x]
-
-                        # Add to our collection
-                        foreground_features.append(feature_vector)
-
-                if z in z_backs:
-
-                    for y, x in z_backs[z]:
-                        # Get the feature vector for this foreground point
-                        feature_vector = current_map[y, x]
-
-                        # Add to our collection
-                        background_features.append(feature_vector)
+            foreground_features, background_features = self.extract_features_parallel(
+                slices, speed, use_gpu, z_fores, z_backs
+            )
 
 
         else: #Forces ram efficiency
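The per-point loops this hunk deletes (and that `process_slice_features` still uses internally) could also be expressed with NumPy fancy indexing, which avoids the Python-level loop entirely; a small sketch, assuming a per-slice feature map of shape `(Y, X, n_features)`:

```python
import numpy as np

current_map = np.random.rand(8, 8, 5)      # illustrative (Y, X, n_features)
points = [(1, 2), (3, 4), (5, 6)]          # (y, x) scribble coordinates
ys, xs = np.array(points).T
features = current_map[ys, xs]             # one row per point
print(features.shape)                      # (3, 5)
```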
@@ -1304,49 +1344,8 @@ class InteractiveSegmenter:
             coord_mapping = {}
 
             # Step 2: Process each grid cell that contains scribbles
-            for grid_z, grid_y, grid_x in grid_cells_with_scribbles:
-                # Calculate the boundaries of this grid cell
-                z_min = grid_z * box_size
-                y_min = grid_y * box_size
-                x_min = grid_x * box_size
-
-                z_max = min(z_min + box_size, depth)
-                y_max = min(y_min + box_size, height)
-                x_max = min(x_min + box_size, width)
-
-                # Extract the subarray
-                subarray = self.image_3d[z_min:z_max, y_min:y_max, x_min:x_max]
-                subarray2 = foreground_array[z_min:z_max, y_min:y_max, x_min:x_max]
-
-                # Compute features for this subarray
-                if self.speed:
-                    subarray_features = self.compute_feature_maps_cpu_parallel(subarray)
-                else:
-                    subarray_features = self.compute_deep_feature_maps_cpu_parallel(subarray)
-
-                # For each foreground point in this grid cell, extract its feature
-                # Extract foreground features using a direct mask comparison
-                local_fore_coords = np.argwhere(subarray2 == 1)
-                for local_z, local_y, local_x in local_fore_coords:
-                    feature = subarray_features[local_z, local_y, local_x]
-                    foreground_features.append(feature)
-
-                # Extract background features using a direct mask comparison
-                local_back_coords = np.argwhere(subarray2 == 2)
-                for local_z, local_y, local_x in local_back_coords:
-                    feature = subarray_features[local_z, local_y, local_x]
-                    background_features.append(feature)
-            try:
-                # Get foreground coordinates and features
-                z_fore, y_fore, x_fore = np.where(foreground_array == 1)
-                foreground_features = self.feature_cache[z_fore, y_fore, x_fore]
-
-                # Get background coordinates and features
-                z_back, y_back, x_back = np.where(foreground_array == 2)
-                background_features = self.feature_cache[z_back, y_back, x_back]
-            except:
-                pass
 
+            foreground_features, background_features = self.process_grid_cells_parallel(grid_cells_with_scribbles, box_size, depth, height, width, foreground_array)
 
             if self.previous_foreground is not None:
                 failed = True
@@ -1378,7 +1377,6 @@ class InteractiveSegmenter:
         # Combine features and labels
         X = np.vstack([foreground_features, background_features])
         y = np.hstack([np.ones(len(z_fore)), np.zeros(len(z_back))])
-
 
         # Train the model
         try:
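The hunk above only drops a stray blank line, but its context shows how the training matrix is assembled: foreground rows are stacked over background rows and labeled 1 and 0 respectively. A tiny standalone example with fabricated feature vectors (shapes are illustrative):

```python
import numpy as np

foreground_features = np.random.rand(4, 5)   # 4 foreground points, 5 features
background_features = np.random.rand(6, 5)   # 6 background points

X = np.vstack([foreground_features, background_features])
y = np.hstack([np.ones(4), np.zeros(6)])     # 1 = foreground, 0 = background
print(X.shape, y.shape)                      # (10, 5) (10,)
```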
@@ -1412,7 +1410,7 @@ class InteractiveSegmenter:
                      use_two=self.use_two,
                      mem_lock=self.mem_lock)
 
-        print(f"Model data saved to {file_name}.
+        print(f"Model data saved to {file_name}.")
 
 
     def load_model(self, file_name):
@@ -1448,10 +1446,10 @@ class InteractiveSegmenter:
             return
 
         if speed:
-            output = self.
+            output = self.compute_feature_maps_cpu_2d(z = z)
 
         elif not speed:
-            output = self.
+            output = self.compute_deep_feature_maps_cpu_2d(z = z)
 
         return output
 