neuro-sam 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- neuro_sam/__init__.py +1 -0
- neuro_sam/brightest_path_lib/__init__.py +5 -0
- neuro_sam/brightest_path_lib/algorithm/__init__.py +3 -0
- neuro_sam/brightest_path_lib/algorithm/astar.py +586 -0
- neuro_sam/brightest_path_lib/algorithm/waypointastar.py +449 -0
- neuro_sam/brightest_path_lib/algorithm/waypointastar_speedup.py +1007 -0
- neuro_sam/brightest_path_lib/connected_componen.py +329 -0
- neuro_sam/brightest_path_lib/cost/__init__.py +8 -0
- neuro_sam/brightest_path_lib/cost/cost.py +33 -0
- neuro_sam/brightest_path_lib/cost/reciprocal.py +90 -0
- neuro_sam/brightest_path_lib/cost/reciprocal_transonic.py +86 -0
- neuro_sam/brightest_path_lib/heuristic/__init__.py +2 -0
- neuro_sam/brightest_path_lib/heuristic/euclidean.py +101 -0
- neuro_sam/brightest_path_lib/heuristic/heuristic.py +29 -0
- neuro_sam/brightest_path_lib/image/__init__.py +1 -0
- neuro_sam/brightest_path_lib/image/stats.py +197 -0
- neuro_sam/brightest_path_lib/input/__init__.py +1 -0
- neuro_sam/brightest_path_lib/input/inputs.py +14 -0
- neuro_sam/brightest_path_lib/node/__init__.py +2 -0
- neuro_sam/brightest_path_lib/node/bidirectional_node.py +240 -0
- neuro_sam/brightest_path_lib/node/node.py +125 -0
- neuro_sam/brightest_path_lib/visualization/__init__.py +4 -0
- neuro_sam/brightest_path_lib/visualization/flythrough.py +133 -0
- neuro_sam/brightest_path_lib/visualization/flythrough_all.py +394 -0
- neuro_sam/brightest_path_lib/visualization/tube_data.py +385 -0
- neuro_sam/brightest_path_lib/visualization/tube_flythrough.py +227 -0
- neuro_sam/napari_utils/anisotropic_scaling.py +503 -0
- neuro_sam/napari_utils/color_utils.py +135 -0
- neuro_sam/napari_utils/contrasting_color_system.py +169 -0
- neuro_sam/napari_utils/main_widget.py +1016 -0
- neuro_sam/napari_utils/path_tracing_module.py +1016 -0
- neuro_sam/napari_utils/punet_widget.py +424 -0
- neuro_sam/napari_utils/segmentation_model.py +769 -0
- neuro_sam/napari_utils/segmentation_module.py +649 -0
- neuro_sam/napari_utils/visualization_module.py +574 -0
- neuro_sam/plugin.py +260 -0
- neuro_sam/punet/__init__.py +0 -0
- neuro_sam/punet/deepd3_model.py +231 -0
- neuro_sam/punet/prob_unet_deepd3.py +431 -0
- neuro_sam/punet/prob_unet_with_tversky.py +375 -0
- neuro_sam/punet/punet_inference.py +236 -0
- neuro_sam/punet/run_inference.py +145 -0
- neuro_sam/punet/unet_blocks.py +81 -0
- neuro_sam/punet/utils.py +52 -0
- neuro_sam-0.1.0.dist-info/METADATA +269 -0
- neuro_sam-0.1.0.dist-info/RECORD +93 -0
- neuro_sam-0.1.0.dist-info/WHEEL +5 -0
- neuro_sam-0.1.0.dist-info/entry_points.txt +2 -0
- neuro_sam-0.1.0.dist-info/licenses/LICENSE +21 -0
- neuro_sam-0.1.0.dist-info/top_level.txt +2 -0
- sam2/__init__.py +11 -0
- sam2/automatic_mask_generator.py +454 -0
- sam2/benchmark.py +92 -0
- sam2/build_sam.py +174 -0
- sam2/configs/sam2/sam2_hiera_b+.yaml +113 -0
- sam2/configs/sam2/sam2_hiera_l.yaml +117 -0
- sam2/configs/sam2/sam2_hiera_s.yaml +116 -0
- sam2/configs/sam2/sam2_hiera_t.yaml +118 -0
- sam2/configs/sam2.1/sam2.1_hiera_b+.yaml +116 -0
- sam2/configs/sam2.1/sam2.1_hiera_l.yaml +120 -0
- sam2/configs/sam2.1/sam2.1_hiera_s.yaml +119 -0
- sam2/configs/sam2.1/sam2.1_hiera_t.yaml +121 -0
- sam2/configs/sam2.1_training/sam2.1_hiera_b+_MOSE_finetune.yaml +339 -0
- sam2/configs/train.yaml +335 -0
- sam2/modeling/__init__.py +5 -0
- sam2/modeling/backbones/__init__.py +5 -0
- sam2/modeling/backbones/hieradet.py +317 -0
- sam2/modeling/backbones/image_encoder.py +134 -0
- sam2/modeling/backbones/utils.py +93 -0
- sam2/modeling/memory_attention.py +169 -0
- sam2/modeling/memory_encoder.py +181 -0
- sam2/modeling/position_encoding.py +239 -0
- sam2/modeling/sam/__init__.py +5 -0
- sam2/modeling/sam/mask_decoder.py +295 -0
- sam2/modeling/sam/prompt_encoder.py +202 -0
- sam2/modeling/sam/transformer.py +311 -0
- sam2/modeling/sam2_base.py +911 -0
- sam2/modeling/sam2_utils.py +323 -0
- sam2/sam2.1_hiera_b+.yaml +116 -0
- sam2/sam2.1_hiera_l.yaml +120 -0
- sam2/sam2.1_hiera_s.yaml +119 -0
- sam2/sam2.1_hiera_t.yaml +121 -0
- sam2/sam2_hiera_b+.yaml +113 -0
- sam2/sam2_hiera_l.yaml +117 -0
- sam2/sam2_hiera_s.yaml +116 -0
- sam2/sam2_hiera_t.yaml +118 -0
- sam2/sam2_image_predictor.py +475 -0
- sam2/sam2_video_predictor.py +1222 -0
- sam2/sam2_video_predictor_legacy.py +1172 -0
- sam2/utils/__init__.py +5 -0
- sam2/utils/amg.py +348 -0
- sam2/utils/misc.py +349 -0
- sam2/utils/transforms.py +118 -0
|
@@ -0,0 +1,1007 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Accurate Fast Waypoint Search with Nanometer Support and Improved Z-Range Detection
|
|
3
|
+
Optimized for speed while maintaining high path accuracy
|
|
4
|
+
Now includes intelligent z-positioning with proper distribution across Z-range
|
|
5
|
+
NO SUBDIVISION - Pure waypoint-to-waypoint processing
|
|
6
|
+
"""
|
|
7
|
+
|
|
8
|
+
import numpy as np
|
|
9
|
+
from typing import Tuple, List, Optional
|
|
10
|
+
import numba as nb
|
|
11
|
+
import time
|
|
12
|
+
from dataclasses import dataclass
|
|
13
|
+
from concurrent.futures import ThreadPoolExecutor
|
|
14
|
+
import psutil
|
|
15
|
+
import math
|
|
16
|
+
|
|
17
|
+
|
|
18
|
+
@nb.njit(cache=True)
def calculate_segment_distance_accurate_nm(point_a_arr, point_b_arr, xy_spacing_nm, z_spacing_nm=1.0):
    """Return the 3D Euclidean distance between two (z, y, x) points in nanometers.

    Parameters
    ----------
    point_a_arr, point_b_arr : 1-D arrays
        Points ordered (z, y, x) in slice/pixel units.
    xy_spacing_nm : float
        In-plane pixel size in nanometers, applied to the y and x axes.
    z_spacing_nm : float, optional
        Slice spacing in nanometers, applied to the z axis.  Defaults to 1.0,
        which reproduces the historical behavior of leaving the z component
        in slice units (the original code never scaled z despite the name).

    Returns
    -------
    float
        Distance in nanometers (z remains in slice units unless a real
        z_spacing_nm is supplied by the caller).
    """
    # Convert per-axis differences from pixel/slice units to nanometers.
    z_diff_nm = (point_b_arr[0] - point_a_arr[0]) * z_spacing_nm
    y_diff_nm = (point_b_arr[1] - point_a_arr[1]) * xy_spacing_nm
    x_diff_nm = (point_b_arr[2] - point_a_arr[2]) * xy_spacing_nm

    distance_sq_nm = z_diff_nm * z_diff_nm + y_diff_nm * y_diff_nm + x_diff_nm * x_diff_nm
    return np.sqrt(distance_sq_nm)
|
|
28
|
+
|
|
29
|
+
|
|
30
|
+
@nb.njit(cache=True)
def calculate_segment_distance_accurate_pixels(point_a_arr, point_b_arr):
    """Euclidean distance between two equal-length points in pixel units.

    Internal helper: accumulates squared per-axis differences and takes a
    single square root at the end.
    """
    total_sq = 0.0
    n_axes = len(point_a_arr)
    for axis in range(n_axes):
        delta = point_b_arr[axis] - point_a_arr[axis]
        total_sq += delta * delta
    return np.sqrt(total_sq)
|
|
38
|
+
|
|
39
|
+
# Z-range detection functions ported from enhanced_waypointastar.py
|
|
40
|
+
@nb.njit(cache=True, parallel=True)
def find_intensity_transitions_at_point_optimized(image, y, x, intensity_threshold=0.3, z_click=-1):
    """
    Find z-frames where intensity transitions occur at pixel (y, x).

    If z_click is provided (any value other than -1), finds the above-threshold
    intensity interval closest to that z-plane.  This handles overlapping
    dendrites by picking the structure nearest to where the user clicked.

    Returns a tuple (start_z, end_z, max_intensity): the bounds of the selected
    above-threshold interval and the brightest value along the z-profile.
    """
    z_size = image.shape[0]
    intensities = np.zeros(z_size)

    # Extract intensity profile along z-axis (parallelized read)
    for z in nb.prange(z_size):
        intensities[z] = image[z, y, x]

    # Threshold is relative to the global max of the profile: safer than a
    # local peak, even if a smaller local interval is selected later.
    max_intensity = np.max(intensities)
    threshold = max_intensity * intensity_threshold

    # Boolean mask of z-slices considered "inside" the structure
    above_threshold = intensities >= threshold

    # If using z_click, find the specific interval containing or closest to z_click
    if z_click != -1:
        # Collect all contiguous above-threshold intervals, tolerating short
        # below-threshold gaps (up to gap_tolerance slices) inside an interval.
        intervals_start = []
        intervals_end = []

        in_interval = False
        current_start = -1
        last_valid_z = -1
        gap_tolerance = 2

        for z in range(z_size):
            if above_threshold[z]:
                if not in_interval:
                    # Open a new interval at this slice
                    in_interval = True
                    current_start = z
                last_valid_z = z
            else:
                if in_interval:
                    # Close the interval only once the gap grows too large
                    if z - last_valid_z > gap_tolerance:
                        in_interval = False
                        intervals_start.append(current_start)
                        intervals_end.append(last_valid_z)  # End at the last valid pixel

        # Handle case where interval extends to the end or was kept open by tolerance
        if in_interval:
            intervals_start.append(current_start)
            intervals_end.append(last_valid_z)

        if len(intervals_start) > 0:
            # Find closest interval to z_click.
            # Distance is 0 when z_click lies inside [s, e]; otherwise it is
            # the distance to the nearer interval edge.
            best_idx = 0
            min_dist = 100000

            for i in range(len(intervals_start)):
                s = intervals_start[i]
                e = intervals_end[i]

                if s <= z_click <= e:
                    dist = 0
                else:
                    dist = min(abs(z_click - s), abs(z_click - e))

                if dist < min_dist:
                    min_dist = dist
                    best_idx = i

            return intervals_start[best_idx], intervals_end[best_idx], max_intensity

    # Fallback / Default: global first and last above-threshold slices
    start_z = -1
    for z in range(z_size):
        if above_threshold[z]:
            start_z = z
            break

    end_z = -1
    for z in range(z_size - 1, -1, -1):
        if above_threshold[z]:
            end_z = z
            break

    # Edge cases: nothing above threshold -> span the whole stack
    if start_z == -1:
        start_z = 0
    if end_z == -1:
        end_z = z_size - 1

    return start_z, end_z, max_intensity
|
|
137
|
+
|
|
138
|
+
|
|
139
|
+
@nb.njit(cache=True, parallel=True)
def batch_find_optimal_z_with_intensity_priority(image, waypoint_coords, start_z, end_z, min_intensity_threshold=0.1):
    """
    Find optimal z-coordinates prioritizing intensity while maintaining a
    reasonable distribution between start_z and end_z.

    Two-stage approach per waypoint: first collect bright candidate slices,
    then score them by a 70% intensity / 30% distribution blend.  If no slice
    exceeds min_intensity_threshold, fall back to a heavily distance-penalized
    search over the full range.

    waypoint_coords is an (N, 2) array of (y, x) pixel positions; returns an
    int32 array of N chosen z-indices.
    """
    num_waypoints = waypoint_coords.shape[0]
    optimal_z_values = np.zeros(num_waypoints, dtype=np.int32)

    if num_waypoints == 0:
        return optimal_z_values

    # Define the search range (start/end may come in either order)
    z_min_range = min(start_z, end_z)
    z_max_range = max(start_z, end_z)

    # Each waypoint is independent, so the outer loop is parallelized.
    for i in nb.prange(num_waypoints):
        y, x = waypoint_coords[i, 0], waypoint_coords[i, 1]

        # First pass: find all z-positions above the minimum intensity threshold
        candidate_positions = []
        candidate_intensities = []

        for z in range(max(0, z_min_range), min(image.shape[0], z_max_range + 1)):
            intensity = image[z, y, x]
            if intensity >= min_intensity_threshold:
                candidate_positions.append(z)
                candidate_intensities.append(intensity)

        if len(candidate_positions) == 0:
            # No bright pixels found: pick the best pixel in the range using a
            # heavy distance penalty so the choice stays near the interpolated
            # target position.
            best_z = z_min_range
            best_score = -1000.0

            # Target z interpolated along the start->end direction for this
            # waypoint's index (evenly distributes waypoints across the range).
            if num_waypoints == 1:
                target_z = (start_z + end_z) // 2
            else:
                if end_z > start_z:
                    step = (end_z - start_z) / (num_waypoints + 1)
                    target_z = int(start_z + (i + 1) * step)
                elif start_z > end_z:
                    step = (start_z - end_z) / (num_waypoints + 1)
                    target_z = int(start_z - (i + 1) * step)
                else:
                    target_z = start_z

            for z in range(max(0, z_min_range), min(image.shape[0], z_max_range + 1)):
                intensity = image[z, y, x]
                # Heavy penalty to prevent jumping across the stack
                distance_penalty = abs(z - target_z) * 0.1
                score = intensity - distance_penalty
                if score > best_score:
                    best_score = score
                    best_z = z
            optimal_z_values[i] = best_z
        else:
            # Same interpolated target position as in the fallback branch
            if num_waypoints == 1:
                target_z = (start_z + end_z) // 2
            else:
                if end_z > start_z:
                    step = (end_z - start_z) / (num_waypoints + 1)
                    target_z = int(start_z + (i + 1) * step)
                elif start_z > end_z:
                    step = (start_z - end_z) / (num_waypoints + 1)
                    target_z = int(start_z - (i + 1) * step)
                else:
                    target_z = start_z

            # Find the best candidate considering both intensity and proximity to target
            best_score = -1.0
            best_z = candidate_positions[0]

            # Max candidate intensity used to normalize intensities to [0, 1]
            max_candidate_intensity = max(candidate_intensities)

            for j in range(len(candidate_positions)):
                z_pos = candidate_positions[j]
                intensity = candidate_intensities[j]

                # Normalize intensity (0 to 1)
                normalized_intensity = intensity / max_candidate_intensity if max_candidate_intensity > 0 else 0

                # Distance penalty normalized by the full range (0 = no penalty)
                max_distance = abs(z_max_range - z_min_range)
                if max_distance > 0:
                    distance_penalty = abs(z_pos - target_z) / max_distance
                else:
                    distance_penalty = 0

                # Combined score: 70% intensity, 30% distribution
                score = 0.7 * normalized_intensity + 0.3 * (1.0 - distance_penalty)

                if score > best_score:
                    best_score = score
                    best_z = z_pos

            optimal_z_values[i] = best_z

    return optimal_z_values
|
|
241
|
+
|
|
242
|
+
|
|
243
|
+
@nb.njit(cache=True, parallel=True)
def batch_find_optimal_z_with_adaptive_search(image, waypoint_coords, start_z, end_z, min_intensity_threshold=0.05):
    """
    Adaptive Z-optimization that expands the search window around each
    waypoint's interpolated target z until a bright-enough slice is found.

    waypoint_coords is an (N, 2) array of (y, x) pixel positions; returns an
    int32 array of N chosen z-indices.  If no slice in any window exceeds
    min_intensity_threshold, falls back to a full-range search with a heavy
    distance penalty toward the interpolated target.
    """
    num_waypoints = waypoint_coords.shape[0]
    optimal_z_values = np.zeros(num_waypoints, dtype=np.int32)

    if num_waypoints == 0:
        return optimal_z_values

    # Calculate initial target positions: waypoints are distributed evenly
    # between start_z and end_z (in whichever direction they run).
    target_positions = np.zeros(num_waypoints, dtype=np.int32)

    if num_waypoints == 1:
        target_positions[0] = (start_z + end_z) // 2
    else:
        if end_z > start_z:
            step = (end_z - start_z) / (num_waypoints + 1)
            for i in range(num_waypoints):
                target_positions[i] = int(start_z + (i + 1) * step)
        elif start_z > end_z:
            step = (start_z - end_z) / (num_waypoints + 1)
            for i in range(num_waypoints):
                target_positions[i] = int(start_z - (i + 1) * step)
        else:
            target_positions.fill(start_z)

    # Each waypoint is independent, so the outer loop is parallelized.
    for i in nb.prange(num_waypoints):
        y, x = waypoint_coords[i, 0], waypoint_coords[i, 1]
        target_z = target_positions[i]

        # Start with a small search range and expand if needed
        search_ranges = [3, 5, 8, 12]  # Progressively larger search ranges
        found_good_position = False

        for search_range in search_ranges:
            if found_good_position:
                break

            # Window around the target, clipped to the image stack
            z_min_search = max(0, target_z - search_range)
            z_max_search = min(image.shape[0] - 1, target_z + search_range)

            # Also respect the overall start_z..end_z range (plus margin)
            z_min_search = max(z_min_search, min(start_z, end_z) - search_range)
            z_max_search = min(z_max_search, max(start_z, end_z) + search_range)

            best_z = target_z
            best_score = -1.0

            # Best bright position in this window, lightly penalized by
            # distance from the target.
            for z in range(z_min_search, z_max_search + 1):
                intensity = image[z, y, x]

                if intensity >= min_intensity_threshold:
                    distance_penalty = abs(z - target_z) * 0.02  # Small penalty
                    score = intensity - distance_penalty

                    if score > best_score:
                        best_score = score
                        best_z = z
                        found_good_position = True

            optimal_z_values[i] = best_z

            # If we found a good position, stop expanding the search range
            if found_good_position and image[best_z, y, x] >= min_intensity_threshold:
                break

        # If still no good position found, use the full range with a heavy
        # distance penalty so the choice stays near the interpolated target.
        if not found_good_position:
            z_min_full = max(0, min(start_z, end_z))
            z_max_full = min(image.shape[0] - 1, max(start_z, end_z))

            best_z = target_z
            best_score = -1000.0

            for z in range(z_min_full, z_max_full + 1):
                intensity = image[z, y, x]
                # Heavy penalty: 0.1 per slice, i.e. 10 slices away = -1.0 score
                distance_penalty = abs(z - target_z) * 0.1
                score = intensity - distance_penalty

                if score > best_score:
                    best_score = score
                    best_z = z

            optimal_z_values[i] = best_z

    return optimal_z_values
|
|
338
|
+
|
|
339
|
+
|
|
340
|
+
@nb.njit(cache=True)
def filter_close_waypoints_optimized(waypoints, min_distance=3):
    """Remove waypoints that lie within min_distance of an already-kept waypoint.

    Greedy filter over an (N, D) array: the first row is always kept, and each
    later row is kept only if its Euclidean distance to every kept row is at
    least min_distance (distances are in index/pixel units).  Returns a new
    array of the kept rows in their original order.
    """
    if len(waypoints) <= 1:
        return waypoints

    # Always keep first waypoint
    filtered_indices = [0]

    for i in range(1, len(waypoints)):
        # Check distance to all previously kept waypoints
        keep_waypoint = True
        for j in filtered_indices:
            distance_sq = 0.0
            for k in range(waypoints.shape[1]):
                diff = waypoints[i, k] - waypoints[j, k]
                distance_sq += diff * diff

            # Compare squared distances to avoid a sqrt per pair
            if distance_sq < min_distance * min_distance:
                keep_waypoint = False
                break

        if keep_waypoint:
            filtered_indices.append(i)

    # Materialize the kept rows into a fresh array (numba-friendly copy loops)
    filtered = np.zeros((len(filtered_indices), waypoints.shape[1]), dtype=waypoints.dtype)
    for i, idx in enumerate(filtered_indices):
        for j in range(waypoints.shape[1]):
            filtered[i, j] = waypoints[idx, j]

    return filtered
|
|
372
|
+
|
|
373
|
+
|
|
374
|
+
@nb.njit(cache=True)
def calculate_euclidean_distance_fast(point1, point2):
    """Euclidean distance between two equal-length coordinate vectors."""
    acc = 0.0
    n = len(point1)
    for idx in range(n):
        delta = point1[idx] - point2[idx]
        acc += delta * delta
    return math.sqrt(acc)
|
|
382
|
+
|
|
383
|
+
|
|
384
|
+
def find_start_end_points_optimized(points_list):
    """
    Pick the two mutually farthest points as the start and end of the path.

    Parameters
    ----------
    points_list : sequence of coordinate sequences
        At least two points, all with the same dimensionality.

    Returns
    -------
    (waypoints, start_point, end_point)
        ``waypoints`` is a list (possibly empty) of the remaining points in
        their original order; ``start_point``/``end_point`` are float64 numpy
        arrays copied out of the input.

    Raises
    ------
    ValueError
        If fewer than two points are supplied.
    """
    if len(points_list) < 2:
        raise ValueError("Need at least 2 points to find start and end")

    # Convert to numpy array for vectorized computation
    points_array = np.array(points_list, dtype=np.float64)
    n = len(points_array)

    # Vectorized pairwise squared distances (n x n), replacing the former
    # O(n^2) pure-Python double loop.  sqrt is unnecessary for an argmax.
    diffs = points_array[:, None, :] - points_array[None, :, :]
    dist_sq = np.einsum('ijk,ijk->ij', diffs, diffs)

    if dist_sq.max() > 0.0:
        # Row-major argmax over the symmetric matrix yields the same pair as
        # the original i<j scan with a strict ">" comparison.
        start_idx, end_idx = np.unravel_index(np.argmax(dist_sq), dist_sq.shape)
    else:
        # All points coincide: preserve the original tie-break (first two).
        start_idx, end_idx = 0, 1

    # Extract start and end points (copies, so callers may mutate them)
    start_point = points_array[start_idx].copy()
    end_point = points_array[end_idx].copy()

    # Remaining points become waypoints, original order preserved
    waypoint_mask = np.ones(n, dtype=bool)
    waypoint_mask[start_idx] = False
    waypoint_mask[end_idx] = False
    waypoints = points_array[waypoint_mask]

    return waypoints.tolist(), start_point, end_point
|
|
424
|
+
|
|
425
|
+
|
|
426
|
+
def should_use_hierarchical_search(image, num_waypoints):
    """Decide whether the workload is large enough to warrant hierarchical search.

    Estimates search cost as (total voxels) x (number of segments) and returns
    True only when that estimate crosses the 100M-operation threshold, i.e.
    for very large images.
    """
    OPERATIONS_THRESHOLD = 100_000_000  # 100M operations
    voxel_count = np.prod(image.shape)
    segment_count = num_waypoints + 1
    return voxel_count * segment_count > OPERATIONS_THRESHOLD
|
|
433
|
+
|
|
434
|
+
|
|
435
|
+
class ZRangeCache:
    """Memoizes z-range lookups so repeated queries skip recomputation."""

    def __init__(self):
        # Maps (y, x, intensity_threshold, z_click) -> (start_z, end_z, max_intensity)
        self.cache = {}

    def get_z_range(self, image, y, x, intensity_threshold, z_click=-1):
        """Return the z-range tuple for this query, computing it on first use.

        z_click changes which intensity interval is selected, so it is part
        of the cache key.
        """
        key = (y, x, intensity_threshold, z_click)
        cached = self.cache.get(key)
        if cached is None:
            cached = find_intensity_transitions_at_point_optimized(
                image, y, x, intensity_threshold, z_click)
            self.cache[key] = cached
        return cached
|
|
446
|
+
|
|
447
|
+
|
|
448
|
+
@dataclass
class SearchStrategy:
    """Per-segment search configuration that prioritizes accuracy."""
    use_hierarchical: bool          # run a coarse (downsampled) search pass first
    hierarchical_factor: int        # downsampling factor for the coarse pass
    weight_heuristic: float         # Always 1.0 for medical accuracy
    refine_path: bool               # Whether to refine path at full resolution
    suitable_for_parallel: bool     # Whether this segment can be processed in parallel
|
|
456
|
+
|
|
457
|
+
|
|
458
|
+
class Optimizer:
    """Chooses per-segment search strategies that maintain accuracy while
    improving speed — now with nanometer support.

    Distance thresholds are expressed in nanometers (converted from the
    original pixel thresholds at 94 nm/pixel); image-size thresholds are in
    voxels.
    """

    def __init__(self, image_shape, xy_spacing_nm=94.0, enable_parallel=True,
                 max_parallel_workers=None, my_weight_heuristic=1.0):
        self.image_shape = image_shape
        self.image_volume = np.prod(image_shape)
        self.my_weight_heuristic = my_weight_heuristic

        # Store spacing in nanometers
        self.xy_spacing_nm = xy_spacing_nm

        # Conservative image-size thresholds (voxels) for strategy switching
        self.large_image_threshold = 30_000_000  # voxels
        self.huge_image_threshold = 100_000_000  # voxels

        # Parallel processing settings
        self.enable_parallel = enable_parallel
        if max_parallel_workers is None:
            # Auto-detect optimal number of workers.
            # FIX: psutil.cpu_count(logical=False) may return None on some
            # platforms, which would crash the "cpu_count - 1" arithmetic
            # below — fall back to the logical count, then to 1.
            cpu_count = psutil.cpu_count(logical=False)  # Physical cores
            if cpu_count is None:
                cpu_count = psutil.cpu_count() or 1
            available_memory_gb = psutil.virtual_memory().available / (1024**3)

            # Conservative worker count based on memory and cores
            max_workers_by_memory = max(1, int(available_memory_gb / 2))  # 2GB per worker
            max_workers_by_cpu = max(1, cpu_count - 1)  # Leave one core free

            self.max_parallel_workers = min(max_workers_by_memory, max_workers_by_cpu, 4)  # Cap at 4
        else:
            self.max_parallel_workers = max_parallel_workers

        print(f"Parallel processing: {self.enable_parallel}, Max workers: {self.max_parallel_workers}")
        print(f"Spacing: XY={self.xy_spacing_nm:.1f} nm/pixel")

    def determine_accurate_strategy(self, distance_nm: float, segment_idx: int, total_segments: int) -> SearchStrategy:
        """Determine strategy that maintains accuracy with parallel processing.

        Parameters
        ----------
        distance_nm : float
            Straight-line segment length in nanometers.
        segment_idx : int
            Index of this segment (currently unused; kept for interface
            stability).
        total_segments : int
            Total number of segments in the path.

        Returns
        -------
        SearchStrategy
            Defaults to full-resolution, non-hierarchical search; enables
            hierarchical search only for very large images with long segments.
        """
        # Default to high accuracy
        strategy = SearchStrategy(
            use_hierarchical=False,
            hierarchical_factor=4,
            weight_heuristic=self.my_weight_heuristic,  # ALWAYS 1.0 for medical accuracy
            refine_path=False,
            suitable_for_parallel=False
        )

        # Determine if suitable for parallel processing — using nanometer
        # thresholds.  Criteria: moderate-to-long segments, multiple segments.
        if (self.enable_parallel and
                distance_nm > 9400.0 and   # 9.4 μm (was 100 pixels × 94 nm/pixel)
                distance_nm < 56400.0 and  # 56.4 μm (was 600 pixels × 94 nm/pixel)
                total_segments > 2):       # Multiple segments available
            strategy.suitable_for_parallel = True

        # Only use hierarchical search for very large images and long segments
        if self.image_volume > self.huge_image_threshold and distance_nm > 28200.0:  # 28.2 μm (300 px × 94 nm)
            # Very conservative hierarchical search
            strategy.use_hierarchical = True
            strategy.hierarchical_factor = 4  # Small factor to preserve detail
            strategy.refine_path = True       # Always refine for accuracy
            strategy.weight_heuristic = self.my_weight_heuristic  # Always optimal
        elif self.image_volume > self.large_image_threshold and distance_nm > 37600.0:  # 37.6 μm (400 px × 94 nm)
            # Only for very long segments on large images
            strategy.use_hierarchical = True
            strategy.hierarchical_factor = 3  # Very conservative factor
            strategy.refine_path = True
            strategy.weight_heuristic = self.my_weight_heuristic

        return strategy
|
|
528
|
+
|
|
529
|
+
|
|
530
|
+
class FasterWaypointSearch:
|
|
531
|
+
"""Fast waypoint search that maintains high accuracy with parallel processing and Z-range detection"""
|
|
532
|
+
|
|
533
|
+
    def __init__(self, image, points_list, xy_spacing_nm=94.0,
                 enable_z_optimization=True, intensity_threshold=0.3,
                 min_intensity_threshold=0.1, **kwargs):
        """Set up the search over `image` for the given clicked points.

        Parameters
        ----------
        image : ndarray
            Volume to trace through; a 3-D (z, y, x) stack enables the
            z-optimization features.
        points_list : sequence
            User-provided points; start/end/waypoints are derived later.
        xy_spacing_nm : float
            In-plane pixel size in nanometers.
        enable_z_optimization : bool
            Whether to run intensity-based z-range detection.
        intensity_threshold : float
            Relative threshold for z-range transition detection.
        min_intensity_threshold : float
            Minimum intensity for a slice to count as a z candidate.
        **kwargs
            Optional: verbose, weight_heuristic, max_workers, enable_parallel,
            max_parallel_workers, enable_refinement, filter_close_waypoints,
            min_waypoint_distance.
        """
        self.image = image
        self.points_list = points_list
        self.verbose = kwargs.get('verbose', True)
        self.my_weight_heuristic = kwargs.get('weight_heuristic', 1.0)
        self.max_workers = kwargs.get('max_workers', None)

        # Store spacing
        self.xy_spacing_nm = xy_spacing_nm

        # Z-range detection settings
        self.enable_z_optimization = enable_z_optimization
        self.intensity_threshold = intensity_threshold
        self.min_intensity_threshold = min_intensity_threshold

        # Initialize z-range cache for performance (memoizes per-pixel lookups)
        self.z_range_cache = ZRangeCache()

        # Parallel processing settings
        enable_parallel = kwargs.get('enable_parallel', True)
        max_parallel_workers = kwargs.get('max_parallel_workers', None)

        # Initialize optimizer with parallel settings and spacing
        self.optimizer = Optimizer(
            image.shape,
            xy_spacing_nm=xy_spacing_nm,
            enable_parallel=enable_parallel,
            max_parallel_workers=max_parallel_workers,
            my_weight_heuristic=self.my_weight_heuristic
        )

        # Configuration - conservative settings in nanometers
        self.enable_refinement = kwargs.get('enable_refinement', True)
        self.filter_close_waypoints = kwargs.get('filter_close_waypoints', True)
        self.min_waypoint_distance = kwargs.get('min_waypoint_distance', 3.0)

        if self.verbose:
            print(f"Initializing enhanced accurate fast search for image shape: {image.shape}")
            print(f"Image volume: {self.optimizer.image_volume:,} voxels")
            print(f"Parallel processing: {self.optimizer.enable_parallel}")
            print(f"Spacing: XY={self.xy_spacing_nm:.1f} nm/pixel")
            print(f"Z-range optimization: {self.enable_z_optimization}")
            print("NO SUBDIVISION - Pure waypoint-to-waypoint processing with Z-optimization")
|
|
578
|
+
|
|
579
|
+
def _process_points_list_optimized(self, points_list):
    """Derive start, goal, and intermediate waypoints from the raw click list.

    The two mutually farthest input points become start/goal; the remaining
    points become waypoints. When Z-optimization is enabled (and the image is
    3-D), the start/goal Z-coordinates are snapped to intensity
    appearance/disappearance frames and each waypoint's Z is re-chosen inside
    the detected range.

    Args:
        points_list: sequence of point coordinates (assumed [z, y, x] order
            for a 3-D image — TODO confirm against callers).

    Returns:
        (start_point, goal_point, waypoints): int32 arrays; ``waypoints`` is
        (N, 3) and may be empty.

    Raises:
        ValueError: if fewer than 2 points are supplied.
    """
    if len(points_list) < 2:
        raise ValueError("Need at least 2 points")

    if self.verbose:
        print(f"Processing {len(points_list)} input points...")

    # Find start and end points (farthest apart) - OPTIMIZED
    waypoint_coords, start_coords, end_coords = find_start_end_points_optimized(points_list)

    if self.verbose:
        print(f"Auto-detected start: {start_coords}, end: {end_coords}")
        print(f"Remaining waypoints: {len(waypoint_coords)}")

    # Apply z-range detection if enabled (only meaningful for 3-D volumes)
    if self.enable_z_optimization and len(self.image.shape) == 3:
        # Find optimal z-ranges for start and end points using transition detection.
        # Pass the user's Z-click to disambiguate overlapping dendrites.
        start_z_click = int(start_coords[0])
        start_z_min, start_z_max, start_max_intensity = self.z_range_cache.get_z_range(
            self.image, int(start_coords[1]), int(start_coords[2]), self.intensity_threshold, start_z_click)

        end_z_click = int(end_coords[0])
        end_z_min, end_z_max, end_max_intensity = self.z_range_cache.get_z_range(
            self.image, int(end_coords[1]), int(end_coords[2]), self.intensity_threshold, end_z_click)

        if self.verbose:
            print(f"Start point transition frames: appears at {start_z_min}, disappears at {start_z_max} (max intensity: {start_max_intensity:.3f})")
            print(f"End point transition frames: appears at {end_z_min}, disappears at {end_z_max} (max intensity: {end_max_intensity:.3f})")

        # Update start and end points with transition-based z-coordinates.
        # Negative values from get_z_range are treated as "not detected" and
        # leave the user's original Z untouched — NOTE(review): confirm the
        # sentinel convention against ZRangeCache.
        if start_z_min >= 0:
            start_coords[0] = start_z_min  # Use appearance frame for start

        if end_z_max >= 0:
            end_coords[0] = end_z_max  # Use disappearance frame for end

        # Store the detected z-range for waypoint processing
        detected_z_min = int(start_coords[0])
        detected_z_max = int(end_coords[0])

        if self.verbose:
            print(f"Using transition-based coordinates - Start: {start_coords}, End: {end_coords}")
            print(f"Path z-range: {detected_z_min} to {detected_z_max}")
    else:
        # Fallback: bound the Z-range by the two endpoint Z values as clicked.
        detected_z_min = min(int(start_coords[0]), int(end_coords[0]))
        detected_z_max = max(int(start_coords[0]), int(end_coords[0]))

    # Process waypoints with intelligent z-positioning - OPTIMIZED with proper distribution
    processed_waypoints = []
    if waypoint_coords and self.enable_z_optimization:
        if self.verbose:
            print("Optimizing waypoint z-positions with distribution constraints...")

        # Convert waypoint coordinates to numpy array for batch processing
        waypoint_coords_array = np.array(waypoint_coords)

        # Extract Y,X coordinates for batch processing (columns 1 and 2)
        waypoint_yx = waypoint_coords_array[:, 1:3].astype(np.int32)

        # Use the adaptive search that prioritizes intensity; returns one
        # optimal Z per waypoint within [detected_z_min, detected_z_max].
        optimal_z_values = batch_find_optimal_z_with_adaptive_search(
            self.image, waypoint_yx, detected_z_min, detected_z_max,
            min_intensity_threshold=self.min_intensity_threshold)

        # Create processed waypoints with the re-chosen Z coordinate
        for i, waypoint in enumerate(waypoint_coords):
            original_z = waypoint[0]
            optimized_z = optimal_z_values[i]
            processed_waypoint = np.array([optimized_z, waypoint[1], waypoint[2]], dtype=np.int32)
            processed_waypoints.append(processed_waypoint)

            if self.verbose:
                intensity_at_optimized = self.image[optimized_z, int(waypoint[1]), int(waypoint[2])]
                print(f"Waypoint {i+1}: {waypoint} -> {processed_waypoint} (intensity: {intensity_at_optimized:.3f})")
    else:
        # Use waypoints as-is (no Z re-selection)
        processed_waypoints = [np.array(wp, dtype=np.int32) for wp in waypoint_coords]

    # Convert to numpy arrays
    start_point = np.array(start_coords, dtype=np.int32)
    goal_point = np.array(end_coords, dtype=np.int32)
    waypoints = np.array(processed_waypoints) if processed_waypoints else np.empty((0, 3), dtype=np.int32)

    # Filter close waypoints if enabled - OPTIMIZED
    if self.filter_close_waypoints and len(waypoints) > 1:
        original_count = len(waypoints)
        waypoints = filter_close_waypoints_optimized(waypoints, self.min_waypoint_distance)
        if self.verbose and len(waypoints) != original_count:
            print(f"Filtered waypoints from {original_count} to {len(waypoints)} (removed close duplicates)")

    return start_point, goal_point, waypoints
def search_segment_accurate_wrapper(self, segment_data):
    """Unpack one segment tuple and run the accurate per-segment search.

    Exists so that executor.map can hand each (point_a, point_b,
    segment_idx, strategy) tuple to search_segment_accurate as a single
    argument during parallel processing.
    """
    seg_start, seg_end, seg_index, seg_strategy = segment_data
    return self.search_segment_accurate(seg_start, seg_end, seg_index, seg_strategy)
def search_segments_parallel(self, parallel_segments):
    """Search multiple independent segments concurrently.

    Args:
        parallel_segments: list of (point_a, point_b, segment_idx, strategy)
            tuples, each forwarded unchanged to
            ``search_segment_accurate_wrapper``.

    Returns:
        Per-segment results in the same order as the input list
        (``executor.map`` preserves ordering). Empty list for empty input.
    """
    if not parallel_segments:
        return []

    if self.verbose:
        print(f"Processing {len(parallel_segments)} segments in parallel...")

    # Worker count: an explicit override wins; otherwise defer to the
    # optimizer's auto-detected limit.
    max_workers = self.max_workers
    if max_workers is None:
        max_workers = self.optimizer.max_parallel_workers

    # Fix: this status line previously printed unconditionally; it is now
    # gated on self.verbose like every other log message in this class.
    if self.verbose:
        print(f"Using up to {max_workers} parallel workers")

    # Threads (not processes) let the workers share the large image without
    # copying it. NOTE(review): real speedup assumes the per-segment search
    # releases the GIL in native code — confirm.
    with ThreadPoolExecutor(max_workers=max_workers) as executor:
        parallel_results = list(executor.map(self.search_segment_accurate_wrapper, parallel_segments))

    return parallel_results
def search_segment_accurate(self, point_a, point_b, segment_idx, strategy: SearchStrategy):
    """Search one segment with high accuracy, optionally via a coarse first pass.

    Two-pass scheme: when the strategy requests it, an A* search runs first on
    a downsampled ("hierarchical") image, then the result is refined at full
    resolution; otherwise a direct full-resolution search is performed.

    Args:
        point_a, point_b: segment endpoints in pixel coordinates.
        segment_idx: 1-based index used only for log messages.
        strategy: per-segment SearchStrategy. NOTE: mutated in place on
            hierarchical failure (use_hierarchical/refine_path set False),
            which callers sharing the strategy object will observe.

    Returns:
        The path returned by BidirectionalAStarSearch.search (falsy on
        failure — TODO confirm its failure convention).
    """
    from neuro_sam.brightest_path_lib.algorithm.astar import BidirectionalAStarSearch
    from neuro_sam.brightest_path_lib.input import CostFunction, HeuristicFunction

    # Calculate distance in nanometers for logging
    distance_nm = calculate_segment_distance_accurate_nm(
        np.array(point_a, dtype=np.float64),
        np.array(point_b, dtype=np.float64),
        self.xy_spacing_nm,
    )

    if self.verbose:
        print(f" Segment {segment_idx}: distance={distance_nm:.1f} nm, "
              f"hierarchical={strategy.use_hierarchical}, "
              f"refine={strategy.refine_path}")

    start_time = time.time()

    # First pass - potentially hierarchical for speed
    if strategy.use_hierarchical:
        # Create conservative hierarchical image (cached per factor)
        hierarchical_image = self._create_conservative_hierarchical_image(strategy.hierarchical_factor)

        # Scale coordinates for hierarchical search
        scale_factor = strategy.hierarchical_factor
        start_scaled = np.array(point_a, dtype=np.int32) // scale_factor
        goal_scaled = np.array(point_b, dtype=np.int32) // scale_factor

        # Ensure scaled coordinates are valid (inside the downsampled volume)
        start_scaled = np.clip(start_scaled, 0, np.array(hierarchical_image.shape) - 1)
        goal_scaled = np.clip(goal_scaled, 0, np.array(hierarchical_image.shape) - 1)

        # Hierarchical search
        hierarchical_search = BidirectionalAStarSearch(
            image=hierarchical_image,
            start_point=start_scaled,
            goal_point=goal_scaled,
            scale=(1.0, 1.0, 1.0),
            cost_function=CostFunction.RECIPROCAL,
            heuristic_function=HeuristicFunction.EUCLIDEAN,
            use_hierarchical=False,  # Don't nest hierarchical
            weight_heuristic=self.my_weight_heuristic  # Always optimal
        )

        hierarchical_path = hierarchical_search.search(verbose=False)

        if not hierarchical_path:
            # NOTE: this warning prints regardless of self.verbose, and the
            # strategy object is mutated so the branch below falls through to
            # the direct full-resolution search.
            print(f" Hierarchical search failed, falling back to full resolution")
            strategy.use_hierarchical = False
            strategy.refine_path = False

    # Second pass - refine at full resolution if needed.
    # (Safe even when the first pass was skipped: use_hierarchical is
    # evaluated before hierarchical_path, so the name is only read when the
    # first pass actually ran.)
    if strategy.refine_path and strategy.use_hierarchical and hierarchical_path:
        # Scale hierarchical path back to full resolution
        scaled_path = [point * strategy.hierarchical_factor for point in hierarchical_path]

        # Refine path by searching in a corridor around the hierarchical path
        refined_path = self._refine_path_in_corridor(scaled_path, point_a, point_b)
        segment_path = refined_path
    else:
        # Direct search at full resolution
        search = BidirectionalAStarSearch(
            image=self.image,
            start_point=np.array(point_a, dtype=np.int32),
            goal_point=np.array(point_b, dtype=np.int32),
            scale=(1.0, 1.0, 1.0),
            cost_function=CostFunction.RECIPROCAL,
            heuristic_function=HeuristicFunction.EUCLIDEAN,
            use_hierarchical=False,
            weight_heuristic=self.my_weight_heuristic  # Always optimal for accuracy
        )

        segment_path = search.search(verbose=False)

    search_time = time.time() - start_time

    if self.verbose:
        print(f" Completed in {search_time:.2f}s, "
              f"{len(segment_path) if segment_path else 0} points")

    return segment_path
def _create_conservative_hierarchical_image(self, factor):
|
|
781
|
+
"""Create hierarchical image with conservative downsampling"""
|
|
782
|
+
if not hasattr(self, '_hierarchical_cache'):
|
|
783
|
+
self._hierarchical_cache = {}
|
|
784
|
+
|
|
785
|
+
if factor not in self._hierarchical_cache:
|
|
786
|
+
# Use maximum intensity to preserve bright structures
|
|
787
|
+
z, y, x = self.image.shape
|
|
788
|
+
new_z = max(1, z // factor)
|
|
789
|
+
new_y = max(1, y // factor)
|
|
790
|
+
new_x = max(1, x // factor)
|
|
791
|
+
|
|
792
|
+
downsampled = np.zeros((new_z, new_y, new_x), dtype=self.image.dtype)
|
|
793
|
+
|
|
794
|
+
for i in range(new_z):
|
|
795
|
+
for j in range(new_y):
|
|
796
|
+
for k in range(new_x):
|
|
797
|
+
z_start, z_end = i * factor, min((i + 1) * factor, z)
|
|
798
|
+
y_start, y_end = j * factor, min((j + 1) * factor, y)
|
|
799
|
+
x_start, x_end = k * factor, min((k + 1) * factor, x)
|
|
800
|
+
|
|
801
|
+
region = self.image[z_start:z_end, y_start:y_end, x_start:x_end]
|
|
802
|
+
# Use maximum to preserve bright structures
|
|
803
|
+
downsampled[i, j, k] = np.max(region)
|
|
804
|
+
|
|
805
|
+
self._hierarchical_cache[factor] = downsampled
|
|
806
|
+
|
|
807
|
+
return self._hierarchical_cache[factor]
|
|
808
|
+
|
|
809
|
+
def _refine_path_in_corridor(self, coarse_path, original_start, original_goal):
    """Re-run the search at full resolution between the original endpoints.

    Despite the name, ``coarse_path`` is currently never read: the search is
    performed over the whole full-resolution image between the original
    endpoints, which guarantees maximum accuracy. The parameter is kept for
    interface stability (a true corridor restriction could use it later).
    """
    from neuro_sam.brightest_path_lib.algorithm.astar import BidirectionalAStarSearch
    from neuro_sam.brightest_path_lib.input import CostFunction, HeuristicFunction

    endpoint_start = np.array(original_start, dtype=np.int32)
    endpoint_goal = np.array(original_goal, dtype=np.int32)

    refiner = BidirectionalAStarSearch(
        image=self.image,
        start_point=endpoint_start,
        goal_point=endpoint_goal,
        scale=(1.0, 1.0, 1.0),
        cost_function=CostFunction.RECIPROCAL,
        heuristic_function=HeuristicFunction.EUCLIDEAN,
        use_hierarchical=False,
        weight_heuristic=self.my_weight_heuristic,  # always optimal
    )
    return refiner.search(verbose=False)
def search(self):
    """Run the full waypoint-to-waypoint search and return the combined path.

    Pipeline: (1) derive start/goal/waypoints (optionally Z-optimized),
    (2) pick a per-segment strategy from the nanometer distance,
    (3) run suitable segments in parallel and the rest sequentially,
    (4) stitch segment paths together, dropping each segment's first point
    to avoid duplicating the shared junction.

    Returns:
        The combined path (list-like, supporting .copy()/.extend()), or
        None if any segment search failed.
    """
    start_time = time.time()

    # Apply Z-range optimization if enabled
    if self.enable_z_optimization:
        start_point, goal_point, waypoints = self._process_points_list_optimized(self.points_list)
        all_points = [start_point] + [waypoints[i] for i in range(len(waypoints))] + [goal_point]
    else:
        # NO SUBDIVISION - Use original waypoints directly (fallback)
        waypoint_coords, start_coords, end_coords = find_start_end_points_optimized(self.points_list)
        all_points = [start_coords] + waypoint_coords + [end_coords]

    if self.verbose:
        optimization_info = "with Z-range optimization" if self.enable_z_optimization else "without Z-optimization"
        print(f"Starting enhanced accurate fast search {optimization_info}")
        print(f"Original points: {len(self.points_list)} -> Optimized points: {len(all_points)}")

    # Analyze all segments and determine strategies
    segment_data = []
    # NOTE(review): `strategies` is populated but never read again in this
    # method; the strategy travels inside segment_data instead.
    strategies = []

    for i in range(len(all_points) - 1):
        point_a = all_points[i]
        point_b = all_points[i + 1]

        # Determine strategy for this segment using nanometer distance
        distance_nm = calculate_segment_distance_accurate_nm(
            np.array(point_a, dtype=np.float64),
            np.array(point_b, dtype=np.float64),
            self.xy_spacing_nm,
        )

        strategy = self.optimizer.determine_accurate_strategy(
            distance_nm, i + 1, len(all_points) - 1)

        strategies.append(strategy)
        segment_data.append((point_a, point_b, i + 1, strategy))

    # Separate segments for parallel vs sequential processing.
    # Only up to max_parallel_workers segments go to the parallel batch;
    # the overflow (and unsuitable segments) run sequentially.
    parallel_segments = []
    sequential_segments = []
    parallel_indices = []
    sequential_indices = []

    for idx, (point_a, point_b, segment_idx, strategy) in enumerate(segment_data):
        if strategy.suitable_for_parallel and len(parallel_segments) < self.optimizer.max_parallel_workers:
            parallel_segments.append((point_a, point_b, segment_idx, strategy))
            parallel_indices.append(idx)
        else:
            sequential_segments.append((point_a, point_b, segment_idx, strategy))
            sequential_indices.append(idx)

    if self.verbose:
        print(f"Parallel segments: {len(parallel_segments)}, Sequential: {len(sequential_segments)}")

    # Initialize results array to maintain order
    all_paths = [None] * len(segment_data)

    # Process parallel segments
    if parallel_segments:
        parallel_start_time = time.time()
        parallel_results = self.search_segments_parallel(parallel_segments)
        parallel_time = time.time() - parallel_start_time

        # Store parallel results in correct positions.
        # NOTE(review): the loop variable `result` shadows the combined-path
        # variable assigned further below; harmless today, but rename on the
        # next behavioral change.
        for idx, result in zip(parallel_indices, parallel_results):
            all_paths[idx] = result

        if self.verbose:
            print(f"Parallel processing completed in {parallel_time:.2f}s")

    # Process sequential segments
    if sequential_segments:
        if self.verbose:
            print("Processing sequential segments...")

        for segment_idx, (point_a, point_b, seg_num, strategy) in enumerate(sequential_segments):
            path = self.search_segment_accurate(point_a, point_b, seg_num, strategy)

            # Store in correct position
            original_idx = sequential_indices[segment_idx]
            all_paths[original_idx] = path

    # Check if all segments succeeded
    if all(path is not None for path in all_paths):
        # Combine paths in correct order
        result = all_paths[0].copy()
        for path in all_paths[1:]:
            result.extend(path[1:])  # Skip first point to avoid duplication
    else:
        failed_segments = [i + 1 for i, path in enumerate(all_paths) if path is None]
        print(f"ERROR: Failed to find path for segments: {failed_segments}")
        return None

    total_time = time.time() - start_time

    if self.verbose:
        optimization_info = "with Z-range optimization" if self.enable_z_optimization else "without Z-optimization"
        print(f"Enhanced accurate fast search completed in {total_time:.2f}s {optimization_info}")
        print(f"Total path length: {len(result)}")

        # Calculate theoretical speedup from parallelization.
        # NOTE(review): this is a rough heuristic estimate, not a measured
        # speedup — it assumes each segment costs total_time/len(all_points).
        if parallel_segments:
            sequential_time_estimate = total_time + (len(parallel_segments) - 1) * (total_time / len(all_points))
            parallel_speedup = sequential_time_estimate / total_time
            print(f"Estimated parallel speedup: {parallel_speedup:.1f}x")

    return result
# Conservative optimization settings for medical use - now with nanometer thresholds and Z-optimization
|
|
940
|
+
def create_accurate_settings_nm(xy_spacing_nm=94.0, enable_z_optimization=True):
    """Build the conservative, accuracy-first configuration dictionary.

    Thresholds are expressed in nanometers where applicable; the defaults
    favour medical-grade accuracy (optimal A* weight, refinement enabled).

    Args:
        xy_spacing_nm: XY pixel spacing in nanometers per pixel.
        enable_z_optimization: whether intelligent Z-positioning is enabled.

    Returns:
        dict of keyword settings consumed by FasterWaypointSearch.
    """
    return dict(
        enable_refinement=True,               # always refine hierarchical paths
        hierarchical_threshold=100_000_000,   # only for very large images
        weight_heuristic=1.0,                 # ALWAYS optimal for medical accuracy
        enable_parallel=True,                 # enable parallel processing
        max_parallel_workers=None,            # auto-detect optimal workers
        xy_spacing_nm=xy_spacing_nm,          # XY pixel spacing
        enable_z_optimization=enable_z_optimization,  # intelligent Z-positioning
        intensity_threshold=0.3,              # threshold for Z-range detection
        min_intensity_threshold=0.1,          # minimum intensity for waypoint placement
        filter_close_waypoints=True,          # filter waypoints that are too close
        min_waypoint_distance=3.0,            # minimum distance between waypoints
    )
|
957
|
+
def quick_accurate_optimized_search(image, points_list, xy_spacing_nm=94.0,
                                    my_weight_heuristic=2.0, verbose=True, enable_parallel=True,
                                    enable_z_optimization=True, intensity_threshold=0.3,
                                    min_intensity_threshold=0.1, max_workers=None):
    """
    Convenience entry point: run the accurate optimized waypoint search in one call.

    Builds the conservative nanometer-aware settings, overlays the caller's
    overrides, constructs a FasterWaypointSearch, and returns its result.
    NO SUBDIVISION is performed between waypoints.

    Args:
        image: 3D image array
        points_list: List of [z, y, x] waypoints in pixel coordinates
        xy_spacing_nm: XY pixel spacing in nanometers per pixel
        my_weight_heuristic: A* weight heuristic (1.0 = optimal)
        verbose: Print progress information
        enable_parallel: Enable parallel processing
        enable_z_optimization: Enable intelligent Z-positioning based on intensity transitions
        intensity_threshold: Threshold for Z-range detection (fraction of peak intensity)
        min_intensity_threshold: Minimum intensity required for waypoint placement
        max_workers: Explicit worker-count cap, or None to auto-detect
    """
    # Start from the conservative defaults, then overlay the caller's overrides.
    settings = create_accurate_settings_nm(xy_spacing_nm, enable_z_optimization)
    settings.update(
        weight_heuristic=my_weight_heuristic,
        enable_parallel=enable_parallel,
        intensity_threshold=intensity_threshold,
        min_intensity_threshold=min_intensity_threshold,
        max_workers=max_workers,
    )

    # xy_spacing_nm is passed explicitly below; drop it from the kwargs to
    # avoid a duplicate keyword argument.
    settings.pop('xy_spacing_nm', None)

    searcher = FasterWaypointSearch(
        image=image,
        points_list=points_list,
        xy_spacing_nm=xy_spacing_nm,
        verbose=verbose,
        **settings,
    )
    return searcher.search()
|
|
998
|
+
# if __name__ == "__main__":
|
|
999
|
+
# print('Enhanced fast waypoint search with Z-range optimization ready - NO SUBDIVISION!')
|
|
1000
|
+
# print('Features:')
|
|
1001
|
+
# print(' - Intelligent Z-positioning based on intensity transitions')
|
|
1002
|
+
# print(' - Automatic start/end point detection with appearance/disappearance frames')
|
|
1003
|
+
# print(' - Parallel processing for speed')
|
|
1004
|
+
# print(' - Nanometer-aware thresholds')
|
|
1005
|
+
# print(' - Waypoint filtering and optimization')
|
|
1006
|
+
# print(' - Medical-grade accuracy (weight_heuristic=1.0)')
|
|
1007
|
+
# print(' - Proper Z-distribution with intensity awareness')
|