neuro-sam 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- neuro_sam/__init__.py +1 -0
- neuro_sam/brightest_path_lib/__init__.py +5 -0
- neuro_sam/brightest_path_lib/algorithm/__init__.py +3 -0
- neuro_sam/brightest_path_lib/algorithm/astar.py +586 -0
- neuro_sam/brightest_path_lib/algorithm/waypointastar.py +449 -0
- neuro_sam/brightest_path_lib/algorithm/waypointastar_speedup.py +1007 -0
- neuro_sam/brightest_path_lib/connected_componen.py +329 -0
- neuro_sam/brightest_path_lib/cost/__init__.py +8 -0
- neuro_sam/brightest_path_lib/cost/cost.py +33 -0
- neuro_sam/brightest_path_lib/cost/reciprocal.py +90 -0
- neuro_sam/brightest_path_lib/cost/reciprocal_transonic.py +86 -0
- neuro_sam/brightest_path_lib/heuristic/__init__.py +2 -0
- neuro_sam/brightest_path_lib/heuristic/euclidean.py +101 -0
- neuro_sam/brightest_path_lib/heuristic/heuristic.py +29 -0
- neuro_sam/brightest_path_lib/image/__init__.py +1 -0
- neuro_sam/brightest_path_lib/image/stats.py +197 -0
- neuro_sam/brightest_path_lib/input/__init__.py +1 -0
- neuro_sam/brightest_path_lib/input/inputs.py +14 -0
- neuro_sam/brightest_path_lib/node/__init__.py +2 -0
- neuro_sam/brightest_path_lib/node/bidirectional_node.py +240 -0
- neuro_sam/brightest_path_lib/node/node.py +125 -0
- neuro_sam/brightest_path_lib/visualization/__init__.py +4 -0
- neuro_sam/brightest_path_lib/visualization/flythrough.py +133 -0
- neuro_sam/brightest_path_lib/visualization/flythrough_all.py +394 -0
- neuro_sam/brightest_path_lib/visualization/tube_data.py +385 -0
- neuro_sam/brightest_path_lib/visualization/tube_flythrough.py +227 -0
- neuro_sam/napari_utils/anisotropic_scaling.py +503 -0
- neuro_sam/napari_utils/color_utils.py +135 -0
- neuro_sam/napari_utils/contrasting_color_system.py +169 -0
- neuro_sam/napari_utils/main_widget.py +1016 -0
- neuro_sam/napari_utils/path_tracing_module.py +1016 -0
- neuro_sam/napari_utils/punet_widget.py +424 -0
- neuro_sam/napari_utils/segmentation_model.py +769 -0
- neuro_sam/napari_utils/segmentation_module.py +649 -0
- neuro_sam/napari_utils/visualization_module.py +574 -0
- neuro_sam/plugin.py +260 -0
- neuro_sam/punet/__init__.py +0 -0
- neuro_sam/punet/deepd3_model.py +231 -0
- neuro_sam/punet/prob_unet_deepd3.py +431 -0
- neuro_sam/punet/prob_unet_with_tversky.py +375 -0
- neuro_sam/punet/punet_inference.py +236 -0
- neuro_sam/punet/run_inference.py +145 -0
- neuro_sam/punet/unet_blocks.py +81 -0
- neuro_sam/punet/utils.py +52 -0
- neuro_sam-0.1.0.dist-info/METADATA +269 -0
- neuro_sam-0.1.0.dist-info/RECORD +93 -0
- neuro_sam-0.1.0.dist-info/WHEEL +5 -0
- neuro_sam-0.1.0.dist-info/entry_points.txt +2 -0
- neuro_sam-0.1.0.dist-info/licenses/LICENSE +21 -0
- neuro_sam-0.1.0.dist-info/top_level.txt +2 -0
- sam2/__init__.py +11 -0
- sam2/automatic_mask_generator.py +454 -0
- sam2/benchmark.py +92 -0
- sam2/build_sam.py +174 -0
- sam2/configs/sam2/sam2_hiera_b+.yaml +113 -0
- sam2/configs/sam2/sam2_hiera_l.yaml +117 -0
- sam2/configs/sam2/sam2_hiera_s.yaml +116 -0
- sam2/configs/sam2/sam2_hiera_t.yaml +118 -0
- sam2/configs/sam2.1/sam2.1_hiera_b+.yaml +116 -0
- sam2/configs/sam2.1/sam2.1_hiera_l.yaml +120 -0
- sam2/configs/sam2.1/sam2.1_hiera_s.yaml +119 -0
- sam2/configs/sam2.1/sam2.1_hiera_t.yaml +121 -0
- sam2/configs/sam2.1_training/sam2.1_hiera_b+_MOSE_finetune.yaml +339 -0
- sam2/configs/train.yaml +335 -0
- sam2/modeling/__init__.py +5 -0
- sam2/modeling/backbones/__init__.py +5 -0
- sam2/modeling/backbones/hieradet.py +317 -0
- sam2/modeling/backbones/image_encoder.py +134 -0
- sam2/modeling/backbones/utils.py +93 -0
- sam2/modeling/memory_attention.py +169 -0
- sam2/modeling/memory_encoder.py +181 -0
- sam2/modeling/position_encoding.py +239 -0
- sam2/modeling/sam/__init__.py +5 -0
- sam2/modeling/sam/mask_decoder.py +295 -0
- sam2/modeling/sam/prompt_encoder.py +202 -0
- sam2/modeling/sam/transformer.py +311 -0
- sam2/modeling/sam2_base.py +911 -0
- sam2/modeling/sam2_utils.py +323 -0
- sam2/sam2.1_hiera_b+.yaml +116 -0
- sam2/sam2.1_hiera_l.yaml +120 -0
- sam2/sam2.1_hiera_s.yaml +119 -0
- sam2/sam2.1_hiera_t.yaml +121 -0
- sam2/sam2_hiera_b+.yaml +113 -0
- sam2/sam2_hiera_l.yaml +117 -0
- sam2/sam2_hiera_s.yaml +116 -0
- sam2/sam2_hiera_t.yaml +118 -0
- sam2/sam2_image_predictor.py +475 -0
- sam2/sam2_video_predictor.py +1222 -0
- sam2/sam2_video_predictor_legacy.py +1172 -0
- sam2/utils/__init__.py +5 -0
- sam2/utils/amg.py +348 -0
- sam2/utils/misc.py +349 -0
- sam2/utils/transforms.py +118 -0
neuro_sam/napari_utils/anisotropic_scaling.py
@@ -0,0 +1,503 @@
import numpy as np
from scipy.ndimage import zoom
import napari

class AnisotropicScaler:
    """
    Handles anisotropic scaling of 3D datasets with separate X, Y, Z spacing
    Similar to Neurotube's scaling system
    """

    def __init__(self, original_spacing_xyz=(94.0, 94.0, 500.0)):
        """
        Initialize with original voxel spacing in nanometers

        Args:
            original_spacing_xyz: Tuple of (x_spacing, y_spacing, z_spacing) in nm
        """
        self.original_spacing_xyz = np.array(original_spacing_xyz, dtype=float)
        self.current_spacing_xyz = self.original_spacing_xyz.copy()
        self.scale_factors = np.array([1.0, 1.0, 1.0])  # z, y, x order for numpy
        self.original_image = None
        self.scaled_image = None

    def set_spacing(self, x_nm, y_nm, z_nm):
        """
        Set new voxel spacing in nanometers

        Args:
            x_nm: X spacing in nanometers
            y_nm: Y spacing in nanometers
            z_nm: Z spacing in nanometers
        """
        self.current_spacing_xyz = np.array([x_nm, y_nm, z_nm], dtype=float)

        # Calculate scale factors (z, y, x order for numpy arrays)
        # Scale factor = original_spacing / new_spacing
        self.scale_factors = np.array([
            self.original_spacing_xyz[2] / z_nm,  # Z scale factor
            self.original_spacing_xyz[1] / y_nm,  # Y scale factor
            self.original_spacing_xyz[0] / x_nm   # X scale factor
        ])

        print(f"Updated spacing: X={x_nm:.1f}, Y={y_nm:.1f}, Z={z_nm:.1f} nm")
        print(f"Scale factors (Z,Y,X): {self.scale_factors}")

    def scale_image(self, image, order=1, prefilter=True):
        """
        Scale the image according to current spacing settings

        Args:
            image: Input 3D numpy array (Z, Y, X)
            order: Interpolation order (0=nearest, 1=linear, 3=cubic)
            prefilter: Whether to apply prefiltering for higher order interpolation

        Returns:
            Scaled image array
        """
        if self.original_image is None:
            self.original_image = image.copy()

        print(f"Scaling image from shape {image.shape} with factors {self.scale_factors}")

        # Apply scaling using scipy.ndimage.zoom
        scaled_image = zoom(image, self.scale_factors, order=order, prefilter=prefilter)

        print(f"Scaled image to shape {scaled_image.shape}")

        self.scaled_image = scaled_image
        return scaled_image

    def scale_coordinates(self, coordinates):
        """
        Scale coordinates from original space to scaled space

        Args:
            coordinates: Array of coordinates in (Z, Y, X) format

        Returns:
            Scaled coordinates
        """
        coords = np.array(coordinates)
        if coords.ndim == 1:
            # Single coordinate
            return coords * self.scale_factors
        else:
            # Multiple coordinates
            return coords * self.scale_factors[np.newaxis, :]

    def scale_coordinates_between_spacings(self, coordinates, from_spacing_xyz, to_spacing_xyz):
        """
        Scale coordinates from one spacing to another spacing

        Args:
            coordinates: Array of coordinates in (Z, Y, X) format
            from_spacing_xyz: Source spacing (x, y, z) in nm
            to_spacing_xyz: Target spacing (x, y, z) in nm

        Returns:
            Scaled coordinates
        """
        # Calculate scale factors between the two spacings
        # Scale factor = from_spacing / to_spacing
        scale_factors = np.array([
            from_spacing_xyz[2] / to_spacing_xyz[2],  # Z scale factor
            from_spacing_xyz[1] / to_spacing_xyz[1],  # Y scale factor
            from_spacing_xyz[0] / to_spacing_xyz[0]   # X scale factor
        ])

        coords = np.array(coordinates)
        if coords.ndim == 1:
            # Single coordinate
            return coords * scale_factors
        else:
            # Multiple coordinates
            return coords * scale_factors[np.newaxis, :]

    def scale_mask(self, mask, target_shape, order=0):
        """
        Scale a mask to target shape using appropriate interpolation

        Args:
            mask: Input mask array
            target_shape: Target shape tuple
            order: Interpolation order (0 for masks to preserve binary values)

        Returns:
            Scaled mask
        """
        if mask.shape == target_shape:
            return mask

        # Calculate scale factors for this specific scaling
        scale_factors = np.array(target_shape) / np.array(mask.shape)

        # Use nearest neighbor for masks to preserve binary values
        scaled_mask = zoom(mask, scale_factors, order=order, prefilter=False)

        # Ensure binary values for segmentation masks
        if order == 0:
            scaled_mask = (scaled_mask > 0.5).astype(mask.dtype)

        return scaled_mask

    def unscale_coordinates(self, scaled_coordinates):
        """
        Convert coordinates from scaled space back to original space

        Args:
            scaled_coordinates: Array of coordinates in scaled space

        Returns:
            Original space coordinates
        """
        coords = np.array(scaled_coordinates)
        if coords.ndim == 1:
            # Single coordinate
            return coords / self.scale_factors
        else:
            # Multiple coordinates
            return coords / self.scale_factors[np.newaxis, :]

    def get_effective_spacing(self):
        """
        Get the effective voxel spacing in the scaled image

        Returns:
            Tuple of (x_spacing, y_spacing, z_spacing) in nm for the scaled image
        """
        return tuple(self.current_spacing_xyz)

    def get_scale_factors(self):
        """
        Get current scale factors in (Z, Y, X) order

        Returns:
            Array of scale factors
        """
        return self.scale_factors.copy()

    def reset_to_original(self):
        """Reset scaling to original spacing"""
        self.current_spacing_xyz = self.original_spacing_xyz.copy()
        self.scale_factors = np.array([1.0, 1.0, 1.0])

    def get_volume_ratio(self):
        """
        Get the volume ratio between scaled and original image

        Returns:
            Volume ratio (scaled_volume / original_volume)
        """
        return np.prod(self.scale_factors)


class ScalingWidget:
    """
    UI widget for controlling anisotropic scaling
    """

    def __init__(self, viewer, scaler, update_callback=None):
        """
        Initialize scaling widget

        Args:
            viewer: Napari viewer instance
            scaler: AnisotropicScaler instance
            update_callback: Function to call when scaling changes
        """
        self.viewer = viewer
        self.scaler = scaler
        self.update_callback = update_callback

    def create_scaling_controls(self, layout):
        """
        Add scaling controls to a layout

        Args:
            layout: QT layout to add controls to
        """
        from qtpy.QtWidgets import (QLabel, QDoubleSpinBox, QHBoxLayout,
                                    QPushButton, QGroupBox, QVBoxLayout, QCheckBox)

        # Scaling section
        scaling_group = QGroupBox("Anisotropic Voxel Spacing (Neurotube-style)")
        scaling_layout = QVBoxLayout()
        scaling_layout.setSpacing(2)
        scaling_layout.setContentsMargins(5, 5, 5, 5)

        # Instructions
        info_label = QLabel("Set voxel spacing in nanometers (will reshape the dataset):")
        info_label.setWordWrap(True)
        scaling_layout.addWidget(info_label)

        # X spacing
        x_layout = QHBoxLayout()
        x_layout.addWidget(QLabel("X spacing:"))
        self.x_spacing_spin = QDoubleSpinBox()
        self.x_spacing_spin.setRange(1.0, 10000.0)
        self.x_spacing_spin.setSingleStep(1.0)
        self.x_spacing_spin.setValue(self.scaler.current_spacing_xyz[0])
        self.x_spacing_spin.setDecimals(1)
        self.x_spacing_spin.setSuffix(" nm")
        self.x_spacing_spin.setToolTip("X-axis voxel spacing in nanometers")
        self.x_spacing_spin.valueChanged.connect(self._on_spacing_changed)
        x_layout.addWidget(self.x_spacing_spin)
        scaling_layout.addLayout(x_layout)

        # Y spacing
        y_layout = QHBoxLayout()
        y_layout.addWidget(QLabel("Y spacing:"))
        self.y_spacing_spin = QDoubleSpinBox()
        self.y_spacing_spin.setRange(1.0, 10000.0)
        self.y_spacing_spin.setSingleStep(1.0)
        self.y_spacing_spin.setValue(self.scaler.current_spacing_xyz[1])
        self.y_spacing_spin.setDecimals(1)
        self.y_spacing_spin.setSuffix(" nm")
        self.y_spacing_spin.setToolTip("Y-axis voxel spacing in nanometers")
        self.y_spacing_spin.valueChanged.connect(self._on_spacing_changed)
        y_layout.addWidget(self.y_spacing_spin)
        scaling_layout.addLayout(y_layout)

        # Z spacing
        z_layout = QHBoxLayout()
        z_layout.addWidget(QLabel("Z spacing:"))
        self.z_spacing_spin = QDoubleSpinBox()
        self.z_spacing_spin.setRange(1.0, 10000.0)
        self.z_spacing_spin.setSingleStep(1.0)
        self.z_spacing_spin.setValue(self.scaler.current_spacing_xyz[2])
        self.z_spacing_spin.setDecimals(1)
        self.z_spacing_spin.setSuffix(" nm")
        self.z_spacing_spin.setToolTip("Z-axis voxel spacing in nanometers")
        self.z_spacing_spin.valueChanged.connect(self._on_spacing_changed)
        z_layout.addWidget(self.z_spacing_spin)
        scaling_layout.addLayout(z_layout)

        # Interpolation method
        interp_layout = QHBoxLayout()
        interp_layout.addWidget(QLabel("Interpolation:"))
        from qtpy.QtWidgets import QComboBox
        self.interp_combo = QComboBox()
        self.interp_combo.addItems(["Nearest", "Linear", "Cubic"])
        self.interp_combo.setCurrentIndex(1)  # Default to linear
        self.interp_combo.setToolTip("Interpolation method for scaling")
        interp_layout.addWidget(self.interp_combo)
        scaling_layout.addLayout(interp_layout)

        # Auto-update checkbox
        self.auto_update_cb = QCheckBox("Auto-update on change")
        self.auto_update_cb.setChecked(False)
        self.auto_update_cb.setToolTip("Automatically apply scaling when values change")
        scaling_layout.addWidget(self.auto_update_cb)

        # Control buttons
        button_layout = QHBoxLayout()

        self.apply_scaling_btn = QPushButton("Apply Scaling")
        self.apply_scaling_btn.setToolTip("Apply current scaling settings to reshape the image")
        self.apply_scaling_btn.clicked.connect(self._apply_scaling)
        button_layout.addWidget(self.apply_scaling_btn)

        self.reset_scaling_btn = QPushButton("Reset to Original")
        self.reset_scaling_btn.setToolTip("Reset to original voxel spacing")
        self.reset_scaling_btn.clicked.connect(self._reset_scaling)
        button_layout.addWidget(self.reset_scaling_btn)

        scaling_layout.addLayout(button_layout)

        # Status info
        self.scaling_status = QLabel("Status: Original spacing")
        self.scaling_status.setWordWrap(True)
        scaling_layout.addWidget(self.scaling_status)

        scaling_group.setLayout(scaling_layout)
        layout.addWidget(scaling_group)

    def _on_spacing_changed(self):
        """Handle when spacing values change"""
        if self.auto_update_cb.isChecked():
            self._apply_scaling()
        else:
            self._update_status_only()

    def _update_status_only(self):
        """Update status without applying scaling"""
        x_nm = self.x_spacing_spin.value()
        y_nm = self.y_spacing_spin.value()
        z_nm = self.z_spacing_spin.value()

        # Calculate what the scale factors would be
        temp_scale_factors = np.array([
            self.scaler.original_spacing_xyz[2] / z_nm,  # Z
            self.scaler.original_spacing_xyz[1] / y_nm,  # Y
            self.scaler.original_spacing_xyz[0] / x_nm   # X
        ])

        self.scaling_status.setText(
            f"Pending: X={x_nm:.1f}, Y={y_nm:.1f}, Z={z_nm:.1f} nm\n"
            f"Scale factors (Z,Y,X): {temp_scale_factors[0]:.3f}, {temp_scale_factors[1]:.3f}, {temp_scale_factors[2]:.3f}"
        )

    def _apply_scaling(self):
        """Apply current scaling settings"""
        try:
            x_nm = self.x_spacing_spin.value()
            y_nm = self.y_spacing_spin.value()
            z_nm = self.z_spacing_spin.value()

            # Update scaler
            self.scaler.set_spacing(x_nm, y_nm, z_nm)

            # Get interpolation order
            interp_order = self.interp_combo.currentIndex()
            if interp_order == 0:
                order = 0  # Nearest
            elif interp_order == 1:
                order = 1  # Linear
            else:
                order = 3  # Cubic

            # Update status
            volume_ratio = self.scaler.get_volume_ratio()
            self.scaling_status.setText(
                f"Applied: X={x_nm:.1f}, Y={y_nm:.1f}, Z={z_nm:.1f} nm\n"
                f"Scale factors (Z,Y,X): {self.scaler.scale_factors[0]:.3f}, {self.scaler.scale_factors[1]:.3f}, {self.scaler.scale_factors[2]:.3f}\n"
                f"Volume ratio: {volume_ratio:.3f}"
            )

            # Call update callback if provided
            if self.update_callback:
                self.update_callback(order)

            napari.utils.notifications.show_info(f"Applied anisotropic scaling: X={x_nm:.1f}, Y={y_nm:.1f}, Z={z_nm:.1f} nm")

        except Exception as e:
            napari.utils.notifications.show_info(f"Error applying scaling: {str(e)}")
            print(f"Scaling error: {str(e)}")

    def _reset_scaling(self):
        """Reset to original scaling"""
        self.scaler.reset_to_original()

        # Update UI
        self.x_spacing_spin.setValue(self.scaler.current_spacing_xyz[0])
        self.y_spacing_spin.setValue(self.scaler.current_spacing_xyz[1])
        self.z_spacing_spin.setValue(self.scaler.current_spacing_xyz[2])

        self.scaling_status.setText("Status: Reset to original spacing")

        # Call update callback if provided
        if self.update_callback:
            self.update_callback(1)  # Linear interpolation for reset

        napari.utils.notifications.show_info("Reset to original voxel spacing")


# Example integration with existing NeuroSAM system
class ScaledNeuroSAMWidget:
    """
    Extended NeuroSAM widget with anisotropic scaling support
    """

    def __init__(self, viewer, original_image, original_spacing_xyz=(94.0, 94.0, 500.0)):
        """
        Initialize with anisotropic scaling support

        Args:
            viewer: Napari viewer
            original_image: Original image data
            original_spacing_xyz: Original spacing in (x, y, z) nanometers
        """
        self.viewer = viewer
        self.original_image = original_image
        self.current_image = original_image.copy()

        # Initialize scaler
        self.scaler = AnisotropicScaler(original_spacing_xyz)

        # Initialize scaling widget
        self.scaling_widget = ScalingWidget(
            viewer=self.viewer,
            scaler=self.scaler,
            update_callback=self._on_scaling_update
        )

        # Track the main image layer
        self.image_layer = None

    def _on_scaling_update(self, interpolation_order):
        """
        Handle when scaling is updated

        Args:
            interpolation_order: Interpolation order for scaling
        """
        try:
            # Scale the image
            scaled_image = self.scaler.scale_image(
                self.original_image,
                order=interpolation_order
            )

            # Update current image
            self.current_image = scaled_image

            # Update the napari layer
            if self.image_layer is not None:
                # Update existing layer
                self.image_layer.data = scaled_image
                self.image_layer.name = f"Image (scaled: {self.scaler.current_spacing_xyz[0]:.1f}, {self.scaler.current_spacing_xyz[1]:.1f}, {self.scaler.current_spacing_xyz[2]:.1f} nm)"
            else:
                # Create new layer
                self.image_layer = self.viewer.add_image(
                    scaled_image,
                    name=f"Image (scaled: {self.scaler.current_spacing_xyz[0]:.1f}, {self.scaler.current_spacing_xyz[1]:.1f}, {self.scaler.current_spacing_xyz[2]:.1f} nm)",
                    colormap='gray'
                )

            # Clear existing paths/segmentations since they're now invalid
            self._clear_analysis_layers()

            print(f"Updated image to shape {scaled_image.shape} with spacing {self.scaler.current_spacing_xyz}")

        except Exception as e:
            napari.utils.notifications.show_info(f"Error updating scaled image: {str(e)}")
            print(f"Scaling update error: {str(e)}")

    def _clear_analysis_layers(self):
        """Clear path tracing and segmentation layers when scaling changes"""
        layers_to_remove = []

        for layer in self.viewer.layers:
            layer_name = layer.name.lower()
            if any(keyword in layer_name for keyword in [
                'path', 'segmentation', 'spine', 'waypoint', 'traced'
            ]):
                layers_to_remove.append(layer)

        for layer in layers_to_remove:
            self.viewer.layers.remove(layer)

        napari.utils.notifications.show_info("Cleared analysis layers due to scaling change")

    def get_current_image(self):
        """Get the currently scaled image"""
        return self.current_image

    def get_current_spacing(self):
        """Get current voxel spacing in (x, y, z) format"""
        return self.scaler.get_effective_spacing()

    def scale_coordinates_to_original(self, coordinates):
        """
        Convert coordinates from current scaled space to original image space
        Useful for saving results that reference the original image
        """
        return self.scaler.unscale_coordinates(coordinates)

    def scale_coordinates_from_original(self, coordinates):
        """
        Convert coordinates from original image space to current scaled space
        Useful for loading previous results
        """
        return self.scaler.scale_coordinates(coordinates)
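The module above is usable outside the napari widgets as well. Below is a minimal usage sketch, not shipped in the wheel: the import path is taken from the file listing above, and the synthetic volume and spacings are illustrative stand-ins. It shows the resample-then-map-coordinates workflow that AnisotropicScaler implements and that ScalingWidget and ScaledNeuroSAMWidget wrap behind Qt controls.

# Illustrative sketch only (not part of the package); import path assumed
# from the file listing above.
import numpy as np

from neuro_sam.napari_utils.anisotropic_scaling import AnisotropicScaler

# Synthetic (Z, Y, X) stack standing in for a real dendrite volume.
volume = np.random.rand(20, 128, 128).astype(np.float32)

# Acquisition spacing: 94 nm in X/Y, 500 nm between Z planes (the class default).
scaler = AnisotropicScaler(original_spacing_xyz=(94.0, 94.0, 500.0))

# Ask for 94 nm along Z too, i.e. resample toward isotropic voxels.
scaler.set_spacing(94.0, 94.0, 94.0)
scaled = scaler.scale_image(volume, order=1)  # linear interpolation
print(volume.shape, "->", scaled.shape)       # Z dimension grows by about 500/94

# Point annotations follow the same (Z, Y, X) factors and round-trip back.
points = np.array([[10.0, 64.0, 64.0], [5.0, 30.0, 100.0]])
scaled_points = scaler.scale_coordinates(points)
restored = scaler.unscale_coordinates(scaled_points)
assert np.allclose(restored, points)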
neuro_sam/napari_utils/color_utils.py
@@ -0,0 +1,135 @@
import random
import colorsys

def generate_random_color(exclude_neon=True, saturation_range=(0.4, 0.8), brightness_range=(0.5, 0.9)):
    """
    Generate a random color for dendrite segmentation masks.

    Args:
        exclude_neon: If True, avoid very bright/saturated colors (reserved for spines)
        saturation_range: Range for color saturation (0.0 to 1.0)
        brightness_range: Range for color brightness (0.0 to 1.0)

    Returns:
        Tuple of (R, G, B) values in range 0.0-1.0
    """
    # Generate random hue (0.0-1.0, as expected by colorsys)
    hue = random.uniform(0, 1)

    # Generate saturation and brightness within specified ranges
    if exclude_neon:
        # More muted colors for dendrites
        saturation = random.uniform(saturation_range[0], saturation_range[1])
        brightness = random.uniform(brightness_range[0], brightness_range[1])
    else:
        # Allow full range including neon colors
        saturation = random.uniform(0.0, 1.0)
        brightness = random.uniform(0.0, 1.0)

    # Convert HSV to RGB
    rgb = colorsys.hsv_to_rgb(hue, saturation, brightness)

    return rgb

def get_neon_colors():
    """
    Get a list of predefined neon colors for spine segmentation.

    Returns:
        List of (R, G, B) tuples for neon colors
    """
    neon_colors = [
        (0.0, 1.0, 0.0),   # Neon green
        (1.0, 0.0, 1.0),   # Neon magenta
        (0.0, 1.0, 1.0),   # Neon cyan
        (1.0, 1.0, 0.0),   # Neon yellow
        (1.0, 0.27, 0.0),  # Neon orange-red
        (0.5, 0.0, 1.0),   # Neon purple
        (0.0, 0.5, 1.0),   # Neon blue
        (1.0, 0.0, 0.5),   # Neon pink
        (0.5, 1.0, 0.0),   # Neon lime
        (1.0, 0.5, 0.0),   # Neon orange
    ]
    return neon_colors

def get_next_neon_color(index=None):
    """
    Get the next neon color from the predefined list.

    Args:
        index: Optional index to get a specific color; if None, a random one is chosen

    Returns:
        (R, G, B) tuple for neon color
    """
    neon_colors = get_neon_colors()

    if index is None:
        return random.choice(neon_colors)
    else:
        return neon_colors[index % len(neon_colors)]

class ColorManager:
    """
    Manages colors for different types of segmentation masks.
    """

    def __init__(self):
        self.used_dendrite_colors = []
        self.neon_color_index = 0

    def get_dendrite_color(self):
        """
        Get a random color for dendrite segmentation.
        Tries to avoid colors too similar to ones already handed out.

        Returns:
            (R, G, B) tuple
        """
        max_attempts = 50
        attempts = 0

        while attempts < max_attempts:
            color = generate_random_color(exclude_neon=True)

            # Check if this color is too similar to existing ones
            is_unique = True
            for used_color in self.used_dendrite_colors:
                # Calculate color distance (simple Euclidean distance in RGB space)
                distance = sum((a - b) ** 2 for a, b in zip(color, used_color)) ** 0.5
                if distance < 0.3:  # Minimum distance threshold
                    is_unique = False
                    break

            if is_unique:
                self.used_dendrite_colors.append(color)
                return color

            attempts += 1

        # If we can't find a unique color, just return a random one
        fallback_color = generate_random_color(exclude_neon=True)
        self.used_dendrite_colors.append(fallback_color)
        return fallback_color

    def get_spine_color(self):
        """
        Get the next neon color for spine segmentation.

        Returns:
            (R, G, B) tuple
        """
        color = get_next_neon_color(self.neon_color_index)
        self.neon_color_index += 1
        return color

    def reset_dendrite_colors(self):
        """Reset the used dendrite colors list."""
        self.used_dendrite_colors = []

    def reset_spine_colors(self):
        """Reset the spine color index."""
        self.neon_color_index = 0

# Global color manager instance
color_manager = ColorManager()
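To close, a short hedged sketch (again not part of the package; the import path is assumed from the file listing) of how these helpers fit together: dendrite masks draw muted, mutually distinct random colors, spine masks cycle through the fixed neon palette, and callers can either build their own ColorManager or reuse the module-level color_manager singleton.

# Illustrative sketch only (not part of the package); import path assumed.
from neuro_sam.napari_utils.color_utils import ColorManager, get_neon_colors

manager = ColorManager()

# Muted colors for three dendrite masks; the manager tries to keep them distinct.
dendrite_colors = [manager.get_dendrite_color() for _ in range(3)]

# Spine colors are deterministic: they walk the neon palette in order.
spine_colors = [manager.get_spine_color() for _ in range(3)]
assert spine_colors == get_neon_colors()[:3]

# Starting a new image: forget previous picks so colors can be reused.
manager.reset_dendrite_colors()
manager.reset_spine_colors()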