napari-tmidas 0.2.2__py3-none-any.whl → 0.2.5__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- napari_tmidas/__init__.py +35 -5
- napari_tmidas/_crop_anything.py +1520 -609
- napari_tmidas/_env_manager.py +76 -0
- napari_tmidas/_file_conversion.py +1646 -1131
- napari_tmidas/_file_selector.py +1455 -216
- napari_tmidas/_label_inspection.py +83 -8
- napari_tmidas/_processing_worker.py +309 -0
- napari_tmidas/_reader.py +6 -10
- napari_tmidas/_registry.py +2 -2
- napari_tmidas/_roi_colocalization.py +1221 -84
- napari_tmidas/_tests/test_crop_anything.py +123 -0
- napari_tmidas/_tests/test_env_manager.py +89 -0
- napari_tmidas/_tests/test_grid_view_overlay.py +193 -0
- napari_tmidas/_tests/test_init.py +98 -0
- napari_tmidas/_tests/test_intensity_label_filter.py +222 -0
- napari_tmidas/_tests/test_label_inspection.py +86 -0
- napari_tmidas/_tests/test_processing_basic.py +500 -0
- napari_tmidas/_tests/test_processing_worker.py +142 -0
- napari_tmidas/_tests/test_regionprops_analysis.py +547 -0
- napari_tmidas/_tests/test_registry.py +70 -2
- napari_tmidas/_tests/test_scipy_filters.py +168 -0
- napari_tmidas/_tests/test_skimage_filters.py +259 -0
- napari_tmidas/_tests/test_split_channels.py +217 -0
- napari_tmidas/_tests/test_spotiflow.py +87 -0
- napari_tmidas/_tests/test_tyx_display_fix.py +142 -0
- napari_tmidas/_tests/test_ui_utils.py +68 -0
- napari_tmidas/_tests/test_widget.py +30 -0
- napari_tmidas/_tests/test_windows_basic.py +66 -0
- napari_tmidas/_ui_utils.py +57 -0
- napari_tmidas/_version.py +16 -3
- napari_tmidas/_widget.py +41 -4
- napari_tmidas/processing_functions/basic.py +557 -20
- napari_tmidas/processing_functions/careamics_env_manager.py +72 -99
- napari_tmidas/processing_functions/cellpose_env_manager.py +415 -112
- napari_tmidas/processing_functions/cellpose_segmentation.py +132 -191
- napari_tmidas/processing_functions/colocalization.py +513 -56
- napari_tmidas/processing_functions/grid_view_overlay.py +703 -0
- napari_tmidas/processing_functions/intensity_label_filter.py +422 -0
- napari_tmidas/processing_functions/regionprops_analysis.py +1280 -0
- napari_tmidas/processing_functions/sam2_env_manager.py +53 -69
- napari_tmidas/processing_functions/sam2_mp4.py +274 -195
- napari_tmidas/processing_functions/scipy_filters.py +403 -8
- napari_tmidas/processing_functions/skimage_filters.py +424 -212
- napari_tmidas/processing_functions/spotiflow_detection.py +949 -0
- napari_tmidas/processing_functions/spotiflow_env_manager.py +591 -0
- napari_tmidas/processing_functions/timepoint_merger.py +334 -86
- {napari_tmidas-0.2.2.dist-info → napari_tmidas-0.2.5.dist-info}/METADATA +71 -30
- napari_tmidas-0.2.5.dist-info/RECORD +63 -0
- napari_tmidas/_tests/__init__.py +0 -0
- napari_tmidas-0.2.2.dist-info/RECORD +0 -40
- {napari_tmidas-0.2.2.dist-info → napari_tmidas-0.2.5.dist-info}/WHEEL +0 -0
- {napari_tmidas-0.2.2.dist-info → napari_tmidas-0.2.5.dist-info}/entry_points.txt +0 -0
- {napari_tmidas-0.2.2.dist-info → napari_tmidas-0.2.5.dist-info}/licenses/LICENSE +0 -0
- {napari_tmidas-0.2.2.dist-info → napari_tmidas-0.2.5.dist-info}/top_level.txt +0 -0
@@ -16,10 +16,319 @@ from napari_tmidas._registry import BatchProcessingRegistry
 
 if SCIPY_AVAILABLE:
 
+    @BatchProcessingRegistry.register(
+        name="Resize Labels (Nearest, SciPy)",
+        suffix="_scaled",
+        description="Resize a label mask or label image by a scale factor using nearest-neighbor interpolation (scipy.ndimage.zoom, grid_mode=True) to preserve label integrity without shifting position.",
+        parameters={
+            "scale_factor": {
+                "type": "float",
+                "default": 1.0,
+                "min": 0.01,
+                "max": 10.0,
+                "description": "Factor by which to resize the label image (e.g., 0.8 for 80% size, 1.2 for 120% size). 1.0 means no resizing.",
+            },
+        },
+    )
+    def resize_labels(
+        label_image: np.ndarray, scale_factor: float = 1.0
+    ) -> np.ndarray:
+        """
+        Resize labeled objects while maintaining original array dimensions.
+
+        Objects are scaled isotropically and centered within the original
+        coordinate system, preserving spatial relationships with other data.
+
+        Parameters
+        ----------
+        label_image : np.ndarray
+            3D label image where each unique value represents a distinct object
+        scale_factor : float
+            Scaling factor (e.g., 0.8 = 80% size, 1.2 = 120% size)
+
+        Returns
+        -------
+        np.ndarray
+            Resized label image with same dimensions as input
+        """
+        import numpy as np
+        from scipy.ndimage import zoom
+
+        scale_factor = float(scale_factor)
+        if scale_factor == 1.0:
+            return label_image.copy()
+
+        original_shape = np.array(label_image.shape)
+
+        # Resize the labeled objects
+        scaled = zoom(
+            label_image,
+            zoom=scale_factor,
+            order=0,  # Preserve label values
+            grid_mode=True,  # Consistent coordinate system
+            mode="grid-constant",
+            cval=0,
+        ).astype(label_image.dtype)
+
+        new_shape = np.array(scaled.shape)
+        result = np.zeros(original_shape, dtype=label_image.dtype)
+
+        # Center the resized objects in the original array
+        offset = ((original_shape - new_shape) / 2).astype(int)
+
+        if scale_factor < 1.0:
+            # Place smaller objects in center
+            slices = tuple(slice(o, o + s) for o, s in zip(offset, new_shape))
+            result[slices] = scaled
+        else:
+            # Extract center region from larger objects
+            slices = tuple(
+                slice(-o if o < 0 else 0, s - o if o < 0 else s)
+                for o, s in zip(offset, original_shape)
+            )
+            result = scaled[slices]
+
+        return result
+
+    @BatchProcessingRegistry.register(
+        name="Subdivide Labels into 3 Layers",
+        suffix="_layers",
+        description="Subdivide each labeled object into 3 concentric layers and return a single label image where each layer receives a unique ID offset.",
+        parameters={
+            "is_half_body": {
+                "type": bool,
+                "default": False,
+                "description": "Enable this if the object is cut in half (e.g., half-spheroid). This will create layers as if it were a full body, so the cut surface shows inner/middle/outer layers.",
+            },
+            "cut_axis": {
+                "type": int,
+                "default": 0,
+                "min": 0,
+                "max": 2,
+                "description": "For half-bodies: which axis the object is cut along (0=Z, 1=Y, 2=X). Only the cut axis will be scaled 2x, not all dimensions.",
+            },
+        },
+    )
+    def subdivide_labels_3layers(
+        label_image: np.ndarray, is_half_body: bool = False, cut_axis: int = 0
+    ) -> np.ndarray:
+        """Subdivide labeled objects into three concentric layers.
+
+        Each object is partitioned into inner, middle, and outer shells of approximately
+        equal thickness. The layers are combined into a single label image using
+        non-overlapping ID ranges so they remain distinguishable.
+
+        Parameters
+        ----------
+        label_image : np.ndarray
+            Label image where each unique value represents a distinct object.
+        is_half_body : bool, optional
+            If True, treats the object as a half-body (e.g., half-spheroid cut at a plane).
+            Only the specified cut_axis will be scaled by 2x (not all dimensions) to avoid
+            excessive memory usage. The cut surface will then show all three layers
+            (inner, middle, outer) as if it were the interior of a complete object.
+            Default is False.
+        cut_axis : int, optional
+            For half-bodies: specifies which axis the object is cut along (0=Z, 1=Y, 2=X).
+            Only this axis will be scaled 2x to virtually complete the object. For a
+            hemisphere cut horizontally, use axis 0 (Z). Default is 0.
+
+        Returns
+        -------
+        numpy.ndarray
+            Single label image containing all three layers with unique label IDs.
+        """
+        # Define scale factors for the three boundaries
+        # To get equal thickness, we need to think about the "radius" reduction
+        # For 3 equal layers, we want boundaries at 1.0, ~0.67, ~0.33
+        scale_middle = 0.67  # ~67% size
+        scale_inner = 0.33  # ~33% size
+
+        original_shape = np.array(label_image.shape)
+
+        # Store information for mapping back to original coordinates
+        half_body_offset = None
+        cut_at_beginning = None
+
+        # If it's a half-body, we need to virtually "complete" the object first
+        # by mirroring it along the cut axis to create a full object
+        if is_half_body:
+            # Validate cut_axis
+            if cut_axis < 0 or cut_axis >= label_image.ndim:
+                raise ValueError(
+                    f"cut_axis must be between 0 and {label_image.ndim - 1}, got {cut_axis}"
+                )
+
+            # Find bounding box of the object along cut_axis
+            axes_to_sum = tuple(
+                i for i in range(label_image.ndim) if i != cut_axis
+            )
+            projection = np.sum(label_image > 0, axis=axes_to_sum)
+            nonzero_indices = np.where(projection > 0)[0]
+
+            if len(nonzero_indices) == 0:
+                # Empty image
+                return np.zeros_like(label_image)
+
+            # Get bounding box along cut axis
+            bbox_start = nonzero_indices[0]
+            bbox_end = nonzero_indices[-1] + 1
+
+            # Extract just the object portion along cut_axis
+            extract_slices = [slice(None)] * label_image.ndim
+            extract_slices[cut_axis] = slice(bbox_start, bbox_end)
+            object_portion = label_image[tuple(extract_slices)]
+
+            # Determine which end has the cut surface (max area)
+            # by checking areas at both ends of the object portion
+            object_projection = np.sum(object_portion > 0, axis=axes_to_sum)
+            first_slice_area = object_projection[0]
+            last_slice_area = object_projection[-1]
+
+            # Mirror this portion to create a complete object
+            flipped = np.flip(object_portion, axis=cut_axis)
+
+            if first_slice_area >= last_slice_area:
+                # Cut surface is at the beginning, so concatenate [flipped, original]
+                # This places the cut surface (the first slice) in the middle
+                work_image = np.concatenate(
+                    [flipped, object_portion], axis=cut_axis
+                )
+                cut_at_beginning = True
+            else:
+                # Cut surface is at the end, so concatenate [original, flipped]
+                # This places the cut surface (the last slice) in the middle
+                work_image = np.concatenate(
+                    [object_portion, flipped], axis=cut_axis
+                )
+                cut_at_beginning = False
+
+            work_shape = np.array(work_image.shape)
+
+            # Remember the offset for mapping back
+            half_body_offset = bbox_start
+        else:
+            work_image = label_image
+            work_shape = original_shape
+
+        # Helper function to create a scaled version centered in working space
+        def create_scaled_labels(scale_factor):
+            if scale_factor == 1.0:
+                return work_image.copy()
+
+            scaled = ndimage.zoom(
+                work_image,
+                zoom=scale_factor,
+                order=0,  # Preserve label values
+                grid_mode=True,  # Consistent coordinate system
+                mode="grid-constant",
+                cval=0,
+            ).astype(work_image.dtype)
+
+            new_shape = np.array(scaled.shape)
+            result = np.zeros(work_shape, dtype=work_image.dtype)
+
+            # Center the resized objects in the working array
+            offset = ((work_shape - new_shape) / 2).astype(int)
+            slices = tuple(slice(o, o + s) for o, s in zip(offset, new_shape))
+            result[slices] = scaled
+
+            return result
+
+        # Create the three scaled versions
+        full_labels = work_image.copy()  # Outer boundary (100%)
+        middle_labels = create_scaled_labels(
+            scale_middle
+        )  # Middle boundary (~67%)
+        inner_labels = create_scaled_labels(
+            scale_inner
+        )  # Inner boundary (~33%)
+
+        # Layer 3 (outermost shell): Full - Middle
+        layer3 = full_labels.copy()
+        layer3[middle_labels > 0] = 0
+
+        # Layer 2 (middle shell): Middle - Inner
+        layer2 = middle_labels.copy()
+        layer2[inner_labels > 0] = 0
+
+        # Layer 1 (innermost core): Inner
+        layer1 = inner_labels.copy()
+
+        max_label = int(label_image.max()) if label_image.size else 0
+        if max_label == 0:
+            return np.zeros_like(label_image)
+
+        if np.issubdtype(label_image.dtype, np.integer):
+            max_needed = max_label * 3
+            dtype_choices = [label_image.dtype, np.uint32, np.uint64]
+            for dtype in dtype_choices:
+                try:
+                    info = np.iinfo(dtype)
+                except ValueError:
+                    continue
+                if max_needed <= info.max:
+                    result_dtype = dtype
+                    break
+            else:
+                result_dtype = np.uint64
+        else:
+            result_dtype = np.uint32
+
+        result = np.zeros(work_shape, dtype=result_dtype)
+
+        layer1_mask = layer1 > 0
+        if np.any(layer1_mask):
+            result[layer1_mask] = layer1[layer1_mask].astype(
+                result_dtype, copy=False
+            )
+
+        layer2_mask = layer2 > 0
+        if np.any(layer2_mask):
+            result[layer2_mask] = (
+                layer2[layer2_mask].astype(result_dtype, copy=False)
+                + max_label
+            )
+
+        layer3_mask = layer3 > 0
+        if np.any(layer3_mask):
+            result[layer3_mask] = layer3[layer3_mask].astype(
+                result_dtype, copy=False
+            ) + (2 * max_label)
+
+        # If half-body mode, extract back the original half and place in original coordinates
+        if is_half_body:
+            # Extract the appropriate half depending on where the cut surface was
+            slices = [slice(None)] * result.ndim
+            mid_point = work_shape[cut_axis] // 2
+
+            if cut_at_beginning:
+                # Cut surface was at the beginning, we concatenated [flipped, original]
+                # So extract the second half to get back the original
+                slices[cut_axis] = slice(mid_point, work_shape[cut_axis])
+            else:
+                # Cut surface was at the end, we concatenated [original, flipped]
+                # So extract the first half to get back the original
+                slices[cut_axis] = slice(0, mid_point)
+
+            result_object = result[tuple(slices)]
+
+            # Place back into original volume at original position
+            final_result = np.zeros(original_shape, dtype=result.dtype)
+            place_slices = [slice(None)] * result.ndim
+            place_slices[cut_axis] = slice(
+                half_body_offset,
+                half_body_offset + result_object.shape[cut_axis],
+            )
+            final_result[tuple(place_slices)] = result_object
+            result = final_result
+
+        return result
+
     @BatchProcessingRegistry.register(
         name="Gaussian Blur",
         suffix="_blurred",
-        description="Apply Gaussian blur to the image",
+        description="Apply Gaussian blur to the image. Supports dimension_order hint (TYX, ZYX, etc.) to process frame-by-frame or apply 3D blur.",
         parameters={
            "sigma": {
                "type": float,
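For orientation, a minimal sketch of the nearest-neighbor resize-and-recenter step performed by the new `resize_labels` function, written against `scipy.ndimage.zoom` and `numpy` only. The toy array, dtype, and scale factor below are illustrative assumptions, not part of the wheel contents:

```python
import numpy as np
from scipy.ndimage import zoom

# Toy 3D label image: one cube-shaped object with label 1 (assumed example).
labels = np.zeros((20, 20, 20), dtype=np.uint16)
labels[5:15, 5:15, 5:15] = 1

# Shrink to 50% with nearest-neighbor interpolation, then re-center the
# result in the original 20x20x20 grid -- the same two steps the function uses.
scaled = zoom(
    labels, zoom=0.5, order=0, grid_mode=True, mode="grid-constant", cval=0
).astype(labels.dtype)
recentered = np.zeros(labels.shape, dtype=labels.dtype)
offset = (np.array(labels.shape) - np.array(scaled.shape)) // 2
slices = tuple(slice(o, o + s) for o, s in zip(offset, scaled.shape))
recentered[slices] = scaled

assert recentered.shape == labels.shape      # dimensions preserved
assert set(np.unique(recentered)) <= {0, 1}  # label values preserved
```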
@@ -30,16 +339,53 @@ if SCIPY_AVAILABLE:
             }
         },
     )
-    def gaussian_blur(
+    def gaussian_blur(
+        image: np.ndarray, sigma: float = 1.0, dimension_order: str = "Auto"
+    ) -> np.ndarray:
         """
-        Apply Gaussian blur to the image
+        Apply Gaussian blur to the image.
+
+        Args:
+            image: Input image (YX, TYX, ZYX, CYX, TCYX, TZYX, etc.)
+            sigma: Standard deviation for Gaussian kernel
+            dimension_order: Dimension interpretation hint (Auto, YX, TYX, ZYX, CYX, TCYX, etc.)
+                If TYX/CYX: processes each frame/channel independently (2D blur per slice)
+                If ZYX: applies 3D blur to spatial volume
+                If YX or Auto: processes as-is
+
+        Returns:
+            Blurred image with same shape as input
         """
-
+        # Handle different dimension orders
+        if dimension_order in ["TYX", "CYX"] and len(image.shape) == 3:
+            # Process frame-by-frame or channel-by-channel (2D blur)
+            result = np.zeros_like(image)
+            for i in range(image.shape[0]):
+                result[i] = ndimage.gaussian_filter(image[i], sigma=sigma)
+            return result
+        elif (
+            dimension_order in ["TCYX", "TZYX", "ZCYX"]
+            and len(image.shape) == 4
+        ):
+            # Process each T/Z and C slice independently (2D blur)
+            result = np.zeros_like(image)
+            for i in range(image.shape[0]):
+                for j in range(image.shape[1]):
+                    result[i, j] = ndimage.gaussian_filter(
+                        image[i, j], sigma=sigma
+                    )
+            return result
+        elif dimension_order == "ZYX" and len(image.shape) == 3:
+            # Apply 3D blur to spatial volume
+            return ndimage.gaussian_filter(image, sigma=sigma)
+        else:
+            # YX, Auto, or other: process as-is
+            return ndimage.gaussian_filter(image, sigma=sigma)
 
     @BatchProcessingRegistry.register(
         name="Median Filter",
         suffix="_median",
-        description="Apply median filter for noise reduction",
+        description="Apply median filter for noise reduction. Supports dimension_order hint (TYX, ZYX, etc.) to process frame-by-frame or apply 3D filter.",
         parameters={
            "size": {
                "type": int,
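As a point of reference, a small sketch of the difference between the per-frame (TYX/CYX) and volumetric (ZYX) paths that the updated `gaussian_blur` selects from the `dimension_order` hint, assuming scipy and numpy are installed; the array shape and sigma are illustrative, and the function is bypassed here in favor of direct `ndimage` calls:

```python
import numpy as np
from scipy import ndimage

rng = np.random.default_rng(0)
stack = rng.random((5, 64, 64))  # could be 5 time points (TYX) or 5 z-slices (ZYX)

# dimension_order="TYX": each frame is blurred independently in 2D,
# so no smoothing leaks across the first (time/channel) axis.
per_frame = np.stack(
    [ndimage.gaussian_filter(frame, sigma=2.0) for frame in stack]
)

# dimension_order="ZYX": the same array is treated as one volume and
# blurred in 3D, coupling neighbouring slices along the first axis.
volumetric = ndimage.gaussian_filter(stack, sigma=2.0)

assert per_frame.shape == volumetric.shape == stack.shape
```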
@@ -50,8 +396,57 @@ if SCIPY_AVAILABLE:
             }
         },
     )
-    def median_filter(
+    def median_filter(
+        image: np.ndarray, size: int = 3, dimension_order: str = "Auto"
+    ) -> np.ndarray:
         """
-        Apply median filter for noise reduction
+        Apply median filter for noise reduction.
+
+        Args:
+            image: Input image (YX, TYX, ZYX, CYX, TCYX, TZYX, etc.)
+            size: Size of the median filter window
+            dimension_order: Dimension interpretation hint (Auto, YX, TYX, ZYX, CYX, TCYX, etc.)
+                If TYX/CYX: processes each frame/channel independently (2D filter per slice)
+                If ZYX: applies 3D filter to spatial volume
+                If YX or Auto: processes as-is
+
+        Returns:
+            Filtered image with same shape as input
         """
-
+        # Handle different dimension orders
+        if dimension_order in ["TYX", "CYX"] and len(image.shape) == 3:
+            # Process frame-by-frame or channel-by-channel (2D filter)
+            result = np.zeros_like(image)
+            for i in range(image.shape[0]):
+                result[i] = ndimage.median_filter(image[i], size=size)
+            return result
+        elif (
+            dimension_order in ["TCYX", "TZYX", "ZCYX"]
+            and len(image.shape) == 4
+        ):
+            # Process each T/Z and C slice independently (2D filter)
+            result = np.zeros_like(image)
+            for i in range(image.shape[0]):
+                for j in range(image.shape[1]):
+                    result[i, j] = ndimage.median_filter(
+                        image[i, j], size=size
+                    )
+            return result
+        elif dimension_order == "ZYX" and len(image.shape) == 3:
+            # Apply 3D filter to spatial volume
+            return ndimage.median_filter(image, size=size)
+        else:
+            # YX, Auto, or other: process as-is
+            return ndimage.median_filter(image, size=size)
+
+else:
+    # Export stub functions that raise ImportError when called
+    def gaussian_blur(*args, **kwargs):
+        raise ImportError(
+            "SciPy is not available. Please install scipy to use this function."
+        )
+
+    def median_filter(*args, **kwargs):
+        raise ImportError(
+            "SciPy is not available. Please install scipy to use this function."
+        )