napari-tmidas 0.2.2__py3-none-any.whl → 0.2.5__py3-none-any.whl
- napari_tmidas/__init__.py +35 -5
- napari_tmidas/_crop_anything.py +1520 -609
- napari_tmidas/_env_manager.py +76 -0
- napari_tmidas/_file_conversion.py +1646 -1131
- napari_tmidas/_file_selector.py +1455 -216
- napari_tmidas/_label_inspection.py +83 -8
- napari_tmidas/_processing_worker.py +309 -0
- napari_tmidas/_reader.py +6 -10
- napari_tmidas/_registry.py +2 -2
- napari_tmidas/_roi_colocalization.py +1221 -84
- napari_tmidas/_tests/test_crop_anything.py +123 -0
- napari_tmidas/_tests/test_env_manager.py +89 -0
- napari_tmidas/_tests/test_grid_view_overlay.py +193 -0
- napari_tmidas/_tests/test_init.py +98 -0
- napari_tmidas/_tests/test_intensity_label_filter.py +222 -0
- napari_tmidas/_tests/test_label_inspection.py +86 -0
- napari_tmidas/_tests/test_processing_basic.py +500 -0
- napari_tmidas/_tests/test_processing_worker.py +142 -0
- napari_tmidas/_tests/test_regionprops_analysis.py +547 -0
- napari_tmidas/_tests/test_registry.py +70 -2
- napari_tmidas/_tests/test_scipy_filters.py +168 -0
- napari_tmidas/_tests/test_skimage_filters.py +259 -0
- napari_tmidas/_tests/test_split_channels.py +217 -0
- napari_tmidas/_tests/test_spotiflow.py +87 -0
- napari_tmidas/_tests/test_tyx_display_fix.py +142 -0
- napari_tmidas/_tests/test_ui_utils.py +68 -0
- napari_tmidas/_tests/test_widget.py +30 -0
- napari_tmidas/_tests/test_windows_basic.py +66 -0
- napari_tmidas/_ui_utils.py +57 -0
- napari_tmidas/_version.py +16 -3
- napari_tmidas/_widget.py +41 -4
- napari_tmidas/processing_functions/basic.py +557 -20
- napari_tmidas/processing_functions/careamics_env_manager.py +72 -99
- napari_tmidas/processing_functions/cellpose_env_manager.py +415 -112
- napari_tmidas/processing_functions/cellpose_segmentation.py +132 -191
- napari_tmidas/processing_functions/colocalization.py +513 -56
- napari_tmidas/processing_functions/grid_view_overlay.py +703 -0
- napari_tmidas/processing_functions/intensity_label_filter.py +422 -0
- napari_tmidas/processing_functions/regionprops_analysis.py +1280 -0
- napari_tmidas/processing_functions/sam2_env_manager.py +53 -69
- napari_tmidas/processing_functions/sam2_mp4.py +274 -195
- napari_tmidas/processing_functions/scipy_filters.py +403 -8
- napari_tmidas/processing_functions/skimage_filters.py +424 -212
- napari_tmidas/processing_functions/spotiflow_detection.py +949 -0
- napari_tmidas/processing_functions/spotiflow_env_manager.py +591 -0
- napari_tmidas/processing_functions/timepoint_merger.py +334 -86
- {napari_tmidas-0.2.2.dist-info → napari_tmidas-0.2.5.dist-info}/METADATA +71 -30
- napari_tmidas-0.2.5.dist-info/RECORD +63 -0
- napari_tmidas/_tests/__init__.py +0 -0
- napari_tmidas-0.2.2.dist-info/RECORD +0 -40
- {napari_tmidas-0.2.2.dist-info → napari_tmidas-0.2.5.dist-info}/WHEEL +0 -0
- {napari_tmidas-0.2.2.dist-info → napari_tmidas-0.2.5.dist-info}/entry_points.txt +0 -0
- {napari_tmidas-0.2.2.dist-info → napari_tmidas-0.2.5.dist-info}/licenses/LICENSE +0 -0
- {napari_tmidas-0.2.2.dist-info → napari_tmidas-0.2.5.dist-info}/top_level.txt +0 -0

napari_tmidas/processing_functions/skimage_filters.py

@@ -16,59 +16,255 @@ except ImportError:
         "scikit-image not available, some processing functions will be disabled"
     )
 
-import contextlib
-import os
 
-
+# Lazy imports for optional heavy dependencies
+try:
+    import pandas as pd
+
+    _HAS_PANDAS = True
+except ImportError:
+    pd = None
+    _HAS_PANDAS = False
 
-from napari_tmidas._file_selector import ProcessingWorker
 from napari_tmidas._registry import BatchProcessingRegistry
 
 if SKIMAGE_AVAILABLE:
 
-    #
+    # CLAHE (Contrast Limited Adaptive Histogram Equalization)
     @BatchProcessingRegistry.register(
-        name="
-        suffix="
-        description="
+        name="CLAHE (Adaptive Histogram Equalization)",
+        suffix="_clahe",
+        description="Apply Contrast Limited Adaptive Histogram Equalization (CLAHE) to enhance local contrast, especially useful for dark images with weak bright features",
+        parameters={
+            "clip_limit": {
+                "type": float,
+                "default": 0.01,
+                "description": "Clipping limit for contrast (0.01 = 1%). Higher values give more contrast but may amplify noise. Range: 0.001-0.1",
+            },
+            "kernel_size": {
+                "type": int,
+                "default": 0,
+                "description": "Size of the local region (0 = auto-calculate based on image size). For small features use smaller values (e.g., 32), for large features use larger values (e.g., 128)",
+            },
+        },
    )
     def equalize_histogram(
-        image: np.ndarray, clip_limit: float = 0.01
+        image: np.ndarray, clip_limit: float = 0.01, kernel_size: int = 0
     ) -> np.ndarray:
         """
-
+        Apply CLAHE (Contrast Limited Adaptive Histogram Equalization) to enhance local contrast.
+
+        This is much better than standard histogram equalization for dark images with
+        weak bright features like membranes, as it works locally and prevents over-brightening
+        of background regions.
+
+        Parameters
+        ----------
+        image : np.ndarray
+            Input image
+        clip_limit : float
+            Clipping limit for contrast limiting (normalized to 0-1 range, e.g., 0.01 = 1%)
+            Higher values give more contrast but may amplify noise
+        kernel_size : int
+            Size of the contextual regions (0 = auto-calculate based on image size)
+
+        Returns
+        -------
+        np.ndarray
+            CLAHE-enhanced image with same dtype as input
         """
+        # Store original dtype to convert back later
+        original_dtype = image.dtype
+
+        # Auto-calculate kernel size if not specified
+        if kernel_size <= 0:
+            # Use 1/8 of the smaller dimension, but cap between 16 and 128
+            min_dim = min(
+                image.shape[-2:]
+            )  # Last 2 dimensions are spatial (Y, X)
+            kernel_size = max(16, min(128, min_dim // 8))
+
+        # Ensure kernel_size is odd
+        if kernel_size % 2 == 0:
+            kernel_size += 1
+
+        # Apply CLAHE using scikit-image's equalize_adapthist
+        # Note: clip_limit in equalize_adapthist is already normalized (0-1 range)
+        # This returns float64 in range [0, 1]
+        result = skimage.exposure.equalize_adapthist(
+            image, kernel_size=kernel_size, clip_limit=clip_limit
+        )
 
-
+        # Convert back to original dtype to preserve compatibility
+        if np.issubdtype(original_dtype, np.integer):
+            # For integer types, scale back to original range
+            iinfo = np.iinfo(original_dtype)
+            result = (result * (iinfo.max - iinfo.min) + iinfo.min).astype(
+                original_dtype
+            )
+        else:
+            # For float types, keep as is but match dtype
+            result = result.astype(original_dtype)
+
+        return result
 
     # simple otsu thresholding
     @BatchProcessingRegistry.register(
         name="Otsu Thresholding (semantic)",
         suffix="_otsu_semantic",
-        description="Threshold image using Otsu's method to obtain a binary image",
+        description="Threshold image using Otsu's method to obtain a binary image. Supports dimension_order hint (TYX, ZYX, etc.) to process frame-by-frame or slice-by-slice.",
     )
-    def otsu_thresholding(
-        ""
-
+    def otsu_thresholding(
+        image: np.ndarray, dimension_order: str = "Auto"
+    ) -> np.ndarray:
         """
+        Threshold image using Otsu's method.
 
+        Args:
+            image: Input image (YX, TYX, ZYX, CYX, TCYX, TZYX, etc.)
+            dimension_order: Dimension interpretation hint (Auto, YX, TYX, ZYX, CYX, TCYX, etc.)
+                If TYX/ZYX/TCYX/TZYX: processes each frame/slice independently
+                If CYX: processes each channel independently
+                If YX or Auto: processes as single 2D image
+
+        Returns:
+            Binary image with same shape as input (255=foreground, 0=background)
+        """
         image = skimage.img_as_ubyte(image)  # convert to 8-bit
-
-
+
+        # Handle different dimension orders
+        if dimension_order in ["TYX", "ZYX", "TCYX", "TZYX", "ZCYX", "TZCYX"]:
+            # Process frame-by-frame or slice-by-slice
+            result = np.zeros_like(image, dtype=np.uint8)
+
+            # Determine which axes to iterate over
+            if len(image.shape) == 3:  # TYX or ZYX
+                for i in range(image.shape[0]):
+                    thresh = skimage.filters.threshold_otsu(image[i])
+                    result[i] = np.where(image[i] > thresh, 255, 0).astype(
+                        np.uint8
+                    )
+            elif len(image.shape) == 4:  # TCYX, TZYX, ZCYX
+                for i in range(image.shape[0]):
+                    for j in range(image.shape[1]):
+                        thresh = skimage.filters.threshold_otsu(image[i, j])
+                        result[i, j] = np.where(
+                            image[i, j] > thresh, 255, 0
+                        ).astype(np.uint8)
+            elif len(image.shape) == 5:  # TZCYX
+                for i in range(image.shape[0]):
+                    for j in range(image.shape[1]):
+                        for k in range(image.shape[2]):
+                            thresh = skimage.filters.threshold_otsu(
+                                image[i, j, k]
+                            )
+                            result[i, j, k] = np.where(
+                                image[i, j, k] > thresh, 255, 0
+                            ).astype(np.uint8)
+            else:
+                # Fallback for unexpected shapes
+                thresh = skimage.filters.threshold_otsu(image)
+                result = np.where(image > thresh, 255, 0).astype(np.uint8)
+
+            return result
+        elif dimension_order == "CYX":
+            # Process each channel independently
+            if len(image.shape) >= 3:
+                result = np.zeros_like(image, dtype=np.uint8)
+                for i in range(image.shape[0]):
+                    thresh = skimage.filters.threshold_otsu(image[i])
+                    result[i] = np.where(image[i] > thresh, 255, 0).astype(
+                        np.uint8
+                    )
+                return result
+            else:
+                # Fallback if not actually multi-channel
+                thresh = skimage.filters.threshold_otsu(image)
+                return np.where(image > thresh, 255, 0).astype(np.uint8)
+        else:
+            # YX or Auto: process as single image
+            thresh = skimage.filters.threshold_otsu(image)
+            return np.where(image > thresh, 255, 0).astype(np.uint8)
 
     # instance segmentation
     @BatchProcessingRegistry.register(
         name="Otsu Thresholding (instance)",
         suffix="_otsu_labels",
-        description="Threshold image using Otsu's method to obtain a multi-label image",
+        description="Threshold image using Otsu's method to obtain a multi-label image. Supports dimension_order hint (TYX, ZYX, etc.) to process frame-by-frame or slice-by-slice.",
     )
-    def otsu_thresholding_instance(
+    def otsu_thresholding_instance(
+        image: np.ndarray, dimension_order: str = "Auto"
+    ) -> np.ndarray:
         """
-        Threshold image using Otsu's method
+        Threshold image using Otsu's method to create instance labels.
+
+        Args:
+            image: Input image (YX, TYX, ZYX, CYX, TCYX, TZYX, etc.)
+            dimension_order: Dimension interpretation hint (Auto, YX, TYX, ZYX, CYX, TCYX, etc.)
+                If TYX/ZYX/TCYX/TZYX: processes each frame/slice independently
+                If CYX: processes each channel independently
+                If YX or Auto: processes as single 2D image
+
+        Returns:
+            Label image with same shape as input (0=background, 1,2,3...=objects)
         """
         image = skimage.img_as_ubyte(image)  # convert to 8-bit
-
-
+
+        # Handle different dimension orders
+        if dimension_order in ["TYX", "ZYX", "TCYX", "TZYX", "ZCYX", "TZCYX"]:
+            # Process frame-by-frame or slice-by-slice
+            result = np.zeros_like(image, dtype=np.uint32)
+
+            # Determine which axes to iterate over
+            if len(image.shape) == 3:  # TYX or ZYX
+                for i in range(image.shape[0]):
+                    thresh = skimage.filters.threshold_otsu(image[i])
+                    result[i] = skimage.measure.label(
+                        image[i] > thresh
+                    ).astype(np.uint32)
+            elif len(image.shape) == 4:  # TCYX, TZYX, ZCYX
+                for i in range(image.shape[0]):
+                    for j in range(image.shape[1]):
+                        thresh = skimage.filters.threshold_otsu(image[i, j])
+                        result[i, j] = skimage.measure.label(
+                            image[i, j] > thresh
+                        ).astype(np.uint32)
+            elif len(image.shape) == 5:  # TZCYX
+                for i in range(image.shape[0]):
+                    for j in range(image.shape[1]):
+                        for k in range(image.shape[2]):
+                            thresh = skimage.filters.threshold_otsu(
+                                image[i, j, k]
+                            )
+                            result[i, j, k] = skimage.measure.label(
+                                image[i, j, k] > thresh
+                            ).astype(np.uint32)
+            else:
+                # Fallback for unexpected shapes
+                thresh = skimage.filters.threshold_otsu(image)
+                result = skimage.measure.label(image > thresh).astype(
+                    np.uint32
+                )
+
+            return result
+        elif dimension_order == "CYX":
+            # Process each channel independently
+            if len(image.shape) >= 3:
+                result = np.zeros_like(image, dtype=np.uint32)
+                for i in range(image.shape[0]):
+                    thresh = skimage.filters.threshold_otsu(image[i])
+                    result[i] = skimage.measure.label(
+                        image[i] > thresh
+                    ).astype(np.uint32)
+                return result
+            else:
+                # Fallback if not actually multi-channel
+                thresh = skimage.filters.threshold_otsu(image)
+                return skimage.measure.label(image > thresh).astype(np.uint32)
+        else:
+            # YX or Auto: process as single image
+            thresh = skimage.filters.threshold_otsu(image)
+            return skimage.measure.label(image > thresh).astype(np.uint32)
 
     # simple thresholding
     @BatchProcessingRegistry.register(
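BatchProcessingRegistry itself is not part of this diff, so purely as a mental model: a registry decorator of this shape typically stores each function together with its UI metadata in a class-level dict. The sketch below is hypothetical (names and structure are assumptions, not the plugin's code):

```python
from typing import Callable, Optional


class BatchProcessingRegistry:  # hypothetical sketch, NOT the plugin's code
    """Rough mental model of a batch-processing registry decorator."""

    _functions: dict = {}

    @classmethod
    def register(
        cls,
        name: str,
        suffix: str = "",
        description: str = "",
        parameters: Optional[dict] = None,
    ) -> Callable:
        def decorator(func: Callable) -> Callable:
            # Store the callable together with its UI metadata
            cls._functions[name] = {
                "func": func,
                "suffix": suffix,
                "description": description,
                "parameters": parameters or {},
            }
            return func  # the decorated function itself is unchanged

        return decorator
```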
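A minimal sketch of exercising the new equalize_histogram's core call outside the batch pipeline (assumes numpy and scikit-image are installed; the synthetic image is purely illustrative):

```python
import numpy as np
from skimage import exposure

# Synthetic dark uint16 image with one weak bright stripe (illustrative)
rng = np.random.default_rng(0)
img = rng.poisson(40, size=(256, 256)).astype(np.uint16)
img[120:136, :] += 300

# Same core call the plugin makes; clip_limit is normalized to the 0-1 range
out = exposure.equalize_adapthist(img, kernel_size=33, clip_limit=0.01)

# equalize_adapthist returns float64 in [0, 1]; rescale as the plugin does
out_u16 = (out * np.iinfo(np.uint16).max).astype(np.uint16)
```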
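The two Otsu functions above unroll each supported axis layout explicitly. As an illustrative equivalent (not the plugin's code), the same per-plane pass can be written once over any number of leading axes; swapping the np.where line for skimage.measure.label(plane > thresh) gives the instance variant, keeping in mind that per-plane labeling restarts IDs at 1 in every plane:

```python
import numpy as np
from skimage.filters import threshold_otsu


def otsu_per_plane(image: np.ndarray) -> np.ndarray:
    """Otsu-threshold every trailing (Y, X) plane; works for YX, TYX, TZYX, ..."""
    result = np.zeros(image.shape, dtype=np.uint8)
    # np.ndindex over the leading axes visits every frame/slice/channel combination
    for idx in np.ndindex(image.shape[:-2]):
        plane = image[idx]
        result[idx] = np.where(plane > threshold_otsu(plane), 255, 0)
    return result
```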
@@ -93,7 +289,9 @@ if SKIMAGE_AVAILABLE:
         """
         # convert to 8-bit
         image = skimage.img_as_ubyte(image)
-
+        # Return 255 for values above threshold, 0 for values below
+        # This ensures the binary image is visible when viewed as a regular image
+        return np.where(image > threshold, 255, 0).astype(np.uint8)
 
     # remove small objects
     @BatchProcessingRegistry.register(
@@ -212,199 +410,26 @@ if SKIMAGE_AVAILABLE:
 
         return result.astype(np.uint32)
 
-
-
-
-
-        parameters={
-            "properties": {
-                "type": str,
-                "default": "area,bbox,centroid,eccentricity,euler_number,perimeter",
-                "description": "Comma-separated list of properties to extract (e.g., area,perimeter,centroid)",
-            },
-            "intensity_image": {
-                "type": bool,
-                "default": False,
-                "description": "Use input as intensity image for intensity-based measurements",
-            },
-            "min_area": {
-                "type": int,
-                "default": 0,
-                "min": 0,
-                "max": 100000,
-                "description": "Minimum area to include in results (pixels)",
-            },
-        },
-    )
-    def extract_region_properties(
-        image: np.ndarray,
-        properties: str = "area,bbox,centroid,eccentricity,euler_number,perimeter",
-        intensity_image: bool = False,
-        min_area: int = 0,
-    ) -> np.ndarray:
-        """
-        Extract properties of labeled regions in an image and save results as CSV.
-
-        This function analyzes all labeled regions in a label image and computes
-        various region properties like area, perimeter, centroid, etc. The results
-        are saved as a CSV file. The input image is returned unchanged.
-
-        Parameters:
-        -----------
-        image : numpy.ndarray
-            Input label image (instance segmentation)
-        properties : str
-            Comma-separated list of properties to extract
-            See scikit-image documentation for all available properties:
-            https://scikit-image.org/docs/stable/api/skimage.measure.html#skimage.measure.regionprops
-        intensity_image : bool
-            Whether to use the input image as intensity image for intensity-based measurements
-        min_area : int
-            Minimum area (in pixels) for regions to include in results
-
-        Returns:
-        --------
-        numpy.ndarray
-            The original image (unchanged)
-        """
-        # Check if we have a proper label image
-        if image.ndim < 2 or np.max(image) == 0:
-            print(
-                "Input must be a valid label image with at least one labeled region"
-            )
-            return image
-
-        # Convert image to proper format for regionprops
-        label_image = image.astype(np.int32)
-
-        # Parse the properties list
-        prop_list = [prop.strip() for prop in properties.split(",")]
-
-        # Get region properties
-        if intensity_image:
-            # Use the same image as both label and intensity image # this is wrong
-            regions = skimage.measure.regionprops(
-                label_image, intensity_image=image
-            )
-        else:
-            regions = skimage.measure.regionprops(label_image)
-
-        # Collect property data
-        data = []
-        for region in regions:
-            # Skip regions that are too small
-            if region.area < min_area:
-                continue
-
-            # Get all requested properties
-            region_data = {"label": region.label}
-            for prop in prop_list:
-                try:
-                    value = getattr(region, prop)
-
-                    # Handle different types of properties
-                    if isinstance(value, tuple) or (
-                        isinstance(value, np.ndarray) and value.ndim > 0
-                    ):
-                        # For tuple/array properties like centroid, bbox, etc.
-                        if isinstance(value, tuple):
-                            value = np.array(value)
-
-                        # For each element in the tuple/array
-                        for i, val in enumerate(value):
-                            region_data[f"{prop}_{i}"] = val
-                    else:
-                        # For scalar properties like area, perimeter, etc.
-                        region_data[prop] = value
-                except AttributeError:
-                    print(f"Property '{prop}' not found, skipping")
-                    continue
-
-            data.append(region_data)
-
-        # Create a DataFrame
-        df = pd.DataFrame(data)
+    # Note: Old "Extract Region Properties" function removed
+    # Use "Extract Regionprops to CSV" from regionprops_analysis.py instead
+    # which properly handles multi-dimensional data (T, C, Z dimensions)
+    # and creates a single CSV for all images in a folder
 
-
-
-
-
-
+else:
+    # Export stub functions that raise ImportError when called
+    def invert_image(*args, **kwargs):
+        raise ImportError(
+            "scikit-image is not available. Please install scikit-image to use this function."
        )
 
-
-
-
-
-    try:
-        # Check if ProcessingWorker is imported and available
-        original_process_file = ProcessingWorker.process_file
-
-        # Create a new version that handles saving CSV
-        def process_file_with_csv_export(self, filepath):
-            """Modified process_file function that saves CSV after processing."""
-            result = original_process_file(self, filepath)
-
-            # Check if there's a result and if we should save CSV
-            if isinstance(result, dict) and "processed_file" in result:
-                output_path = result["processed_file"]
-
-                # Check if the processing function had CSV data
-                if (
-                    hasattr(self.processing_func, "save_csv")
-                    and self.processing_func.save_csv
-                    and hasattr(self.processing_func, "csv_data")
-                ):
-
-                    # Get the CSV data
-                    df = self.processing_func.csv_data
-
-                    # For functions that don't need an image output, use the original filepath
-                    # as the base for the CSV filename
-                    if (
-                        hasattr(self.processing_func, "no_image_output")
-                        and self.processing_func.no_image_output
-                    ):
-                        # Use the original filepath without creating a new image file
-                        base_path = os.path.splitext(filepath)[0]
-                        csv_path = f"{base_path}_regionprops.csv"
-
-                        # Don't save a duplicate image file
-                        if (
-                            os.path.exists(output_path)
-                            and output_path != filepath
-                        ):
-                            contextlib.suppress(OSError)
-                    else:
-                        # Create CSV filename from the output image path
-                        csv_path = (
-                            os.path.splitext(output_path)[0]
-                            + "_regionprops.csv"
-                        )
-
-                    # Save the CSV file
-                    df.to_csv(csv_path, index=False)
-                    print(f"Saved region properties to {csv_path}")
-
-                    # Add the CSV file to the result
-                    result["secondary_files"] = [csv_path]
-
-                    # If we don't need an image output, update the result to just point to the CSV
-                    if (
-                        hasattr(self.processing_func, "no_image_output")
-                        and self.processing_func.no_image_output
-                    ):
-                        result["processed_file"] = csv_path
-
-            return result
-
-        # Apply the monkey patch
-        ProcessingWorker.process_file = process_file_with_csv_export
+    def equalize_histogram(*args, **kwargs):
+        raise ImportError(
+            "scikit-image is not available. Please install scikit-image to use this function."
+        )
 
-
-
-            "Region properties will be extracted but CSV files may not be saved"
+    def otsu_thresholding(*args, **kwargs):
+        raise ImportError(
+            "scikit-image is not available. Please install scikit-image to use this function."
        )
 
 
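For anyone migrating off the removed function: the note above points to "Extract Regionprops to CSV" in regionprops_analysis.py. As a generic sketch of the same measure-then-save idea using only public scikit-image and pandas APIs (not the plugin's implementation):

```python
import numpy as np
import pandas as pd
from skimage import measure

# Illustrative label image: a few random blobs
rng = np.random.default_rng(0)
labels = measure.label(rng.random((128, 128)) > 0.95)

# regionprops_table returns flat columns (tuple properties like centroid are split)
table = measure.regionprops_table(
    labels, properties=("label", "area", "centroid", "eccentricity")
)
pd.DataFrame(table).to_csv("example_regionprops.csv", index=False)
```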
@@ -455,3 +480,190 @@ def convert_to_uint8(image: np.ndarray) -> np.ndarray:
 
     # Convert the rescaled image to uint8
     return skimage.img_as_ubyte(img_rescaled)
+
+
+# ============================================================================
+# Bright Region Extraction Functions
+# ============================================================================
+
+if SKIMAGE_AVAILABLE:
+
+    @BatchProcessingRegistry.register(
+        name="Percentile Threshold (Keep Brightest)",
+        suffix="_percentile",
+        description="Keep only pixels above a brightness percentile, zero out the rest",
+        parameters={
+            "percentile": {
+                "type": float,
+                "default": 90.0,
+                "min": 0.0,
+                "max": 100.0,
+                "description": "Keep pixels brighter than this percentile (0-100)",
+            },
+            "output_type": {
+                "type": str,
+                "default": "original",
+                "options": ["original", "binary"],
+                "description": "Output original values or binary mask",
+            },
+        },
+    )
+    def percentile_threshold(
+        image: np.ndarray,
+        percentile: float = 90.0,
+        output_type: str = "original",
+    ) -> np.ndarray:
+        """
+        Keep only pixels above a certain brightness percentile.
+
+        This function calculates the specified percentile of pixel intensities
+        and keeps only pixels brighter than that threshold. Darker pixels are
+        set to zero.
+
+        Parameters:
+        -----------
+        image : numpy.ndarray
+            Input image array
+        percentile : float
+            Percentile threshold (0-100). Higher values keep fewer, brighter pixels.
+        output_type : str
+            'original' returns the original pixel values for pixels above threshold,
+            'binary' returns a binary mask (255 for above threshold, 0 otherwise)
+
+        Returns:
+        --------
+        numpy.ndarray
+            Image with only bright regions preserved
+        """
+        # Calculate the percentile threshold
+        threshold = np.percentile(image, percentile)
+
+        if output_type == "binary":
+            # Return binary mask
+            return np.where(image > threshold, 255, 0).astype(np.uint8)
+        else:
+            # Return original values above threshold, zero elsewhere
+            result = image.copy()
+            result[image <= threshold] = 0
+            return result
+
+    @BatchProcessingRegistry.register(
+        name="Rolling Ball Background Subtraction",
+        suffix="_rollingball",
+        description="Remove uneven background using rolling ball algorithm (like ImageJ)",
+        parameters={
+            "radius": {
+                "type": int,
+                "default": 50,
+                "min": 5,
+                "max": 200,
+                "description": "Radius of rolling ball (larger = remove broader background)",
+            }
+        },
+    )
+    def rolling_ball_background(
+        image: np.ndarray, radius: int = 50
+    ) -> np.ndarray:
+        """
+        Remove background using rolling ball algorithm.
+
+        This algorithm estimates and removes uneven background by simulating
+        a ball rolling under the image surface. It's particularly effective
+        for fluorescence microscopy images with uneven illumination.
+
+        Parameters:
+        -----------
+        image : numpy.ndarray
+            Input image array
+        radius : int
+            Radius of the rolling ball. Should be larger than the largest
+            feature you want to keep. Larger values remove broader background
+            variations.
+
+        Returns:
+        --------
+        numpy.ndarray
+            Background-subtracted image with bright features preserved
+        """
+        from skimage.restoration import rolling_ball
+
+        # Estimate background
+        background = rolling_ball(image, radius=radius)
+
+        # Subtract background and clip to valid range
+        result = image.astype(np.float32) - background
+        result = np.clip(result, 0, None)
+
+        # Convert back to original dtype range if needed
+        if image.dtype == np.uint8:
+            result = np.clip(result, 0, 255).astype(np.uint8)
+        elif image.dtype == np.uint16:
+            result = np.clip(result, 0, 65535).astype(np.uint16)
+
+        return result
+
+    @BatchProcessingRegistry.register(
+        name="Adaptive Threshold (Bright Bias)",
+        suffix="_adaptive_bright",
+        description="Adaptive thresholding biased to keep bright regions",
+        parameters={
+            "block_size": {
+                "type": int,
+                "default": 35,
+                "min": 3,
+                "max": 201,
+                "description": "Size of local neighborhood (must be odd)",
+            },
+            "offset": {
+                "type": float,
+                "default": -10.0,
+                "min": -128.0,
+                "max": 128.0,
+                "description": "Constant subtracted from mean (negative = keep more bright pixels)",
+            },
+        },
+    )
+    def adaptive_threshold_bright(
+        image: np.ndarray, block_size: int = 35, offset: float = -10.0
+    ) -> np.ndarray:
+        """
+        Apply adaptive thresholding with bias toward bright regions.
+
+        Unlike global thresholding, adaptive thresholding calculates a threshold
+        for each pixel based on its local neighborhood. The negative offset
+        biases the threshold to keep more bright pixels.
+
+        Parameters:
+        -----------
+        image : numpy.ndarray
+            Input image array
+        block_size : int
+            Size of the local neighborhood for threshold calculation. Must be odd.
+            Larger values consider broader neighborhoods.
+        offset : float
+            Value subtracted from the local mean. Negative values (like -10)
+            lower the threshold, keeping more bright pixels.
+
+        Returns:
+        --------
+        numpy.ndarray
+            Binary image (255 for bright regions, 0 elsewhere)
+        """
+        # Ensure block_size is odd
+        if block_size % 2 == 0:
+            block_size += 1
+
+        # Convert to uint8 if needed
+        if image.dtype != np.uint8:
+            image = skimage.img_as_ubyte(image)
+
+        # Apply adaptive thresholding
+        threshold = skimage.filters.threshold_local(
+            image, block_size=block_size, offset=offset
+        )
+
+        # Create binary mask
+        binary = image > threshold
+
+        # Return as uint8 (255/0)
+        return (binary * 255).astype(np.uint8)