napari-tmidas 0.1.9__py3-none-any.whl → 0.2.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- napari_tmidas/_crop_anything.py +1895 -608
- napari_tmidas/_file_selector.py +87 -6
- napari_tmidas/_label_inspection.py +94 -47
- napari_tmidas/_version.py +2 -2
- napari_tmidas/processing_functions/basic.py +554 -23
- napari_tmidas/processing_functions/careamics_denoising.py +324 -0
- napari_tmidas/processing_functions/careamics_env_manager.py +339 -0
- napari_tmidas/processing_functions/cellpose_env_manager.py +55 -20
- napari_tmidas/processing_functions/cellpose_segmentation.py +105 -218
- napari_tmidas/processing_functions/sam2_env_manager.py +111 -0
- napari_tmidas/processing_functions/sam2_mp4.py +283 -0
- napari_tmidas/processing_functions/skimage_filters.py +323 -0
- napari_tmidas/processing_functions/timepoint_merger.py +490 -0
- napari_tmidas/processing_functions/trackastra_tracking.py +303 -0
- {napari_tmidas-0.1.9.dist-info → napari_tmidas-0.2.1.dist-info}/METADATA +15 -8
- {napari_tmidas-0.1.9.dist-info → napari_tmidas-0.2.1.dist-info}/RECORD +20 -14
- {napari_tmidas-0.1.9.dist-info → napari_tmidas-0.2.1.dist-info}/WHEEL +1 -1
- {napari_tmidas-0.1.9.dist-info → napari_tmidas-0.2.1.dist-info}/entry_points.txt +0 -0
- {napari_tmidas-0.1.9.dist-info → napari_tmidas-0.2.1.dist-info}/licenses/LICENSE +0 -0
- {napari_tmidas-0.1.9.dist-info → napari_tmidas-0.2.1.dist-info}/top_level.txt +0 -0
@@ -16,6 +16,12 @@ except ImportError:
         "scikit-image not available, some processing functions will be disabled"
     )
 
+import contextlib
+import os
+
+import pandas as pd
+
+from napari_tmidas._file_selector import ProcessingWorker
 from napari_tmidas._registry import BatchProcessingRegistry
 
 if SKIMAGE_AVAILABLE:
@@ -114,6 +120,293 @@
         image, min_size=min_size
     )
 
+@BatchProcessingRegistry.register(
+    name="Invert Image",
+    suffix="_inverted",
+    description="Invert pixel values in the image using scikit-image's invert function",
+)
+def invert_image(image: np.ndarray) -> np.ndarray:
+    """
+    Invert the image pixel values.
+
+    This function inverts the values in an image using scikit-image's invert function,
+    which handles different data types appropriately.
+
+    Parameters:
+    -----------
+    image : numpy.ndarray
+        Input image array
+
+    Returns:
+    --------
+    numpy.ndarray
+        Inverted image with the same data type as the input
+    """
+    # Make a copy to avoid modifying the original
+    image_copy = image.copy()
+
+    # Use skimage's invert function, which handles all data types properly
+    return skimage.util.invert(image_copy)
+
+@BatchProcessingRegistry.register(
+    name="Semantic to Instance Segmentation",
+    suffix="_instance",
+    description="Convert semantic segmentation masks to instance segmentation labels using connected components",
+)
+def semantic_to_instance(image: np.ndarray) -> np.ndarray:
+    """
+    Convert semantic segmentation masks to instance segmentation labels.
+
+    This function takes a binary or multi-class semantic segmentation mask and
+    converts it to an instance segmentation by finding connected components.
+    Each connected region receives a unique label.
+
+    Parameters:
+    -----------
+    image : numpy.ndarray
+        Input semantic segmentation mask
+
+    Returns:
+    --------
+    numpy.ndarray
+        Instance segmentation with unique labels for each connected component
+    """
+    # Create a copy to avoid modifying the original
+    instance_mask = image.copy()
+
+    # If the input is multi-class, process each class separately
+    if np.max(instance_mask) > 1:
+        # Get unique non-zero class values
+        class_values = np.unique(instance_mask)
+        class_values = class_values[
+            class_values > 0
+        ]  # Remove background (0)
+
+        # Create an empty output mask
+        result = np.zeros_like(instance_mask, dtype=np.uint32)
+
+        # Process each class
+        label_offset = 0
+        for class_val in class_values:
+            # Create binary mask for this class
+            binary_mask = (instance_mask == class_val).astype(np.uint8)
+
+            # Find connected components
+            labeled = skimage.measure.label(binary_mask, connectivity=2)
+
+            # Skip if no components found
+            if np.max(labeled) == 0:
+                continue
+
+            # Add offset to avoid label overlap between classes
+            labeled[labeled > 0] += label_offset
+
+            # Add to result
+            result = np.maximum(result, labeled)
+
+            # Update offset for next class
+            label_offset = np.max(result)
+    else:
+        # For binary masks, just find connected components
+        result = skimage.measure.label(instance_mask > 0, connectivity=2)
+
+    return result.astype(np.uint32)
+
+@BatchProcessingRegistry.register(
+    name="Extract Region Properties",
+    suffix="_props",  # Indicates this is for CSV output only
+    description="Extract properties of labeled regions and save as CSV (no image output)",
+    parameters={
+        "properties": {
+            "type": str,
+            "default": "area,bbox,centroid,eccentricity,euler_number,perimeter",
+            "description": "Comma-separated list of properties to extract (e.g., area,perimeter,centroid)",
+        },
+        "intensity_image": {
+            "type": bool,
+            "default": False,
+            "description": "Use input as intensity image for intensity-based measurements",
+        },
+        "min_area": {
+            "type": int,
+            "default": 0,
+            "min": 0,
+            "max": 100000,
+            "description": "Minimum area to include in results (pixels)",
+        },
+    },
+)
+def extract_region_properties(
+    image: np.ndarray,
+    properties: str = "area,bbox,centroid,eccentricity,euler_number,perimeter",
+    intensity_image: bool = False,
+    min_area: int = 0,
+) -> np.ndarray:
+    """
+    Extract properties of labeled regions in an image and save the results as CSV.
+
+    This function analyzes all labeled regions in a label image and computes
+    region properties such as area, perimeter, and centroid. The results
+    are saved as a CSV file. The input image is returned unchanged.
+
+    Parameters:
+    -----------
+    image : numpy.ndarray
+        Input label image (instance segmentation)
+    properties : str
+        Comma-separated list of properties to extract.
+        See the scikit-image documentation for all available properties:
+        https://scikit-image.org/docs/stable/api/skimage.measure.html#skimage.measure.regionprops
+    intensity_image : bool
+        Whether to use the input image as the intensity image for intensity-based measurements
+    min_area : int
+        Minimum area (in pixels) for regions to include in results
+
+    Returns:
+    --------
+    numpy.ndarray
+        The original image (unchanged)
+    """
+    # Check if we have a proper label image
+    if image.ndim < 2 or np.max(image) == 0:
+        print(
+            "Input must be a valid label image with at least one labeled region"
+        )
+        return image
+
+    # Convert image to proper format for regionprops
+    label_image = image.astype(np.int32)
+
+    # Parse the properties list
+    prop_list = [prop.strip() for prop in properties.split(",")]
+
+    # Get region properties
+    if intensity_image:
+        # Use the same image as both label and intensity image
+        # (note: intensity statistics then reflect the label values themselves)
+        regions = skimage.measure.regionprops(
+            label_image, intensity_image=image
+        )
+    else:
+        regions = skimage.measure.regionprops(label_image)
+
+    # Collect property data
+    data = []
+    for region in regions:
+        # Skip regions that are too small
+        if region.area < min_area:
+            continue
+
+        # Get all requested properties
+        region_data = {"label": region.label}
+        for prop in prop_list:
+            try:
+                value = getattr(region, prop)
+
+                # Handle different types of properties
+                if isinstance(value, tuple) or (
+                    isinstance(value, np.ndarray) and value.ndim > 0
+                ):
+                    # For tuple/array properties like centroid, bbox, etc.
+                    if isinstance(value, tuple):
+                        value = np.array(value)
+
+                    # Store each element of the tuple/array separately
+                    for i, val in enumerate(value):
+                        region_data[f"{prop}_{i}"] = val
+                else:
+                    # For scalar properties like area, perimeter, etc.
+                    region_data[prop] = value
+            except AttributeError:
+                print(f"Property '{prop}' not found, skipping")
+                continue
+
+        data.append(region_data)
+
+    # Create a DataFrame
+    df = pd.DataFrame(data)
+
+    # Store the DataFrame as an attribute of the function
+    extract_region_properties.csv_data = df
+    extract_region_properties.save_csv = True
+    extract_region_properties.no_image_output = (
+        True  # Indicate that no image output is needed
+    )
+
+    print(f"Extracted properties for {len(data)} regions")
+    return image
+
+# Monkey patch to handle saving CSV files without creating a new image file
+try:
+    # Check if ProcessingWorker is imported and available
+    original_process_file = ProcessingWorker.process_file
+
+    # Create a new version that handles saving CSV
+    def process_file_with_csv_export(self, filepath):
+        """Modified process_file function that saves CSV after processing."""
+        result = original_process_file(self, filepath)
+
+        # Check if there's a result and if we should save CSV
+        if isinstance(result, dict) and "processed_file" in result:
+            output_path = result["processed_file"]
+
+            # Check if the processing function had CSV data
+            if (
+                hasattr(self.processing_func, "save_csv")
+                and self.processing_func.save_csv
+                and hasattr(self.processing_func, "csv_data")
+            ):
+
+                # Get the CSV data
+                df = self.processing_func.csv_data
+
+                # For functions that don't need an image output, use the original
+                # filepath as the base for the CSV filename
+                if (
+                    hasattr(self.processing_func, "no_image_output")
+                    and self.processing_func.no_image_output
+                ):
+                    # Use the original filepath without creating a new image file
+                    base_path = os.path.splitext(filepath)[0]
+                    csv_path = f"{base_path}_regionprops.csv"
+
+                    # Don't keep a duplicate image file
+                    if (
+                        os.path.exists(output_path)
+                        and output_path != filepath
+                    ):
+                        with contextlib.suppress(OSError):
+                            os.remove(output_path)
+                else:
+                    # Create CSV filename from the output image path
+                    csv_path = (
+                        os.path.splitext(output_path)[0]
+                        + "_regionprops.csv"
+                    )
+
+                # Save the CSV file
+                df.to_csv(csv_path, index=False)
+                print(f"Saved region properties to {csv_path}")
+
+                # Add the CSV file to the result
+                result["secondary_files"] = [csv_path]
+
+                # If no image output is needed, point the result at the CSV
+                if (
+                    hasattr(self.processing_func, "no_image_output")
+                    and self.processing_func.no_image_output
+                ):
+                    result["processed_file"] = csv_path
+
+        return result
+
+    # Apply the monkey patch
+    ProcessingWorker.process_file = process_file_with_csv_export
+
+except (NameError, AttributeError) as e:
+    print(f"Warning: Could not apply CSV export patch: {e}")
+    print(
+        "Region properties will be extracted but CSV files may not be saved"
+    )
+
 
 # binary to labels
 @BatchProcessingRegistry.register(
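The snippet below is not part of the diff; it is a minimal sketch of how the newly registered functions behave when called directly, assuming napari-tmidas 0.2.1 (with numpy and scikit-image) is installed and the toy arrays are purely illustrative.

import numpy as np
from napari_tmidas.processing_functions.basic import (
    extract_region_properties,
    semantic_to_instance,
)

# A toy semantic mask with two disconnected blobs of the same class
semantic = np.zeros((8, 8), dtype=np.uint8)
semantic[1:3, 1:3] = 1
semantic[5:7, 5:7] = 1

# Connected-components labeling gives each blob its own ID (here 1 and 2)
instance = semantic_to_instance(semantic)

# Returns the input unchanged; the measurements are stashed on the function
# object (csv_data / save_csv / no_image_output) and are only written to
# <input>_regionprops.csv by the patched ProcessingWorker.process_file
# during batch processing.
extract_region_properties(instance, properties="area,centroid")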
@@ -132,3 +425,33 @@ def binary_to_labels(image: np.ndarray) -> np.ndarray:
     label_image = skimage.measure.label(label_image, connectivity=2)
 
     return label_image
+
+
+@BatchProcessingRegistry.register(
+    name="Convert to 8-bit (uint8)",
+    suffix="_uint8",
+    description="Convert image data to 8-bit (uint8) format with proper scaling",
+)
+def convert_to_uint8(image: np.ndarray) -> np.ndarray:
+    """
+    Convert image data to 8-bit (uint8) format with proper scaling.
+
+    This function handles any input image dimensions (including TZYX) and properly
+    rescales data to the 0-1 range before conversion to uint8. Ideal for scientific
+    imaging data with arbitrary value ranges.
+
+    Parameters:
+    -----------
+    image : numpy.ndarray
+        Input image array of any numerical dtype
+
+    Returns:
+    --------
+    numpy.ndarray
+        8-bit image with shape preserved and values properly scaled
+    """
+    # Rescale to the 0-1 range (works for any input range, negative or positive)
+    img_rescaled = skimage.exposure.rescale_intensity(image, out_range=(0, 1))
+
+    # Convert the rescaled image to uint8
+    return skimage.img_as_ubyte(img_rescaled)
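Again as an illustration rather than part of the diff, a hypothetical float image with an arbitrary value range is mapped onto the full 0-255 range by the new conversion function:

import numpy as np
from napari_tmidas.processing_functions.basic import convert_to_uint8

img = np.array([[-1.0, 0.0], [0.5, 3.0]], dtype=np.float32)
out = convert_to_uint8(img)

# The minimum (-1.0) maps to 0 and the maximum (3.0) to 255,
# with intermediate values scaled linearly.
print(out.dtype, out.min(), out.max())  # uint8 0 255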