napari-tmidas 0.1.8.5__py3-none-any.whl → 0.2.0__py3-none-any.whl
This diff shows the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as published.
- napari_tmidas/_label_inspection.py +94 -47
- napari_tmidas/_version.py +2 -2
- napari_tmidas/processing_functions/basic.py +60 -0
- napari_tmidas/processing_functions/sam2_env_manager.py +111 -0
- napari_tmidas/processing_functions/skimage_filters.py +293 -0
- {napari_tmidas-0.1.8.5.dist-info → napari_tmidas-0.2.0.dist-info}/METADATA +18 -45
- {napari_tmidas-0.1.8.5.dist-info → napari_tmidas-0.2.0.dist-info}/RECORD +11 -10
- {napari_tmidas-0.1.8.5.dist-info → napari_tmidas-0.2.0.dist-info}/WHEEL +1 -1
- {napari_tmidas-0.1.8.5.dist-info → napari_tmidas-0.2.0.dist-info}/entry_points.txt +0 -0
- {napari_tmidas-0.1.8.5.dist-info → napari_tmidas-0.2.0.dist-info}/licenses/LICENSE +0 -0
- {napari_tmidas-0.1.8.5.dist-info → napari_tmidas-0.2.0.dist-info}/top_level.txt +0 -0
napari_tmidas/_label_inspection.py CHANGED

@@ -11,10 +11,11 @@ Users can make and save changes to the labels, and proceed to the next pair.
 import os
 import sys
 
+import numpy as np
 from magicgui import magicgui
 from napari.layers import Labels
 from napari.viewer import Viewer
-from qtpy.QtWidgets import QFileDialog, QPushButton
+from qtpy.QtWidgets import QFileDialog, QMessageBox, QPushButton
 from skimage.io import imread  # , imsave
 
 sys.path.append("src/napari_tmidas")
@@ -29,63 +30,105 @@ class LabelInspector:
     def load_image_label_pairs(self, folder_path: str, label_suffix: str):
         """
         Load image-label pairs from a folder.
-        Finds
+        Finds all files with the given suffix and matches them with their corresponding image files.
+        Validates that label files are in the correct format.
         """
+        if not os.path.exists(folder_path) or not os.path.isdir(folder_path):
+            self.viewer.status = f"Folder path does not exist: {folder_path}"
+            return
+
         files = os.listdir(folder_path)
-        label_files = [file for file in files if file.endswith(label_suffix)]
 
-        #
-
-
-
+        # Find all files that contain the label suffix
+        # Using "in" instead of "endswith" for more flexibility
+        potential_label_files = [
+            file for file in files if label_suffix in file
+        ]
+
+        if not potential_label_files:
+            self.viewer.status = f"No files found with suffix '{label_suffix}'"
+            QMessageBox.warning(
+                None,
+                "No Label Files Found",
+                f"No files containing '{label_suffix}' were found in {folder_path}.",
+            )
+            return
 
-        #
+        # Process all potential label files
         self.image_label_pairs = []
-
-
-
-
-
-
-
-
-
-
-
-
-
-                os.path.join(folder_path, lbl),
-            )
-        )
-                continue
-
-            # If not found, try finding any file that starts with the base name
+        skipped_files = []
+        format_issues = []
+
+        for label_file in potential_label_files:
+            label_path = os.path.join(folder_path, label_file)
+
+            # Get file extension
+            _, file_extension = os.path.splitext(label_file)
+
+            # Try to find a matching image file (everything before the label suffix)
+            base_name = label_file.split(label_suffix)[0]
+
+            # Look for potential images matching the base name
             potential_images = [
                 file
                 for file in files
-                if file.startswith(
+                if file.startswith(base_name)
+                and file != label_file
                 and file.endswith(file_extension)
-                and file != lbl
             ]
 
+            # If we found at least one potential image
             if potential_images:
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+                image_path = os.path.join(folder_path, potential_images[0])
+
+                # Validate label file format
+                try:
+                    label_data = imread(label_path)
+
+                    # Check if it looks like a label image (integer type)
+                    if not np.issubdtype(label_data.dtype, np.integer):
+                        format_issues.append(
+                            (label_file, "not an integer type")
+                        )
+                        continue
+
+                    # Add valid pair
+                    self.image_label_pairs.append((image_path, label_path))
+
+                except (
+                    FileNotFoundError,
+                    OSError,
+                    ValueError,
+                    Exception,
+                ) as e:
+                    skipped_files.append((label_file, str(e)))
+            else:
+                skipped_files.append((label_file, "no matching image found"))
+
+        # Report results
+        if self.image_label_pairs:
+            self.viewer.status = (
+                f"Found {len(self.image_label_pairs)} valid image-label pairs."
+            )
+            self.current_index = 0
+            self._load_current_pair()
+        else:
+            self.viewer.status = "No valid image-label pairs found."
+
+        # Show detailed report if there were issues
+        if skipped_files or format_issues:
+            msg = ""
+            if skipped_files:
+                msg += "Skipped files:\n"
+                for file, reason in skipped_files:
+                    msg += f"- {file}: {reason}\n"
+
+            if format_issues:
+                msg += "\nFormat issues:\n"
+                for file, issue in format_issues:
+                    msg += f"- {file}: {issue}\n"
+
+            QMessageBox.information(None, "Loading Report", msg)
 
     def _load_current_pair(self):
         """
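For context (not part of the diff), the new pairing rule added above can be summarised in a few lines; the file names below are made up for illustration:

import os

files = ["embryo1.tif", "embryo1_labels.tif", "embryo2.tif", "notes.txt"]
label_suffix = "_labels.tif"

pairs = []
for label_file in [f for f in files if label_suffix in f]:
    base_name = label_file.split(label_suffix)[0]   # "embryo1"
    _, ext = os.path.splitext(label_file)           # ".tif"
    candidates = [
        f for f in files
        if f.startswith(base_name) and f != label_file and f.endswith(ext)
    ]
    if candidates:
        pairs.append((candidates[0], label_file))

print(pairs)  # [('embryo1.tif', 'embryo1_labels.tif')]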
@@ -110,6 +153,10 @@ class LabelInspector:
             label_image, name=f"Labels ({os.path.basename(label_path)})"
         )
 
+        # Show progress
+        total = len(self.image_label_pairs)
+        self.viewer.status = f"Viewing pair {self.current_index + 1} of {total}: {os.path.basename(image_path)}"
+
     def save_current_labels(self):
         """
         Save the current labels back to the original file.
@@ -172,7 +219,7 @@ class LabelInspector:
 @magicgui(
     call_button="Start Label Inspection",
     folder_path={"label": "Folder Path", "widget_type": "LineEdit"},
-    label_suffix={"label": "Label Suffix (e.g.,
+    label_suffix={"label": "Label Suffix (e.g., _labels.tif)"},
 )
 def label_inspector(
     folder_path: str,
napari_tmidas/_version.py CHANGED
napari_tmidas/processing_functions/basic.py CHANGED

@@ -100,6 +100,66 @@ def max_z_projection(image: np.ndarray) -> np.ndarray:
     return (projection * max_val).clip(0, max_val).astype(image.dtype)
 
 
+@BatchProcessingRegistry.register(
+    name="Max Z Projection (TZYX)",
+    suffix="_maxZ_tzyx",
+    description="Maximum intensity projection along the Z-axis for TZYX data",
+    parameters={},  # No parameters needed - fully automatic
+)
+def max_z_projection_tzyx(image: np.ndarray) -> np.ndarray:
+    """
+    Memory-efficient maximum intensity projection along the Z-axis for TZYX data.
+
+    This function intelligently chooses the most memory-efficient approach
+    based on the input data size and available system memory.
+
+    Parameters:
+    -----------
+    image : numpy.ndarray
+        Input 4D image with TZYX dimensions
+
+    Returns:
+    --------
+    numpy.ndarray
+        3D image with TYX dimensions after max projection
+    """
+    # Validate input dimensions
+    if image.ndim != 4:
+        raise ValueError(f"Expected 4D image (TZYX), got {image.ndim}D image")
+
+    # Get dimensions
+    t_size, z_size, y_size, x_size = image.shape
+
+    # For Z projection, we only need one Z plane in memory at a time
+    # so we can process this plane by plane to minimize memory usage
+
+    # Create output array with appropriate dimensions and same dtype
+    result = np.zeros((t_size, y_size, x_size), dtype=image.dtype)
+
+    # Process each time point separately to minimize memory usage
+    for t in range(t_size):
+        # If data type allows direct max, use it
+        if np.issubdtype(image.dtype, np.integer) or np.issubdtype(
+            image.dtype, np.floating
+        ):
+            # Process Z planes efficiently
+            # Start with the first Z plane
+            z_max = image[t, 0].copy()
+
+            # Compare with each subsequent Z plane
+            for z in range(1, z_size):
+                # Use numpy's maximum function to update max values in-place
+                np.maximum(z_max, image[t, z], out=z_max)
+
+            # Store result for this time point
+            result[t] = z_max
+        else:
+            # For unusual data types, fall back to numpy's max function
+            result[t] = np.max(image[t], axis=0)
+
+    return result
+
+
 @BatchProcessingRegistry.register(
     name="Split Channels",
     suffix="_split_channels",
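A quick sanity check of the new TZYX projection (not part of the diff); the array shape is arbitrary and the import assumes the module can be loaded outside of napari:

import numpy as np
from napari_tmidas.processing_functions.basic import max_z_projection_tzyx

# Hypothetical TZYX stack: 5 time points, 10 z-planes, 64x64 pixels
stack = np.random.randint(0, 255, size=(5, 10, 64, 64), dtype=np.uint16)

projected = max_z_projection_tzyx(stack)
print(projected.shape, projected.dtype)  # (5, 64, 64) uint16, Z collapsed, dtype preserved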
napari_tmidas/processing_functions/sam2_env_manager.py ADDED

@@ -0,0 +1,111 @@
+"""
+processing_functions/sam2_env_manager.py
+
+This module manages a dedicated virtual environment for SAM2.
+"""
+
+import os
+import platform
+import shutil
+import subprocess
+import venv
+
+# Define the environment directory in user's home folder
+ENV_DIR = os.path.join(
+    os.path.expanduser("~"), ".napari-tmidas", "envs", "sam2-env"
+)
+
+
+def is_sam2_installed():
+    """Check if SAM2 is installed in the current environment."""
+    try:
+        import importlib.util
+
+        return importlib.util.find_spec("sam2-env") is not None
+    except ImportError:
+        return False
+
+
+def is_env_created():
+    """Check if the dedicated environment exists."""
+    env_python = get_env_python_path()
+    return os.path.exists(env_python)
+
+
+def get_env_python_path():
+    """Get the path to the Python executable in the environment."""
+    if platform.system() == "Windows":
+        return os.path.join(ENV_DIR, "Scripts", "python.exe")
+    else:
+        return os.path.join(ENV_DIR, "bin", "python")
+
+
+def create_sam2_env():
+    """Create a dedicated virtual environment for SAM2."""
+    # Ensure the environment directory exists
+    os.makedirs(os.path.dirname(ENV_DIR), exist_ok=True)
+
+    # Remove existing environment if it exists
+    if os.path.exists(ENV_DIR):
+        shutil.rmtree(ENV_DIR)
+
+    print(f"Creating SAM2 environment at {ENV_DIR}...")
+
+    # Create a new virtual environment
+    venv.create(ENV_DIR, with_pip=True)
+
+    # Path to the Python executable in the new environment
+    env_python = get_env_python_path()
+
+    # Upgrade pip
+    print("Upgrading pip...")
+    subprocess.check_call(
+        [env_python, "-m", "pip", "install", "--upgrade", "pip"]
+    )
+
+    # Install numpy and torch first for compatibility
+    print("Installing torch and torchvision...")
+    subprocess.check_call(
+        [env_python, "-m", "pip", "install", "torch", "torchvision"]
+    )
+
+    # Install sam2 from GitHub
+    print("Installing SAM2 from GitHub...")
+    subprocess.check_call(
+        [
+            env_python,
+            "-m",
+            "pip",
+            "install",
+            "git+https://github.com/facebookresearch/sam2.git",
+        ]
+    )
+
+    subprocess.run(
+        [
+            env_python,
+            "-c",
+            "import torch; import torchvision; print('PyTorch version:', torch.__version__); print('Torchvision version:', torchvision.__version__); print('CUDA is available:', torch.cuda.is_available())",
+        ]
+    )
+
+    print("SAM2 environment created successfully.")
+    return env_python
+
+
+def run_sam2_in_env(func_name, args_dict):
+    """
+    Run SAM2 in a dedicated environment with minimal complexity.
+
+    Parameters:
+    -----------
+    func_name : str
+        Name of the SAM2 function to run (currently unused)
+    args_dict : dict
+        Dictionary of arguments for SAM2
+
+    Returns:
+    --------
+    numpy.ndarray
+        Segmentation masks
+    """
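A minimal sketch of how the new environment manager is meant to be driven (not part of the diff; assumes the module imports cleanly and that building the environment is acceptable on this machine):

from napari_tmidas.processing_functions import sam2_env_manager

# Build the dedicated SAM2 environment once, then reuse it
if not sam2_env_manager.is_env_created():
    sam2_env_manager.create_sam2_env()  # creates ~/.napari-tmidas/envs/sam2-env and installs torch + SAM2

python_exe = sam2_env_manager.get_env_python_path()
print(python_exe)  # .../envs/sam2-env/bin/python on Linux/macOS, .../Scripts/python.exe on Windows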
napari_tmidas/processing_functions/skimage_filters.py CHANGED

@@ -16,6 +16,12 @@ except ImportError:
         "scikit-image not available, some processing functions will be disabled"
     )
 
+import contextlib
+import os
+
+import pandas as pd
+
+from napari_tmidas._file_selector import ProcessingWorker
 from napari_tmidas._registry import BatchProcessingRegistry
 
 if SKIMAGE_AVAILABLE:
@@ -114,6 +120,293 @@ if SKIMAGE_AVAILABLE:
             image, min_size=min_size
         )
 
+    @BatchProcessingRegistry.register(
+        name="Invert Image",
+        suffix="_inverted",
+        description="Invert pixel values in the image using scikit-image's invert function",
+    )
+    def invert_image(image: np.ndarray) -> np.ndarray:
+        """
+        Invert the image pixel values.
+
+        This function inverts the values in an image using scikit-image's invert function,
+        which handles different data types appropriately.
+
+        Parameters:
+        -----------
+        image : numpy.ndarray
+            Input image array
+
+        Returns:
+        --------
+        numpy.ndarray
+            Inverted image with the same data type as the input
+        """
+        # Make a copy to avoid modifying the original
+        image_copy = image.copy()
+
+        # Use skimage's invert function which handles all data types properly
+        return skimage.util.invert(image_copy)
+
+    @BatchProcessingRegistry.register(
+        name="Semantic to Instance Segmentation",
+        suffix="_instance",
+        description="Convert semantic segmentation masks to instance segmentation labels using connected components",
+    )
+    def semantic_to_instance(image: np.ndarray) -> np.ndarray:
+        """
+        Convert semantic segmentation masks to instance segmentation labels.
+
+        This function takes a binary or multi-class semantic segmentation mask and
+        converts it to an instance segmentation by finding connected components.
+        Each connected region receives a unique label.
+
+        Parameters:
+        -----------
+        image : numpy.ndarray
+            Input semantic segmentation mask
+
+        Returns:
+        --------
+        numpy.ndarray
+            Instance segmentation with unique labels for each connected component
+        """
+        # Create a copy to avoid modifying the original
+        instance_mask = image.copy()
+
+        # If the input is multi-class, process each class separately
+        if np.max(instance_mask) > 1:
+            # Get unique non-zero class values
+            class_values = np.unique(instance_mask)
+            class_values = class_values[
+                class_values > 0
+            ]  # Remove background (0)
+
+            # Create an empty output mask
+            result = np.zeros_like(instance_mask, dtype=np.uint32)
+
+            # Process each class
+            label_offset = 0
+            for class_val in class_values:
+                # Create binary mask for this class
+                binary_mask = (instance_mask == class_val).astype(np.uint8)
+
+                # Find connected components
+                labeled = skimage.measure.label(binary_mask, connectivity=2)
+
+                # Skip if no components found
+                if np.max(labeled) == 0:
+                    continue
+
+                # Add offset to avoid label overlap between classes
+                labeled[labeled > 0] += label_offset
+
+                # Add to result
+                result = np.maximum(result, labeled)
+
+                # Update offset for next class
+                label_offset = np.max(result)
+        else:
+            # For binary masks, just find connected components
+            result = skimage.measure.label(instance_mask > 0, connectivity=2)
+
+        return result.astype(np.uint32)
+
+    @BatchProcessingRegistry.register(
+        name="Extract Region Properties",
+        suffix="_props",  # Changed to indicate this is for CSV output only
+        description="Extract properties of labeled regions and save as CSV (no image output)",
+        parameters={
+            "properties": {
+                "type": str,
+                "default": "area,bbox,centroid,eccentricity,euler_number,perimeter",
+                "description": "Comma-separated list of properties to extract (e.g., area,perimeter,centroid)",
+            },
+            "intensity_image": {
+                "type": bool,
+                "default": False,
+                "description": "Use input as intensity image for intensity-based measurements",
+            },
+            "min_area": {
+                "type": int,
+                "default": 0,
+                "min": 0,
+                "max": 100000,
+                "description": "Minimum area to include in results (pixels)",
+            },
+        },
+    )
+    def extract_region_properties(
+        image: np.ndarray,
+        properties: str = "area,bbox,centroid,eccentricity,euler_number,perimeter",
+        intensity_image: bool = False,
+        min_area: int = 0,
+    ) -> np.ndarray:
+        """
+        Extract properties of labeled regions in an image and save results as CSV.
+
+        This function analyzes all labeled regions in a label image and computes
+        various region properties like area, perimeter, centroid, etc. The results
+        are saved as a CSV file. The input image is returned unchanged.
+
+        Parameters:
+        -----------
+        image : numpy.ndarray
+            Input label image (instance segmentation)
+        properties : str
+            Comma-separated list of properties to extract
+            See scikit-image documentation for all available properties:
+            https://scikit-image.org/docs/stable/api/skimage.measure.html#skimage.measure.regionprops
+        intensity_image : bool
+            Whether to use the input image as intensity image for intensity-based measurements
+        min_area : int
+            Minimum area (in pixels) for regions to include in results
+
+        Returns:
+        --------
+        numpy.ndarray
+            The original image (unchanged)
+        """
+        # Check if we have a proper label image
+        if image.ndim < 2 or np.max(image) == 0:
+            print(
+                "Input must be a valid label image with at least one labeled region"
+            )
+            return image
+
+        # Convert image to proper format for regionprops
+        label_image = image.astype(np.int32)
+
+        # Parse the properties list
+        prop_list = [prop.strip() for prop in properties.split(",")]
+
+        # Get region properties
+        if intensity_image:
+            # Use the same image as both label and intensity image
+            regions = skimage.measure.regionprops(
+                label_image, intensity_image=image
+            )
+        else:
+            regions = skimage.measure.regionprops(label_image)
+
+        # Collect property data
+        data = []
+        for region in regions:
+            # Skip regions that are too small
+            if region.area < min_area:
+                continue
+
+            # Get all requested properties
+            region_data = {"label": region.label}
+            for prop in prop_list:
+                try:
+                    value = getattr(region, prop)
+
+                    # Handle different types of properties
+                    if isinstance(value, tuple) or (
+                        isinstance(value, np.ndarray) and value.ndim > 0
+                    ):
+                        # For tuple/array properties like centroid, bbox, etc.
+                        if isinstance(value, tuple):
+                            value = np.array(value)
+
+                        # For each element in the tuple/array
+                        for i, val in enumerate(value):
+                            region_data[f"{prop}_{i}"] = val
+                    else:
+                        # For scalar properties like area, perimeter, etc.
+                        region_data[prop] = value
+                except AttributeError:
+                    print(f"Property '{prop}' not found, skipping")
+                    continue
+
+            data.append(region_data)
+
+        # Create a DataFrame
+        df = pd.DataFrame(data)
+
+        # Store the DataFrame as an attribute of the function
+        extract_region_properties.csv_data = df
+        extract_region_properties.save_csv = True
+        extract_region_properties.no_image_output = (
+            True  # Indicate no image output needed
+        )
+
+        print(f"Extracted properties for {len(data)} regions")
+        return image
+
+    # Monkey patch to handle saving CSV files without creating a new image file
+    try:
+        # Check if ProcessingWorker is imported and available
+        original_process_file = ProcessingWorker.process_file
+
+        # Create a new version that handles saving CSV
+        def process_file_with_csv_export(self, filepath):
+            """Modified process_file function that saves CSV after processing."""
+            result = original_process_file(self, filepath)
+
+            # Check if there's a result and if we should save CSV
+            if isinstance(result, dict) and "processed_file" in result:
+                output_path = result["processed_file"]
+
+                # Check if the processing function had CSV data
+                if (
+                    hasattr(self.processing_func, "save_csv")
+                    and self.processing_func.save_csv
+                    and hasattr(self.processing_func, "csv_data")
+                ):
+
+                    # Get the CSV data
+                    df = self.processing_func.csv_data
+
+                    # For functions that don't need an image output, use the original filepath
+                    # as the base for the CSV filename
+                    if (
+                        hasattr(self.processing_func, "no_image_output")
+                        and self.processing_func.no_image_output
+                    ):
+                        # Use the original filepath without creating a new image file
+                        base_path = os.path.splitext(filepath)[0]
+                        csv_path = f"{base_path}_regionprops.csv"
+
+                        # Don't save a duplicate image file
+                        if (
+                            os.path.exists(output_path)
+                            and output_path != filepath
+                        ):
+                            contextlib.suppress(OSError)
+                    else:
+                        # Create CSV filename from the output image path
+                        csv_path = (
+                            os.path.splitext(output_path)[0]
+                            + "_regionprops.csv"
+                        )
+
+                    # Save the CSV file
+                    df.to_csv(csv_path, index=False)
+                    print(f"Saved region properties to {csv_path}")
+
+                    # Add the CSV file to the result
+                    result["secondary_files"] = [csv_path]
+
+                    # If we don't need an image output, update the result to just point to the CSV
+                    if (
+                        hasattr(self.processing_func, "no_image_output")
+                        and self.processing_func.no_image_output
+                    ):
+                        result["processed_file"] = csv_path
+
+            return result
+
+        # Apply the monkey patch
+        ProcessingWorker.process_file = process_file_with_csv_export
+
+    except (NameError, AttributeError) as e:
+        print(f"Warning: Could not apply CSV export patch: {e}")
+        print(
+            "Region properties will be extracted but CSV files may not be saved"
+        )
+
 
     # binary to labels
     @BatchProcessingRegistry.register(
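For illustration (not part of the diff), the two new label-oriented functions can be exercised on a toy mask; this assumes the plugin's dependencies (scikit-image, pandas, napari) are installed so the functions are defined at import time:

import numpy as np
from napari_tmidas.processing_functions.skimage_filters import (
    extract_region_properties,
    semantic_to_instance,
)

# Toy binary mask with two separate blobs
mask = np.zeros((64, 64), dtype=np.uint8)
mask[5:15, 5:15] = 1
mask[40:50, 40:50] = 1

labels = semantic_to_instance(mask)   # two connected components -> labels 1 and 2
print(np.unique(labels))              # [0 1 2]

extract_region_properties(labels, properties="area,centroid", min_area=10)
print(extract_region_properties.csv_data)  # pandas DataFrame, one row per label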
{napari_tmidas-0.1.8.5.dist-info → napari_tmidas-0.2.0.dist-info}/METADATA CHANGED

@@ -1,7 +1,7 @@
 Metadata-Version: 2.4
 Name: napari-tmidas
-Version: 0.
-Summary: A plugin for batch processing of confocal microscopy images
+Version: 0.2.0
+Summary: A plugin for batch processing of confocal and whole-slide microscopy images of biological tissues
 Author: Marco Meer
 Author-email: marco.meer@pm.me
 License:
@@ -83,42 +83,14 @@ Dynamic: license-file
 [](https://github.com/macromeer/napari-tmidas/actions)
 [](https://napari-hub.org/plugins/napari-tmidas)
 <!-- [](https://codecov.io/gh/macromeer/napari-tmidas) -->
-The `napari-tmidas` plugin consists of a growing collection of pipelines for fast batch processing of microscopy images. This is a WIP and based on the CLI version of [T-MIDAS](https://github.com/MercaderLabAnatomy/T-MIDAS).
-
-## Feature Overview
-
-1. **Image Processing**
-   - Process image folders with:
-     - Gamma correction & histogram equalization
-     - Z-projection and channel splitting
-     - Gaussian & median filters
-     - Thresholding (Otsu/manual)
-     - Label cleaning & binary conversion
-     - RGB to labels conversion
-     - Cellpose 3.0 automated segmentation
-     - File compression (Zstandard)
-
-2. **Label Inspection**
-   - Review and edit label images with auto-save
-
-3. **Microscopy Image Conversion**
-   - Convert .nd2/.lif/.ndpi/.czi/acquifer → .tif/.zarr with metadata preservation
-
-4. **Crop Anything**
-   - Interactive ROI selection via click interface
-
-5. **ROI Colocalization**
-   - Count colocalized labels across multiple channels
-
-
-
-### Coming Soon
-New features arriving April 2025
+The `napari-tmidas` plugin consists of a growing collection of pipelines for fast batch processing of confocal and whole slide microscopy images of biological tissues. This is a WIP and based on the CLI version of [T-MIDAS](https://github.com/MercaderLabAnatomy/T-MIDAS).
 
+## Features
+Currently, napari-tmidas provides pipelines as widgets for batch image conversion / cropping / processing, ROI colocalization and label inspection (cf. [Usage](#usage) below).
 
 ## Installation
 
-First install Napari in a virtual environment:
+First, install Napari in a virtual environment:
 
 mamba create -y -n napari-tmidas -c conda-forge python=3.11 tqdm
 mamba activate napari-tmidas
@@ -156,16 +128,15 @@ To use the plugin, start napari in the activated virtual environment with this t
 
 mamba run -n napari-tmidas napari
 
-You can find the installed plugin
-
-
-
+You can then find the installed plugin in the Plugins tab.
 
 ### Microscopy Image Conversion
 
 You can start this pipeline via `Plugins > T-MIDAS > Batch Microscopy Image Conversion`. Currently, this pipeline supports the conversion of `.nd2, .lif, .ndpi, .czi` and acquifer data. After scanning a folder of your choice for microscopy image data, select a file in the first column of the table and preview and export any image data it contains.
 
-
+
+<img src="https://github.com/user-attachments/assets/e377ca71-2f30-447d-825e-d2feebf7061b" alt="Microscopy Image Conversion Widget" style="width:75%; height:auto;">
+
 
 ### Image Processing
 
@@ -173,7 +144,7 @@ You can start this pipeline via `Plugins > T-MIDAS > Batch Microscopy Image Conv
 
 
 
-2. As a result, a table appears with the found images.
+2. As a result, a table appears with the found images. You can click on them to inspect them in the viewer.
 
 
 
@@ -182,26 +153,28 @@ You can start this pipeline via `Plugins > T-MIDAS > Batch Microscopy Image Conv
 
 
 4. You can click on the images in the table to show them in the viewer. For example first click on one of the `Original Files`, and then the corresponding `Processed File` to see an overlay.
+
+<img src="https://github.com/user-attachments/assets/cfe84828-c1cc-4196-9a53-5dfb82d5bfce" alt="Image Processing Widget" style="width:75%; height:auto;">
 
-
 
 Note that whenever you click on an `Original File` or `Processed File` in the table, it will replace the one that is currently shown in the viewer. So naturally, you'd first select the original image, and then the processed image to correctly see the image pair that you want to inspect.
 
 ### Batch Label Inspection
 If you have already segmented a folder full of images and now you want to maybe inspect and edit each label image, you can use the `Plugins > T-MIDAS > Batch Label Inspection`, which automatically saves your changes to the existing label image once you click the `Save Changes and Continue` button (bottom right).
 
-
+<img src="https://github.com/user-attachments/assets/0bf8c6ae-4212-449d-8183-e91b23ba740e" alt="Batch Label Inspection Widget" style="width:75%; height:auto;">
+
 
 ### Crop Anything
 This pipeline combines the Segment Anything Model (SAM) for automatic object detection with an interactive interface for selecting and cropping multiple objects from images. To launch the widget, open `Plugins > T-MIDAS > Batch Crop Anything`. Click the image below to see a video demo.
 
-
+<img src="https://github.com/user-attachments/assets/6d72c2a2-1064-4a27-b398-a9b86fcbc443" alt="Crop Anything Widget" style="width:75%; height:auto;">
+
 
 ### ROI Colocalization
 This pipeline quantifies colocalization between labeled regions of interest (ROIs) across multiple image channels. It determines the extent of overlap between ROIs in a reference channel and those in one or two other channels. The output is a table of colocalization counts. Optionally, the size of reference channel ROIs, as well as the total or median size of colocalizing ROIs in the other channels, can be included. Colocalization is determined using Boolean masking. The number of colocalizing instances is determined by counting unique label IDs within the overlapping regions. Typically, the reference channel contains larger structures, while other channels contain smaller, potentially nested, structures. For example, the reference channel might contain cell bodies, with the second and third channels containing nuclei and sub-nuclear objects, respectively.
 
-
-
+<img src="https://github.com/user-attachments/assets/2f9022a0-7b88-4588-a448-250f07a634d7" alt="ROI Colocalization Widget" style="width:75%; height:auto;">
 
 ## Contributing
 
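The colocalization counting described in the ROI Colocalization section above can be pictured with a toy example (this is only an illustration of the Boolean-masking idea, not the plugin's actual implementation):

import numpy as np

ref_labels = np.array([[1, 1, 0],
                       [1, 1, 0],
                       [0, 0, 2]])   # reference channel ROIs
ch2_labels = np.array([[5, 0, 0],
                       [0, 6, 0],
                       [0, 0, 0]])   # second channel ROIs

for ref_id in np.unique(ref_labels[ref_labels > 0]):
    overlap = ch2_labels[ref_labels == ref_id]    # channel-2 values inside this ROI
    coloc_ids = np.unique(overlap[overlap > 0])   # unique label IDs overlapping it
    print(ref_id, len(coloc_ids))                 # ROI 1 -> 2 colocalizing labels, ROI 2 -> 0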
{napari_tmidas-0.1.8.5.dist-info → napari_tmidas-0.2.0.dist-info}/RECORD CHANGED

@@ -2,12 +2,12 @@ napari_tmidas/__init__.py,sha256=YNBLESwk8jr_TlDdkSC1CwH0tf0CKHF1i2_efzLjdpk,589
 napari_tmidas/_crop_anything.py,sha256=NItpE6uzfeKujh8a53TDDkFN2thpKC5NGiXMpAmSnnM,45446
 napari_tmidas/_file_conversion.py,sha256=V6evJmggUwOFzJO203Y5ltboHXEWNJQckZPedGRkrLI,72203
 napari_tmidas/_file_selector.py,sha256=sZOY0QNwyAgugsEzG5pqZLfrVeCHwjPEW2C_BHndzyI,39595
-napari_tmidas/_label_inspection.py,sha256=
+napari_tmidas/_label_inspection.py,sha256=74V36y5EnGs0vWK1FC7Kui4CPLBW_SIg885PSKeZsJ4,9184
 napari_tmidas/_reader.py,sha256=A9_hdDxtVkVGmbOsbqgnARCSvpEh7GGPo7ylzmbnu8o,2485
 napari_tmidas/_registry.py,sha256=Oz9HFJh41MKRLeKxRuc7x7yzc-OrmoTdRFnfngFU_XE,2007
 napari_tmidas/_roi_colocalization.py,sha256=OVjdHvtFN07DgrtTX8uqbrxZL6jVwl2L3klorgW2C9k,43196
 napari_tmidas/_sample_data.py,sha256=khuv1jemz_fCjqNwEKMFf83Ju0EN4S89IKydsUMmUxw,645
-napari_tmidas/_version.py,sha256=
+napari_tmidas/_version.py,sha256=iB5DfB5V6YB5Wo4JmvS-txT42QtmGaWcWp3udRT7zCI,511
 napari_tmidas/_widget.py,sha256=u9uf9WILAwZg_InhFyjWInY4ej1TV1a59dR8Fe3vNF8,4794
 napari_tmidas/_writer.py,sha256=wbVfHFjjHdybSg37VR4lVmL-kdCkDZsUPDJ66AVLaFQ,1941
 napari_tmidas/napari.yaml,sha256=1Am1dA0-ZtCXk6veIT6jrMz3zwQ7dF8_p9tZTFx_vTg,2641
@@ -17,16 +17,17 @@ napari_tmidas/_tests/test_sample_data.py,sha256=D1HU_C3hWpO3mlSW_7Z94xaYHDtxz0XU
 napari_tmidas/_tests/test_widget.py,sha256=I_d-Cra_CTcS0QdMItg_HMphvhj0XCx81JnFyCHk9lg,2204
 napari_tmidas/_tests/test_writer.py,sha256=4_MlZM9a5So74J16_4tIOJc6pwTOw9R0-oAE_YioIx4,122
 napari_tmidas/processing_functions/__init__.py,sha256=osXY9jSgDsrwFaS6ShPHP0wGRxMuX1mHRN9EDa9l41g,1891
-napari_tmidas/processing_functions/basic.py,sha256=
+napari_tmidas/processing_functions/basic.py,sha256=kQcuA0_yPh6MwzkopcsBghcf3wMSR7uA1QDksS5SG2o,8761
 napari_tmidas/processing_functions/cellpose_env_manager.py,sha256=zngS5eborsJUimFn_g1Lm_YOZk2ZNIKxceWNxOjpxEg,4885
 napari_tmidas/processing_functions/cellpose_segmentation.py,sha256=7BCHj_QA1QJEl1NrsuoIFAJ040_SCWw0U-U_xjCyk18,16187
 napari_tmidas/processing_functions/colocalization.py,sha256=AiTTVAcVhKuuHZhrj5IHwbzns7-GE6ewvFqhYy1L-do,7657
 napari_tmidas/processing_functions/file_compression.py,sha256=mxR-yqBdc-T1XI3StIXpW8h5xGdCOtLQjt8uoRFpDSY,6859
+napari_tmidas/processing_functions/sam2_env_manager.py,sha256=WzKOLFeu1KZRRBryKdWkDm6QJolhs3rCj-KD6Q-z9dE,2897
 napari_tmidas/processing_functions/scipy_filters.py,sha256=kKpDAlQQ0ZNbkt77QUWi-Bwolk6MMDvtG_bZJV3MjOo,1612
-napari_tmidas/processing_functions/skimage_filters.py,sha256=
-napari_tmidas-0.
-napari_tmidas-0.
-napari_tmidas-0.
-napari_tmidas-0.
-napari_tmidas-0.
-napari_tmidas-0.
+napari_tmidas/processing_functions/skimage_filters.py,sha256=8UiXp5Wi7V-5prPZO-NgfkVi_kEYs7RUyINiCMxqTl0,15306
+napari_tmidas-0.2.0.dist-info/licenses/LICENSE,sha256=tSjiOqj57exmEIfP2YVPCEeQf0cH49S6HheQR8IiY3g,1485
+napari_tmidas-0.2.0.dist-info/METADATA,sha256=yUMPnhgtxQ3nN_uHvazNs8k7iEc9zUZxhTxCuF2Q9Jg,11560
+napari_tmidas-0.2.0.dist-info/WHEEL,sha256=0CuiUZ_p9E4cD6NyLD6UG80LBXYyiSYZOKDm5lp32xk,91
+napari_tmidas-0.2.0.dist-info/entry_points.txt,sha256=fbVjzbJTm4aDMIBtel1Lyqvq-CwXY7wmCOo_zJ-jtRY,60
+napari_tmidas-0.2.0.dist-info/top_level.txt,sha256=63ybdxCZ4SeT13f_Ou4TsivGV_2Gtm_pJOXToAt30_E,14
+napari_tmidas-0.2.0.dist-info/RECORD,,

{napari_tmidas-0.1.8.5.dist-info → napari_tmidas-0.2.0.dist-info}/entry_points.txt: file without changes
{napari_tmidas-0.1.8.5.dist-info → napari_tmidas-0.2.0.dist-info}/licenses/LICENSE: file without changes
{napari_tmidas-0.1.8.5.dist-info → napari_tmidas-0.2.0.dist-info}/top_level.txt: file without changes