napari-tmidas 0.1.9__tar.gz → 0.2.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {napari_tmidas-0.1.9 → napari_tmidas-0.2.0}/PKG-INFO +1 -1
- {napari_tmidas-0.1.9 → napari_tmidas-0.2.0}/src/napari_tmidas/_label_inspection.py +94 -47
- {napari_tmidas-0.1.9 → napari_tmidas-0.2.0}/src/napari_tmidas/_version.py +2 -2
- {napari_tmidas-0.1.9 → napari_tmidas-0.2.0}/src/napari_tmidas/processing_functions/basic.py +60 -0
- napari_tmidas-0.2.0/src/napari_tmidas/processing_functions/sam2_env_manager.py +111 -0
- napari_tmidas-0.2.0/src/napari_tmidas/processing_functions/skimage_filters.py +427 -0
- {napari_tmidas-0.1.9 → napari_tmidas-0.2.0}/src/napari_tmidas.egg-info/PKG-INFO +1 -1
- {napari_tmidas-0.1.9 → napari_tmidas-0.2.0}/src/napari_tmidas.egg-info/SOURCES.txt +1 -0
- napari_tmidas-0.1.9/src/napari_tmidas/processing_functions/skimage_filters.py +0 -134
- {napari_tmidas-0.1.9 → napari_tmidas-0.2.0}/.github/dependabot.yml +0 -0
- {napari_tmidas-0.1.9 → napari_tmidas-0.2.0}/.github/workflows/test_and_deploy.yml +0 -0
- {napari_tmidas-0.1.9 → napari_tmidas-0.2.0}/.gitignore +0 -0
- {napari_tmidas-0.1.9 → napari_tmidas-0.2.0}/.napari-hub/DESCRIPTION.md +0 -0
- {napari_tmidas-0.1.9 → napari_tmidas-0.2.0}/.napari-hub/config.yml +0 -0
- {napari_tmidas-0.1.9 → napari_tmidas-0.2.0}/.pre-commit-config.yaml +0 -0
- {napari_tmidas-0.1.9 → napari_tmidas-0.2.0}/LICENSE +0 -0
- {napari_tmidas-0.1.9 → napari_tmidas-0.2.0}/MANIFEST.in +0 -0
- {napari_tmidas-0.1.9 → napari_tmidas-0.2.0}/README.md +0 -0
- {napari_tmidas-0.1.9 → napari_tmidas-0.2.0}/pyproject.toml +0 -0
- {napari_tmidas-0.1.9 → napari_tmidas-0.2.0}/setup.cfg +0 -0
- {napari_tmidas-0.1.9 → napari_tmidas-0.2.0}/src/napari_tmidas/__init__.py +0 -0
- {napari_tmidas-0.1.9 → napari_tmidas-0.2.0}/src/napari_tmidas/_crop_anything.py +0 -0
- {napari_tmidas-0.1.9 → napari_tmidas-0.2.0}/src/napari_tmidas/_file_conversion.py +0 -0
- {napari_tmidas-0.1.9 → napari_tmidas-0.2.0}/src/napari_tmidas/_file_selector.py +0 -0
- {napari_tmidas-0.1.9 → napari_tmidas-0.2.0}/src/napari_tmidas/_reader.py +0 -0
- {napari_tmidas-0.1.9 → napari_tmidas-0.2.0}/src/napari_tmidas/_registry.py +0 -0
- {napari_tmidas-0.1.9 → napari_tmidas-0.2.0}/src/napari_tmidas/_roi_colocalization.py +0 -0
- {napari_tmidas-0.1.9 → napari_tmidas-0.2.0}/src/napari_tmidas/_sample_data.py +0 -0
- {napari_tmidas-0.1.9 → napari_tmidas-0.2.0}/src/napari_tmidas/_tests/__init__.py +0 -0
- {napari_tmidas-0.1.9 → napari_tmidas-0.2.0}/src/napari_tmidas/_tests/test_reader.py +0 -0
- {napari_tmidas-0.1.9 → napari_tmidas-0.2.0}/src/napari_tmidas/_tests/test_sample_data.py +0 -0
- {napari_tmidas-0.1.9 → napari_tmidas-0.2.0}/src/napari_tmidas/_tests/test_widget.py +0 -0
- {napari_tmidas-0.1.9 → napari_tmidas-0.2.0}/src/napari_tmidas/_tests/test_writer.py +0 -0
- {napari_tmidas-0.1.9 → napari_tmidas-0.2.0}/src/napari_tmidas/_widget.py +0 -0
- {napari_tmidas-0.1.9 → napari_tmidas-0.2.0}/src/napari_tmidas/_writer.py +0 -0
- {napari_tmidas-0.1.9 → napari_tmidas-0.2.0}/src/napari_tmidas/napari.yaml +0 -0
- {napari_tmidas-0.1.9 → napari_tmidas-0.2.0}/src/napari_tmidas/processing_functions/__init__.py +0 -0
- {napari_tmidas-0.1.9 → napari_tmidas-0.2.0}/src/napari_tmidas/processing_functions/cellpose_env_manager.py +0 -0
- {napari_tmidas-0.1.9 → napari_tmidas-0.2.0}/src/napari_tmidas/processing_functions/cellpose_segmentation.py +0 -0
- {napari_tmidas-0.1.9 → napari_tmidas-0.2.0}/src/napari_tmidas/processing_functions/colocalization.py +0 -0
- {napari_tmidas-0.1.9 → napari_tmidas-0.2.0}/src/napari_tmidas/processing_functions/file_compression.py +0 -0
- {napari_tmidas-0.1.9 → napari_tmidas-0.2.0}/src/napari_tmidas/processing_functions/scipy_filters.py +0 -0
- {napari_tmidas-0.1.9 → napari_tmidas-0.2.0}/src/napari_tmidas.egg-info/dependency_links.txt +0 -0
- {napari_tmidas-0.1.9 → napari_tmidas-0.2.0}/src/napari_tmidas.egg-info/entry_points.txt +0 -0
- {napari_tmidas-0.1.9 → napari_tmidas-0.2.0}/src/napari_tmidas.egg-info/requires.txt +0 -0
- {napari_tmidas-0.1.9 → napari_tmidas-0.2.0}/src/napari_tmidas.egg-info/top_level.txt +0 -0
- {napari_tmidas-0.1.9 → napari_tmidas-0.2.0}/tox.ini +0 -0
{napari_tmidas-0.1.9 → napari_tmidas-0.2.0}/src/napari_tmidas/_label_inspection.py

@@ -11,10 +11,11 @@ Users can make and save changes to the labels, and proceed to the next pair.
 import os
 import sys
 
+import numpy as np
 from magicgui import magicgui
 from napari.layers import Labels
 from napari.viewer import Viewer
-from qtpy.QtWidgets import QFileDialog, QPushButton
+from qtpy.QtWidgets import QFileDialog, QMessageBox, QPushButton
 from skimage.io import imread  # , imsave
 
 sys.path.append("src/napari_tmidas")
@@ -29,63 +30,105 @@ class LabelInspector:
     def load_image_label_pairs(self, folder_path: str, label_suffix: str):
         """
         Load image-label pairs from a folder.
-        Finds
+        Finds all files with the given suffix and matches them with their corresponding image files.
+        Validates that label files are in the correct format.
         """
+        if not os.path.exists(folder_path) or not os.path.isdir(folder_path):
+            self.viewer.status = f"Folder path does not exist: {folder_path}"
+            return
+
         files = os.listdir(folder_path)
-        label_files = [file for file in files if file.endswith(label_suffix)]
 
-        #
-        ... (removed lines not captured in this diff view)
+        # Find all files that contain the label suffix
+        # Using "in" instead of "endswith" for more flexibility
+        potential_label_files = [
+            file for file in files if label_suffix in file
+        ]
+
+        if not potential_label_files:
+            self.viewer.status = f"No files found with suffix '{label_suffix}'"
+            QMessageBox.warning(
+                None,
+                "No Label Files Found",
+                f"No files containing '{label_suffix}' were found in {folder_path}.",
+            )
+            return
 
-        #
+        # Process all potential label files
         self.image_label_pairs = []
-        ... (removed lines not captured in this diff view)
-                        os.path.join(folder_path, lbl),
-                    )
-                )
-                continue
-
-            # If not found, try finding any file that starts with the base name
+        skipped_files = []
+        format_issues = []
+
+        for label_file in potential_label_files:
+            label_path = os.path.join(folder_path, label_file)
+
+            # Get file extension
+            _, file_extension = os.path.splitext(label_file)
+
+            # Try to find a matching image file (everything before the label suffix)
+            base_name = label_file.split(label_suffix)[0]
+
+            # Look for potential images matching the base name
             potential_images = [
                 file
                 for file in files
-                if file.startswith(
+                if file.startswith(base_name)
+                and file != label_file
                 and file.endswith(file_extension)
-                and file != lbl
             ]
 
+            # If we found at least one potential image
             if potential_images:
-                ... (removed lines not captured in this diff view)
+                image_path = os.path.join(folder_path, potential_images[0])
+
+                # Validate label file format
+                try:
+                    label_data = imread(label_path)
+
+                    # Check if it looks like a label image (integer type)
+                    if not np.issubdtype(label_data.dtype, np.integer):
+                        format_issues.append(
+                            (label_file, "not an integer type")
+                        )
+                        continue
+
+                    # Add valid pair
+                    self.image_label_pairs.append((image_path, label_path))
+
+                except (
+                    FileNotFoundError,
+                    OSError,
+                    ValueError,
+                    Exception,
+                ) as e:
+                    skipped_files.append((label_file, str(e)))
+            else:
+                skipped_files.append((label_file, "no matching image found"))
+
+        # Report results
+        if self.image_label_pairs:
+            self.viewer.status = (
+                f"Found {len(self.image_label_pairs)} valid image-label pairs."
+            )
+            self.current_index = 0
+            self._load_current_pair()
+        else:
+            self.viewer.status = "No valid image-label pairs found."
+
+        # Show detailed report if there were issues
+        if skipped_files or format_issues:
+            msg = ""
+            if skipped_files:
+                msg += "Skipped files:\n"
+                for file, reason in skipped_files:
+                    msg += f"- {file}: {reason}\n"
+
+            if format_issues:
+                msg += "\nFormat issues:\n"
+                for file, issue in format_issues:
+                    msg += f"- {file}: {issue}\n"
+
+            QMessageBox.information(None, "Loading Report", msg)
 
     def _load_current_pair(self):
         """
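The pairing rule above (suffix containment, shared base name, then an integer-dtype check on the label file) can be tried in isolation. A minimal sketch with hypothetical filenames, not part of the package:

    import numpy as np

    # Hypothetical folder listing; "_labels.tif" is the label suffix.
    files = ["cells_01.tif", "cells_01_labels.tif", "cells_02.tif", "notes.txt"]
    label_suffix = "_labels.tif"

    # Same matching rule as in the diff: suffix contained in the filename.
    label_files = [f for f in files if label_suffix in f]

    pairs = []
    for label_file in label_files:
        base_name = label_file.split(label_suffix)[0]  # "cells_01"
        candidates = [
            f for f in files
            if f.startswith(base_name) and f != label_file and f.endswith(".tif")
        ]
        if candidates:
            pairs.append((candidates[0], label_file))

    print(pairs)  # [('cells_01.tif', 'cells_01_labels.tif')]

    # Same dtype check as in the diff: label images should be integer-typed.
    labels = np.zeros((8, 8), dtype=np.uint16)
    assert np.issubdtype(labels.dtype, np.integer)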
@@ -110,6 +153,10 @@ class LabelInspector:
             label_image, name=f"Labels ({os.path.basename(label_path)})"
         )
 
+        # Show progress
+        total = len(self.image_label_pairs)
+        self.viewer.status = f"Viewing pair {self.current_index + 1} of {total}: {os.path.basename(image_path)}"
+
     def save_current_labels(self):
         """
         Save the current labels back to the original file.
@@ -172,7 +219,7 @@ class LabelInspector:
 @magicgui(
     call_button="Start Label Inspection",
     folder_path={"label": "Folder Path", "widget_type": "LineEdit"},
-    label_suffix={"label": "Label Suffix (e.g.,
+    label_suffix={"label": "Label Suffix (e.g., _labels.tif)"},
 )
 def label_inspector(
     folder_path: str,
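For reference, a magicgui-decorated function such as label_inspector is itself a widget that can be docked in napari. A minimal usage sketch; the import path is taken from this diff and napari's standard dock-widget API is assumed:

    import napari
    from napari_tmidas._label_inspection import label_inspector  # module shown in this diff

    viewer = napari.Viewer()
    # Dock the widget, enter the folder path and the label suffix
    # (e.g., "_labels.tif"), then click "Start Label Inspection".
    viewer.window.add_dock_widget(label_inspector, area="right")
    napari.run()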
{napari_tmidas-0.1.9 → napari_tmidas-0.2.0}/src/napari_tmidas/processing_functions/basic.py

@@ -100,6 +100,66 @@ def max_z_projection(image: np.ndarray) -> np.ndarray:
     return (projection * max_val).clip(0, max_val).astype(image.dtype)
 
 
+@BatchProcessingRegistry.register(
+    name="Max Z Projection (TZYX)",
+    suffix="_maxZ_tzyx",
+    description="Maximum intensity projection along the Z-axis for TZYX data",
+    parameters={},  # No parameters needed - fully automatic
+)
+def max_z_projection_tzyx(image: np.ndarray) -> np.ndarray:
+    """
+    Memory-efficient maximum intensity projection along the Z-axis for TZYX data.
+
+    This function intelligently chooses the most memory-efficient approach
+    based on the input data size and available system memory.
+
+    Parameters:
+    -----------
+    image : numpy.ndarray
+        Input 4D image with TZYX dimensions
+
+    Returns:
+    --------
+    numpy.ndarray
+        3D image with TYX dimensions after max projection
+    """
+    # Validate input dimensions
+    if image.ndim != 4:
+        raise ValueError(f"Expected 4D image (TZYX), got {image.ndim}D image")
+
+    # Get dimensions
+    t_size, z_size, y_size, x_size = image.shape
+
+    # For Z projection, we only need one Z plane in memory at a time
+    # so we can process this plane by plane to minimize memory usage
+
+    # Create output array with appropriate dimensions and same dtype
+    result = np.zeros((t_size, y_size, x_size), dtype=image.dtype)
+
+    # Process each time point separately to minimize memory usage
+    for t in range(t_size):
+        # If data type allows direct max, use it
+        if np.issubdtype(image.dtype, np.integer) or np.issubdtype(
+            image.dtype, np.floating
+        ):
+            # Process Z planes efficiently
+            # Start with the first Z plane
+            z_max = image[t, 0].copy()
+
+            # Compare with each subsequent Z plane
+            for z in range(1, z_size):
+                # Use numpy's maximum function to update max values in-place
+                np.maximum(z_max, image[t, z], out=z_max)
+
+            # Store result for this time point
+            result[t] = z_max
+        else:
+            # For unusual data types, fall back to numpy's max function
+            result[t] = np.max(image[t], axis=0)
+
+    return result
+
+
 @BatchProcessingRegistry.register(
     name="Split Channels",
     suffix="_split_channels",
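A quick sanity check of the new TZYX projection against a plain NumPy reduction on synthetic data; the import path follows the package layout shown in SOURCES.txt and is otherwise an assumption:

    import numpy as np
    from napari_tmidas.processing_functions.basic import max_z_projection_tzyx

    # Synthetic TZYX stack: 3 time points, 5 z-planes, 64x64 pixels.
    rng = np.random.default_rng(0)
    stack = rng.integers(0, 4096, size=(3, 5, 64, 64), dtype=np.uint16)

    projected = max_z_projection_tzyx(stack)
    print(projected.shape, projected.dtype)  # (3, 64, 64) uint16

    # The plane-by-plane loop should agree with a direct reduction over axis 1 (Z).
    assert np.array_equal(projected, stack.max(axis=1))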
napari_tmidas-0.2.0/src/napari_tmidas/processing_functions/sam2_env_manager.py

@@ -0,0 +1,111 @@
+"""
+processing_functions/sam2_env_manager.py
+
+This module manages a dedicated virtual environment for SAM2.
+"""
+
+import os
+import platform
+import shutil
+import subprocess
+import venv
+
+# Define the environment directory in user's home folder
+ENV_DIR = os.path.join(
+    os.path.expanduser("~"), ".napari-tmidas", "envs", "sam2-env"
+)
+
+
+def is_sam2_installed():
+    """Check if SAM2 is installed in the current environment."""
+    try:
+        import importlib.util
+
+        return importlib.util.find_spec("sam2-env") is not None
+    except ImportError:
+        return False
+
+
+def is_env_created():
+    """Check if the dedicated environment exists."""
+    env_python = get_env_python_path()
+    return os.path.exists(env_python)
+
+
+def get_env_python_path():
+    """Get the path to the Python executable in the environment."""
+    if platform.system() == "Windows":
+        return os.path.join(ENV_DIR, "Scripts", "python.exe")
+    else:
+        return os.path.join(ENV_DIR, "bin", "python")
+
+
+def create_sam2_env():
+    """Create a dedicated virtual environment for SAM2."""
+    # Ensure the environment directory exists
+    os.makedirs(os.path.dirname(ENV_DIR), exist_ok=True)
+
+    # Remove existing environment if it exists
+    if os.path.exists(ENV_DIR):
+        shutil.rmtree(ENV_DIR)
+
+    print(f"Creating SAM2 environment at {ENV_DIR}...")
+
+    # Create a new virtual environment
+    venv.create(ENV_DIR, with_pip=True)
+
+    # Path to the Python executable in the new environment
+    env_python = get_env_python_path()
+
+    # Upgrade pip
+    print("Upgrading pip...")
+    subprocess.check_call(
+        [env_python, "-m", "pip", "install", "--upgrade", "pip"]
+    )
+
+    # Install numpy and torch first for compatibility
+    print("Installing torch and torchvision...")
+    subprocess.check_call(
+        [env_python, "-m", "pip", "install", "torch", "torchvision"]
+    )
+
+    # Install sam2 from GitHub
+    print("Installing SAM2 from GitHub...")
+    subprocess.check_call(
+        [
+            env_python,
+            "-m",
+            "pip",
+            "install",
+            "git+https://github.com/facebookresearch/sam2.git",
+        ]
+    )
+
+    subprocess.run(
+        [
+            env_python,
+            "-c",
+            "import torch; import torchvision; print('PyTorch version:', torch.__version__); print('Torchvision version:', torchvision.__version__); print('CUDA is available:', torch.cuda.is_available())",
+        ]
+    )
+
+    print("SAM2 environment created successfully.")
+    return env_python
+
+
+def run_sam2_in_env(func_name, args_dict):
+    """
+    Run SAM2 in a dedicated environment with minimal complexity.
+
+    Parameters:
+    -----------
+    func_name : str
+        Name of the SAM2 function to run (currently unused)
+    args_dict : dict
+        Dictionary of arguments for SAM2
+
+    Returns:
+    --------
+    numpy.ndarray
+        Segmentation masks
+    """
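The helpers above can be combined to provision the environment on first use and then run code with its interpreter. A minimal sketch that only uses the functions shown in this file; the verification snippet passed to the interpreter is an assumption, not the plugin's actual call pattern:

    import subprocess

    from napari_tmidas.processing_functions.sam2_env_manager import (
        create_sam2_env,
        get_env_python_path,
        is_env_created,
    )

    # Create the dedicated venv on first use (installs torch, torchvision and SAM2).
    if not is_env_created():
        create_sam2_env()

    # Run an arbitrary snippet with the environment's interpreter,
    # e.g. to verify that SAM2 imports inside that venv.
    env_python = get_env_python_path()
    subprocess.check_call(
        [env_python, "-c", "import sam2; print('SAM2 import OK')"]
    )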
napari_tmidas-0.2.0/src/napari_tmidas/processing_functions/skimage_filters.py

@@ -0,0 +1,427 @@
+# processing_functions/skimage_filters.py
+"""
+Processing functions that depend on scikit-image.
+"""
+import numpy as np
+
+try:
+    import skimage.exposure
+    import skimage.filters
+    import skimage.morphology
+
+    SKIMAGE_AVAILABLE = True
+except ImportError:
+    SKIMAGE_AVAILABLE = False
+    print(
+        "scikit-image not available, some processing functions will be disabled"
+    )
+
+import contextlib
+import os
+
+import pandas as pd
+
+from napari_tmidas._file_selector import ProcessingWorker
+from napari_tmidas._registry import BatchProcessingRegistry
+
+if SKIMAGE_AVAILABLE:
+
+    # Equalize histogram
+    @BatchProcessingRegistry.register(
+        name="Equalize Histogram",
+        suffix="_equalized",
+        description="Equalize histogram of image",
+    )
+    def equalize_histogram(
+        image: np.ndarray, clip_limit: float = 0.01
+    ) -> np.ndarray:
+        """
+        Equalize histogram of image
+        """
+
+        return skimage.exposure.equalize_hist(image)
+
+    # simple otsu thresholding
+    @BatchProcessingRegistry.register(
+        name="Otsu Thresholding (semantic)",
+        suffix="_otsu_semantic",
+        description="Threshold image using Otsu's method to obtain a binary image",
+    )
+    def otsu_thresholding(image: np.ndarray) -> np.ndarray:
+        """
+        Threshold image using Otsu's method
+        """
+
+        image = skimage.img_as_ubyte(image)  # convert to 8-bit
+        thresh = skimage.filters.threshold_otsu(image)
+        return (image > thresh).astype(np.uint32)
+
+    # instance segmentation
+    @BatchProcessingRegistry.register(
+        name="Otsu Thresholding (instance)",
+        suffix="_otsu_labels",
+        description="Threshold image using Otsu's method to obtain a multi-label image",
+    )
+    def otsu_thresholding_instance(image: np.ndarray) -> np.ndarray:
+        """
+        Threshold image using Otsu's method
+        """
+        image = skimage.img_as_ubyte(image)  # convert to 8-bit
+        thresh = skimage.filters.threshold_otsu(image)
+        return skimage.measure.label(image > thresh).astype(np.uint32)
+
+    # simple thresholding
+    @BatchProcessingRegistry.register(
+        name="Manual Thresholding (8-bit)",
+        suffix="_thresh",
+        description="Threshold image using a fixed threshold to obtain a binary image",
+        parameters={
+            "threshold": {
+                "type": int,
+                "default": 128,
+                "min": 0,
+                "max": 255,
+                "description": "Threshold value",
+            },
+        },
+    )
+    def simple_thresholding(
+        image: np.ndarray, threshold: int = 128
+    ) -> np.ndarray:
+        """
+        Threshold image using a fixed threshold
+        """
+        # convert to 8-bit
+        image = skimage.img_as_ubyte(image)
+        return image > threshold
+
+    # remove small objects
+    @BatchProcessingRegistry.register(
+        name="Remove Small Labels",
+        suffix="_rm_small",
+        description="Remove small labels from label images",
+        parameters={
+            "min_size": {
+                "type": int,
+                "default": 100,
+                "min": 1,
+                "max": 100000,
+                "description": "Remove labels smaller than: ",
+            },
+        },
+    )
+    def remove_small_objects(
+        image: np.ndarray, min_size: int = 100
+    ) -> np.ndarray:
+        """
+        Remove small labels from label images
+        """
+        return skimage.morphology.remove_small_objects(
+            image, min_size=min_size
+        )
+
+    @BatchProcessingRegistry.register(
+        name="Invert Image",
+        suffix="_inverted",
+        description="Invert pixel values in the image using scikit-image's invert function",
+    )
+    def invert_image(image: np.ndarray) -> np.ndarray:
+        """
+        Invert the image pixel values.
+
+        This function inverts the values in an image using scikit-image's invert function,
+        which handles different data types appropriately.
+
+        Parameters:
+        -----------
+        image : numpy.ndarray
+            Input image array
+
+        Returns:
+        --------
+        numpy.ndarray
+            Inverted image with the same data type as the input
+        """
+        # Make a copy to avoid modifying the original
+        image_copy = image.copy()
+
+        # Use skimage's invert function which handles all data types properly
+        return skimage.util.invert(image_copy)
+
+    @BatchProcessingRegistry.register(
+        name="Semantic to Instance Segmentation",
+        suffix="_instance",
+        description="Convert semantic segmentation masks to instance segmentation labels using connected components",
+    )
+    def semantic_to_instance(image: np.ndarray) -> np.ndarray:
+        """
+        Convert semantic segmentation masks to instance segmentation labels.
+
+        This function takes a binary or multi-class semantic segmentation mask and
+        converts it to an instance segmentation by finding connected components.
+        Each connected region receives a unique label.
+
+        Parameters:
+        -----------
+        image : numpy.ndarray
+            Input semantic segmentation mask
+
+        Returns:
+        --------
+        numpy.ndarray
+            Instance segmentation with unique labels for each connected component
+        """
+        # Create a copy to avoid modifying the original
+        instance_mask = image.copy()
+
+        # If the input is multi-class, process each class separately
+        if np.max(instance_mask) > 1:
+            # Get unique non-zero class values
+            class_values = np.unique(instance_mask)
+            class_values = class_values[
+                class_values > 0
+            ]  # Remove background (0)
+
+            # Create an empty output mask
+            result = np.zeros_like(instance_mask, dtype=np.uint32)
+
+            # Process each class
+            label_offset = 0
+            for class_val in class_values:
+                # Create binary mask for this class
+                binary_mask = (instance_mask == class_val).astype(np.uint8)
+
+                # Find connected components
+                labeled = skimage.measure.label(binary_mask, connectivity=2)
+
+                # Skip if no components found
+                if np.max(labeled) == 0:
+                    continue
+
+                # Add offset to avoid label overlap between classes
+                labeled[labeled > 0] += label_offset
+
+                # Add to result
+                result = np.maximum(result, labeled)
+
+                # Update offset for next class
+                label_offset = np.max(result)
+        else:
+            # For binary masks, just find connected components
+            result = skimage.measure.label(instance_mask > 0, connectivity=2)
+
+        return result.astype(np.uint32)
+
+    @BatchProcessingRegistry.register(
+        name="Extract Region Properties",
+        suffix="_props",  # Changed to indicate this is for CSV output only
+        description="Extract properties of labeled regions and save as CSV (no image output)",
+        parameters={
+            "properties": {
+                "type": str,
+                "default": "area,bbox,centroid,eccentricity,euler_number,perimeter",
+                "description": "Comma-separated list of properties to extract (e.g., area,perimeter,centroid)",
+            },
+            "intensity_image": {
+                "type": bool,
+                "default": False,
+                "description": "Use input as intensity image for intensity-based measurements",
+            },
+            "min_area": {
+                "type": int,
+                "default": 0,
+                "min": 0,
+                "max": 100000,
+                "description": "Minimum area to include in results (pixels)",
+            },
+        },
+    )
+    def extract_region_properties(
+        image: np.ndarray,
+        properties: str = "area,bbox,centroid,eccentricity,euler_number,perimeter",
+        intensity_image: bool = False,
+        min_area: int = 0,
+    ) -> np.ndarray:
+        """
+        Extract properties of labeled regions in an image and save results as CSV.
+
+        This function analyzes all labeled regions in a label image and computes
+        various region properties like area, perimeter, centroid, etc. The results
+        are saved as a CSV file. The input image is returned unchanged.
+
+        Parameters:
+        -----------
+        image : numpy.ndarray
+            Input label image (instance segmentation)
+        properties : str
+            Comma-separated list of properties to extract
+            See scikit-image documentation for all available properties:
+            https://scikit-image.org/docs/stable/api/skimage.measure.html#skimage.measure.regionprops
+        intensity_image : bool
+            Whether to use the input image as intensity image for intensity-based measurements
+        min_area : int
+            Minimum area (in pixels) for regions to include in results
+
+        Returns:
+        --------
+        numpy.ndarray
+            The original image (unchanged)
+        """
+        # Check if we have a proper label image
+        if image.ndim < 2 or np.max(image) == 0:
+            print(
+                "Input must be a valid label image with at least one labeled region"
+            )
+            return image
+
+        # Convert image to proper format for regionprops
+        label_image = image.astype(np.int32)
+
+        # Parse the properties list
+        prop_list = [prop.strip() for prop in properties.split(",")]
+
+        # Get region properties
+        if intensity_image:
+            # Use the same image as both label and intensity image
+            regions = skimage.measure.regionprops(
+                label_image, intensity_image=image
+            )
+        else:
+            regions = skimage.measure.regionprops(label_image)
+
+        # Collect property data
+        data = []
+        for region in regions:
+            # Skip regions that are too small
+            if region.area < min_area:
+                continue
+
+            # Get all requested properties
+            region_data = {"label": region.label}
+            for prop in prop_list:
+                try:
+                    value = getattr(region, prop)
+
+                    # Handle different types of properties
+                    if isinstance(value, tuple) or (
+                        isinstance(value, np.ndarray) and value.ndim > 0
+                    ):
+                        # For tuple/array properties like centroid, bbox, etc.
+                        if isinstance(value, tuple):
+                            value = np.array(value)
+
+                        # For each element in the tuple/array
+                        for i, val in enumerate(value):
+                            region_data[f"{prop}_{i}"] = val
+                    else:
+                        # For scalar properties like area, perimeter, etc.
+                        region_data[prop] = value
+                except AttributeError:
+                    print(f"Property '{prop}' not found, skipping")
+                    continue
+
+            data.append(region_data)
+
+        # Create a DataFrame
+        df = pd.DataFrame(data)
+
+        # Store the DataFrame as an attribute of the function
+        extract_region_properties.csv_data = df
+        extract_region_properties.save_csv = True
+        extract_region_properties.no_image_output = (
+            True  # Indicate no image output needed
+        )
+
+        print(f"Extracted properties for {len(data)} regions")
+        return image
+
+    # Monkey patch to handle saving CSV files without creating a new image file
+    try:
+        # Check if ProcessingWorker is imported and available
+        original_process_file = ProcessingWorker.process_file
+
+        # Create a new version that handles saving CSV
+        def process_file_with_csv_export(self, filepath):
+            """Modified process_file function that saves CSV after processing."""
+            result = original_process_file(self, filepath)
+
+            # Check if there's a result and if we should save CSV
+            if isinstance(result, dict) and "processed_file" in result:
+                output_path = result["processed_file"]
+
+                # Check if the processing function had CSV data
+                if (
+                    hasattr(self.processing_func, "save_csv")
+                    and self.processing_func.save_csv
+                    and hasattr(self.processing_func, "csv_data")
+                ):
+
+                    # Get the CSV data
+                    df = self.processing_func.csv_data
+
+                    # For functions that don't need an image output, use the original filepath
+                    # as the base for the CSV filename
+                    if (
+                        hasattr(self.processing_func, "no_image_output")
+                        and self.processing_func.no_image_output
+                    ):
+                        # Use the original filepath without creating a new image file
+                        base_path = os.path.splitext(filepath)[0]
+                        csv_path = f"{base_path}_regionprops.csv"
+
+                        # Don't save a duplicate image file
+                        if (
+                            os.path.exists(output_path)
+                            and output_path != filepath
+                        ):
+                            contextlib.suppress(OSError)
+                    else:
+                        # Create CSV filename from the output image path
+                        csv_path = (
+                            os.path.splitext(output_path)[0]
+                            + "_regionprops.csv"
+                        )
+
+                    # Save the CSV file
+                    df.to_csv(csv_path, index=False)
+                    print(f"Saved region properties to {csv_path}")
+
+                    # Add the CSV file to the result
+                    result["secondary_files"] = [csv_path]
+
+                    # If we don't need an image output, update the result to just point to the CSV
+                    if (
+                        hasattr(self.processing_func, "no_image_output")
+                        and self.processing_func.no_image_output
+                    ):
+                        result["processed_file"] = csv_path
+
+            return result
+
+        # Apply the monkey patch
+        ProcessingWorker.process_file = process_file_with_csv_export
+
+    except (NameError, AttributeError) as e:
+        print(f"Warning: Could not apply CSV export patch: {e}")
+        print(
+            "Region properties will be extracted but CSV files may not be saved"
+        )
+
+
+    # binary to labels
+    @BatchProcessingRegistry.register(
+        name="Binary to Labels",
+        suffix="_labels",
+        description="Convert binary images to label images (connected components)",
+    )
+    def binary_to_labels(image: np.ndarray) -> np.ndarray:
+        """
+        Convert binary images to label images (connected components)
+        """
+        # Make a copy of the input image to avoid modifying the original
+        label_image = image.copy()
+
+        # Convert binary image to label image using connected components
+        label_image = skimage.measure.label(label_image, connectivity=2)
+
+        return label_image
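As a quick illustration of the new Extract Region Properties function: it returns the input unchanged and attaches the measurements to the function object, which the patched ProcessingWorker then writes to CSV during batch runs. A minimal sketch on a toy label image (import path assumed from SOURCES.txt):

    import numpy as np
    from skimage.measure import label

    from napari_tmidas.processing_functions.skimage_filters import (
        extract_region_properties,
    )

    # Toy label image: two separate square objects.
    mask = np.zeros((32, 32), dtype=np.uint8)
    mask[2:10, 2:10] = 1
    mask[20:30, 18:28] = 1
    labels = label(mask)

    # Returns the image unchanged; the measurements land on the function object.
    _ = extract_region_properties(labels, properties="area,centroid", min_area=10)
    print(extract_region_properties.csv_data)  # pandas DataFrame, one row per label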
{napari_tmidas-0.1.9 → napari_tmidas-0.2.0}/src/napari_tmidas.egg-info/SOURCES.txt

@@ -39,5 +39,6 @@ src/napari_tmidas/processing_functions/cellpose_env_manager.py
 src/napari_tmidas/processing_functions/cellpose_segmentation.py
 src/napari_tmidas/processing_functions/colocalization.py
 src/napari_tmidas/processing_functions/file_compression.py
+src/napari_tmidas/processing_functions/sam2_env_manager.py
 src/napari_tmidas/processing_functions/scipy_filters.py
 src/napari_tmidas/processing_functions/skimage_filters.py
napari_tmidas-0.1.9/src/napari_tmidas/processing_functions/skimage_filters.py

@@ -1,134 +0,0 @@
-# processing_functions/skimage_filters.py
-"""
-Processing functions that depend on scikit-image.
-"""
-import numpy as np
-
-try:
-    import skimage.exposure
-    import skimage.filters
-    import skimage.morphology
-
-    SKIMAGE_AVAILABLE = True
-except ImportError:
-    SKIMAGE_AVAILABLE = False
-    print(
-        "scikit-image not available, some processing functions will be disabled"
-    )
-
-from napari_tmidas._registry import BatchProcessingRegistry
-
-if SKIMAGE_AVAILABLE:
-
-    # Equalize histogram
-    @BatchProcessingRegistry.register(
-        name="Equalize Histogram",
-        suffix="_equalized",
-        description="Equalize histogram of image",
-    )
-    def equalize_histogram(
-        image: np.ndarray, clip_limit: float = 0.01
-    ) -> np.ndarray:
-        """
-        Equalize histogram of image
-        """
-
-        return skimage.exposure.equalize_hist(image)
-
-    # simple otsu thresholding
-    @BatchProcessingRegistry.register(
-        name="Otsu Thresholding (semantic)",
-        suffix="_otsu_semantic",
-        description="Threshold image using Otsu's method to obtain a binary image",
-    )
-    def otsu_thresholding(image: np.ndarray) -> np.ndarray:
-        """
-        Threshold image using Otsu's method
-        """
-
-        image = skimage.img_as_ubyte(image)  # convert to 8-bit
-        thresh = skimage.filters.threshold_otsu(image)
-        return (image > thresh).astype(np.uint32)
-
-    # instance segmentation
-    @BatchProcessingRegistry.register(
-        name="Otsu Thresholding (instance)",
-        suffix="_otsu_labels",
-        description="Threshold image using Otsu's method to obtain a multi-label image",
-    )
-    def otsu_thresholding_instance(image: np.ndarray) -> np.ndarray:
-        """
-        Threshold image using Otsu's method
-        """
-        image = skimage.img_as_ubyte(image)  # convert to 8-bit
-        thresh = skimage.filters.threshold_otsu(image)
-        return skimage.measure.label(image > thresh).astype(np.uint32)
-
-    # simple thresholding
-    @BatchProcessingRegistry.register(
-        name="Manual Thresholding (8-bit)",
-        suffix="_thresh",
-        description="Threshold image using a fixed threshold to obtain a binary image",
-        parameters={
-            "threshold": {
-                "type": int,
-                "default": 128,
-                "min": 0,
-                "max": 255,
-                "description": "Threshold value",
-            },
-        },
-    )
-    def simple_thresholding(
-        image: np.ndarray, threshold: int = 128
-    ) -> np.ndarray:
-        """
-        Threshold image using a fixed threshold
-        """
-        # convert to 8-bit
-        image = skimage.img_as_ubyte(image)
-        return image > threshold
-
-    # remove small objects
-    @BatchProcessingRegistry.register(
-        name="Remove Small Labels",
-        suffix="_rm_small",
-        description="Remove small labels from label images",
-        parameters={
-            "min_size": {
-                "type": int,
-                "default": 100,
-                "min": 1,
-                "max": 100000,
-                "description": "Remove labels smaller than: ",
-            },
-        },
-    )
-    def remove_small_objects(
-        image: np.ndarray, min_size: int = 100
-    ) -> np.ndarray:
-        """
-        Remove small labels from label images
-        """
-        return skimage.morphology.remove_small_objects(
-            image, min_size=min_size
-        )
-
-
-    # binary to labels
-    @BatchProcessingRegistry.register(
-        name="Binary to Labels",
-        suffix="_labels",
-        description="Convert binary images to label images (connected components)",
-    )
-    def binary_to_labels(image: np.ndarray) -> np.ndarray:
-        """
-        Convert binary images to label images (connected components)
-        """
-        # Make a copy of the input image to avoid modifying the original
-        label_image = image.copy()
-
-        # Convert binary image to label image using connected components
-        label_image = skimage.measure.label(label_image, connectivity=2)
-
-        return label_image