napari-tmidas 0.1.9__py3-none-any.whl → 0.2.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- napari_tmidas/_crop_anything.py +1895 -608
- napari_tmidas/_file_selector.py +87 -6
- napari_tmidas/_label_inspection.py +94 -47
- napari_tmidas/_version.py +2 -2
- napari_tmidas/processing_functions/basic.py +554 -23
- napari_tmidas/processing_functions/careamics_denoising.py +324 -0
- napari_tmidas/processing_functions/careamics_env_manager.py +339 -0
- napari_tmidas/processing_functions/cellpose_env_manager.py +55 -20
- napari_tmidas/processing_functions/cellpose_segmentation.py +105 -218
- napari_tmidas/processing_functions/sam2_env_manager.py +111 -0
- napari_tmidas/processing_functions/sam2_mp4.py +283 -0
- napari_tmidas/processing_functions/skimage_filters.py +323 -0
- napari_tmidas/processing_functions/timepoint_merger.py +490 -0
- napari_tmidas/processing_functions/trackastra_tracking.py +303 -0
- {napari_tmidas-0.1.9.dist-info → napari_tmidas-0.2.1.dist-info}/METADATA +15 -8
- {napari_tmidas-0.1.9.dist-info → napari_tmidas-0.2.1.dist-info}/RECORD +20 -14
- {napari_tmidas-0.1.9.dist-info → napari_tmidas-0.2.1.dist-info}/WHEEL +1 -1
- {napari_tmidas-0.1.9.dist-info → napari_tmidas-0.2.1.dist-info}/entry_points.txt +0 -0
- {napari_tmidas-0.1.9.dist-info → napari_tmidas-0.2.1.dist-info}/licenses/LICENSE +0 -0
- {napari_tmidas-0.1.9.dist-info → napari_tmidas-0.2.1.dist-info}/top_level.txt +0 -0
napari_tmidas/processing_functions/sam2_env_manager.py
@@ -0,0 +1,111 @@
+"""
+processing_functions/sam2_env_manager.py
+
+This module manages a dedicated virtual environment for SAM2.
+"""
+
+import os
+import platform
+import shutil
+import subprocess
+import venv
+
+# Define the environment directory in the user's home folder
+ENV_DIR = os.path.join(
+    os.path.expanduser("~"), ".napari-tmidas", "envs", "sam2-env"
+)
+
+
+def is_sam2_installed():
+    """Check if SAM2 is installed in the current environment."""
+    try:
+        import importlib.util
+
+        return importlib.util.find_spec("sam2") is not None
+    except ImportError:
+        return False
+
+
+def is_env_created():
+    """Check if the dedicated environment exists."""
+    env_python = get_env_python_path()
+    return os.path.exists(env_python)
+
+
+def get_env_python_path():
+    """Get the path to the Python executable in the environment."""
+    if platform.system() == "Windows":
+        return os.path.join(ENV_DIR, "Scripts", "python.exe")
+    else:
+        return os.path.join(ENV_DIR, "bin", "python")
+
+
+def create_sam2_env():
+    """Create a dedicated virtual environment for SAM2."""
+    # Ensure the environment directory exists
+    os.makedirs(os.path.dirname(ENV_DIR), exist_ok=True)
+
+    # Remove existing environment if it exists
+    if os.path.exists(ENV_DIR):
+        shutil.rmtree(ENV_DIR)
+
+    print(f"Creating SAM2 environment at {ENV_DIR}...")
+
+    # Create a new virtual environment
+    venv.create(ENV_DIR, with_pip=True)
+
+    # Path to the Python executable in the new environment
+    env_python = get_env_python_path()
+
+    # Upgrade pip
+    print("Upgrading pip...")
+    subprocess.check_call(
+        [env_python, "-m", "pip", "install", "--upgrade", "pip"]
+    )
+
+    # Install torch and torchvision first for compatibility
+    print("Installing torch and torchvision...")
+    subprocess.check_call(
+        [env_python, "-m", "pip", "install", "torch", "torchvision"]
+    )
+
+    # Install sam2 from GitHub
+    print("Installing SAM2 from GitHub...")
+    subprocess.check_call(
+        [
+            env_python,
+            "-m",
+            "pip",
+            "install",
+            "git+https://github.com/facebookresearch/sam2.git",
+        ]
+    )
+
+    subprocess.run(
+        [
+            env_python,
+            "-c",
+            "import torch; import torchvision; print('PyTorch version:', torch.__version__); print('Torchvision version:', torchvision.__version__); print('CUDA is available:', torch.cuda.is_available())",
+        ]
+    )
+
+    print("SAM2 environment created successfully.")
+    return env_python
+
+
+def run_sam2_in_env(func_name, args_dict):
+    """
+    Run SAM2 in a dedicated environment with minimal complexity.
+
+    Parameters:
+    -----------
+    func_name : str
+        Name of the SAM2 function to run (currently unused)
+    args_dict : dict
+        Dictionary of arguments for SAM2
+
+    Returns:
+    --------
+    numpy.ndarray
+        Segmentation masks
+    """
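For orientation, a minimal usage sketch of the helpers above (not part of the diff; the import path follows the file location napari_tmidas/processing_functions/sam2_env_manager.py, and the printout is illustrative only):

# Hypothetical usage sketch: reuse the dedicated SAM2 environment if it
# already exists, otherwise build it once.
from napari_tmidas.processing_functions import sam2_env_manager as sem

if sem.is_env_created():
    env_python = sem.get_env_python_path()
else:
    env_python = sem.create_sam2_env()
print("SAM2 environment Python:", env_python)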
napari_tmidas/processing_functions/sam2_mp4.py
@@ -0,0 +1,283 @@
+import shutil
+import subprocess
+import tempfile
+from pathlib import Path
+
+import cv2
+import numpy as np
+import tifffile
+
+
+def tif_to_mp4(input_path, fps=10, cleanup_temp=True):
+    """
+    Convert a TIF stack to MP4 using JPEG2000 lossless as an intermediate format.
+
+    Parameters:
+    -----------
+    input_path : str or Path
+        Path to the input TIF file
+
+    fps : int, optional
+        Frames per second for the video. Default is 10.
+
+    cleanup_temp : bool, optional
+        Whether to clean up temporary JP2 files. Default is True.
+
+    Returns:
+    --------
+    str
+        Path to the created MP4 file
+    """
+    input_path = Path(input_path)
+
+    # Generate output MP4 path in the same folder
+    output_path = input_path.with_suffix(".mp4")
+
+    # Create a temporary directory for JP2 files
+    temp_dir = Path(tempfile.mkdtemp(prefix="tif_to_jp2_"))
+
+    try:
+        # Read the TIFF file
+        print(f"Reading {input_path}...")
+        try:
+            # Try using tifffile which handles scientific imaging formats better
+            with tifffile.TiffFile(input_path) as tif:
+                # Check if it's a multi-page TIFF (Z stack or time series)
+                if len(tif.pages) > 1:
+                    # Read as a stack - this will handle TYX or ZYX format
+                    stack = tifffile.imread(input_path)
+                    print(f"Stack shape: {stack.shape}, dtype: {stack.dtype}")
+
+                    # Check dimensions
+                    if len(stack.shape) == 3:
+                        # We have a 3D stack (T/Z, Y, X)
+                        print(f"Detected 3D stack with shape {stack.shape}")
+                        frames = stack
+                        is_grayscale = True
+                    elif len(stack.shape) == 4:
+                        if stack.shape[3] == 3:  # (T/Z, Y, X, 3) - color
+                            print(
+                                f"Detected 4D color stack with shape {stack.shape}"
+                            )
+                            frames = stack
+                            is_grayscale = False
+                        else:
+                            # We have a 4D stack (likely T, Z, Y, X)
+                            print(
+                                f"Detected 4D stack with shape {stack.shape}. Flattening first two dimensions."
+                            )
+                            # Flatten first two dimensions
+                            t_dim, z_dim = stack.shape[0], stack.shape[1]
+                            height, width = stack.shape[2], stack.shape[3]
+                            frames = stack.reshape(
+                                t_dim * z_dim, height, width
+                            )
+                            is_grayscale = True
+                    else:
+                        raise ValueError(
+                            f"Unsupported TIFF shape: {stack.shape}"
+                        )
+                else:
+                    # Single page TIFF
+                    frame = tifffile.imread(input_path)
+                    print(f"Detected single frame with shape {frame.shape}")
+                    if len(frame.shape) == 2:  # (Y, X) - grayscale
+                        frames = np.array([frame])
+                        is_grayscale = True
+                    elif (
+                        len(frame.shape) == 3 and frame.shape[2] == 3
+                    ):  # (Y, X, 3) - color
+                        frames = np.array([frame])
+                        is_grayscale = False
+                    else:
+                        raise ValueError(
+                            f"Unsupported frame shape: {frame.shape}"
+                        )
+
+            # Print min/max/mean values to help diagnose
+            sample_frame = frames[0]
+            print(
+                f"Sample frame - min: {np.min(sample_frame)}, max: {np.max(sample_frame)}, "
+                f"mean: {np.mean(sample_frame):.2f}, dtype: {sample_frame.dtype}"
+            )
+
+        except (
+            OSError,
+            tifffile.TiffFileError,
+            ValueError,
+            FileNotFoundError,
+            MemoryError,
+        ) as e:
+            print(f"Error reading with tifffile: {e}")
+            print("Falling back to OpenCV...")
+
+            # Try with OpenCV as fallback
+            cap = cv2.VideoCapture(str(input_path))
+            if not cap.isOpened():
+                raise ValueError(
+                    f"Could not open file {input_path} with either tifffile or OpenCV"
+                ) from e
+
+            frames = []
+            while True:
+                ret, frame = cap.read()
+                if not ret:
+                    break
+                frames.append(frame)
+
+            frames = np.array(frames)
+            is_grayscale = len(frames[0].shape) == 2 or frames[0].shape[2] == 1
+            cap.release()
+
+        # Get the number of frames
+        num_frames = len(frames)
+        print(f"Processing {num_frames} frames...")
+
+        # Check if ffmpeg is available
+        if not shutil.which("ffmpeg"):
+            raise RuntimeError("FFmpeg is required but was not found.")
+
+        # Process each frame and save as lossless JP2
+        jp2_paths = []
+
+        for i in range(num_frames):
+            # Get the frame
+            frame = frames[i].copy()
+
+            # For analysis and debugging
+            if i == 0 or i == num_frames - 1:
+                print(f"Frame {i} shape: {frame.shape}, dtype: {frame.dtype}")
+                print(
+                    f"Frame {i} stats - min: {np.min(frame)}, max: {np.max(frame)}, mean: {np.mean(frame):.2f}"
+                )
+
+            # Improved handling for float32 and other types - prioritize conversion to uint8
+            if frame.dtype != np.uint8:
+                # Get actual data range
+                min_val, max_val = np.min(frame), np.max(frame)
+
+                # For float32 and other types, convert directly to uint8
+                if (
+                    np.issubdtype(frame.dtype, np.floating)
+                    or min_val < max_val
+                ):
+                    # Scale to full uint8 range [0, 255] with proper handling of min/max
+                    frame = np.clip(
+                        (frame - min_val)
+                        * 255.0
+                        / (max_val - min_val + 1e-10),
+                        0,
+                        255,
+                    ).astype(np.uint8)
+                else:
+                    # If min equals max (constant image), create a mid-gray image
+                    frame = np.full_like(frame, 128, dtype=np.uint8)
+
+                # Report conversion stats for debugging
+                if i == 0 or i == num_frames - 1:
+                    print(
+                        f"After conversion - min: {np.min(frame)}, max: {np.max(frame)}, "
+                        f"mean: {np.mean(frame):.2f}, dtype: {frame.dtype}"
+                    )
+
+            # Convert grayscale to RGB if needed for compatibility
+            if is_grayscale and len(frame.shape) == 2:
+                # For uint8, we can use cv2.cvtColor
+                rgb_frame = cv2.cvtColor(frame, cv2.COLOR_GRAY2RGB)
+            else:
+                rgb_frame = frame
+
+            # Save frame as intermediate PNG
+            png_path = temp_dir / f"frame_{i:06d}.png"
+            cv2.imwrite(str(png_path), rgb_frame)
+
+            # Use FFmpeg to convert PNG to lossless JPEG2000
+            jp2_path = temp_dir / f"frame_{i:06d}.jp2"
+            jp2_paths.append(jp2_path)
+
+            # FFmpeg command for lossless JP2 conversion
+            cmd = [
+                "ffmpeg",
+                "-y",
+                "-i",
+                str(png_path),
+                "-codec",
+                "jpeg2000",
+                "-vf",
+                "pad=ceil(iw/2)*2:ceil(ih/2)*2",  # width and height are required to be even numbers
+                "-pix_fmt",
+                (
+                    "rgb24"
+                    if not is_grayscale or len(rgb_frame.shape) == 3
+                    else "gray"
+                ),
+                "-compression_level",
+                "0",  # Lossless setting
+                str(jp2_path),
+            ]
+
+            try:
+                subprocess.run(
+                    cmd,
+                    check=True,
+                    capture_output=True,
+                )
+            except subprocess.CalledProcessError as e:
+                print(
+                    f"FFmpeg JP2 encoding error: {e.stderr.decode() if e.stderr else 'Unknown error'}"
+                )
+                # Fallback to PNG if JP2 encoding fails
+                print(f"Falling back to PNG for frame {i}")
+                jp2_paths[-1] = png_path
+
+            # Delete the PNG file if JP2 was successful and not the same as fallback
+            if jp2_paths[-1] != png_path and png_path.exists():
+                png_path.unlink()
+
+            # Report progress
+            if (i + 1) % 50 == 0 or i == 0 or i == num_frames - 1:
+                print(f"Processed {i+1}/{num_frames} frames")
+
+        # Use FFmpeg to create MP4 from JP2/PNG frames
+        print(f"Creating MP4 file from {len(jp2_paths)} frames...")
+
+        # Get the extension of the first frame to determine input pattern
+        ext = jp2_paths[0].suffix
+
+        cmd = [
+            "ffmpeg",
+            "-framerate",
+            str(fps),
+            "-i",
+            str(temp_dir / f"frame_%06d{ext}"),
+            "-c:v",
+            "libx264",
+            "-profile:v",
+            "high",
+            "-crf",
+            "17",  # High quality
+            "-pix_fmt",
+            "yuv420p",  # Compatible colorspace
+            "-y",
+            str(output_path),
+        ]
+
+        try:
+            subprocess.run(cmd, check=True, capture_output=True)
+            print(f"Successfully created MP4: {output_path}")
+        except subprocess.CalledProcessError as e:
+            print(
+                f"FFmpeg MP4 creation error: {e.stderr.decode() if e.stderr else 'Unknown error'}"
+            )
+            raise
+
+        return str(output_path)
+
+    finally:
+        # Clean up temporary directory
+        if cleanup_temp:
+            shutil.rmtree(temp_dir)
+        else:
+            print(f"Temporary files saved in: {temp_dir}")
+
+    return str(output_path)
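For reference, a minimal usage sketch of tif_to_mp4 as added above (not part of the diff; the input path is a placeholder, and ffmpeg must be on PATH since the function checks for it):

# Hypothetical usage sketch: convert a TIF stack to an MP4 next to the input.
from napari_tmidas.processing_functions.sam2_mp4 import tif_to_mp4

# "/path/to/stack.tif" is a placeholder; the MP4 is written alongside it as
# "/path/to/stack.mp4" at 10 frames per second, and the temporary JP2/PNG
# frames are removed afterwards.
mp4_path = tif_to_mp4("/path/to/stack.tif", fps=10, cleanup_temp=True)
print("Wrote", mp4_path)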