napari-tmidas 0.2.0__py3-none-any.whl → 0.2.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- napari_tmidas/_crop_anything.py +1895 -608
- napari_tmidas/_file_selector.py +87 -6
- napari_tmidas/_version.py +2 -2
- napari_tmidas/processing_functions/basic.py +494 -23
- napari_tmidas/processing_functions/careamics_denoising.py +324 -0
- napari_tmidas/processing_functions/careamics_env_manager.py +339 -0
- napari_tmidas/processing_functions/cellpose_env_manager.py +55 -20
- napari_tmidas/processing_functions/cellpose_segmentation.py +105 -218
- napari_tmidas/processing_functions/sam2_mp4.py +283 -0
- napari_tmidas/processing_functions/skimage_filters.py +31 -1
- napari_tmidas/processing_functions/timepoint_merger.py +490 -0
- napari_tmidas/processing_functions/trackastra_tracking.py +303 -0
- {napari_tmidas-0.2.0.dist-info → napari_tmidas-0.2.1.dist-info}/METADATA +15 -8
- {napari_tmidas-0.2.0.dist-info → napari_tmidas-0.2.1.dist-info}/RECORD +18 -13
- {napari_tmidas-0.2.0.dist-info → napari_tmidas-0.2.1.dist-info}/WHEEL +1 -1
- {napari_tmidas-0.2.0.dist-info → napari_tmidas-0.2.1.dist-info}/entry_points.txt +0 -0
- {napari_tmidas-0.2.0.dist-info → napari_tmidas-0.2.1.dist-info}/licenses/LICENSE +0 -0
- {napari_tmidas-0.2.0.dist-info → napari_tmidas-0.2.1.dist-info}/top_level.txt +0 -0
|
@@ -0,0 +1,283 @@
|
|
|
1
|
+
import shutil
|
|
2
|
+
import subprocess
|
|
3
|
+
import tempfile
|
|
4
|
+
from pathlib import Path
|
|
5
|
+
|
|
6
|
+
import cv2
|
|
7
|
+
import numpy as np
|
|
8
|
+
import tifffile
|
|
9
|
+
|
|
10
|
+
|
|
11
|
+
def tif_to_mp4(input_path, fps=10, cleanup_temp=True):
    """
    Convert a TIF stack to MP4 using JPEG2000 lossless as an intermediate format.

    Each frame is normalized to uint8, written to a temporary PNG, re-encoded
    as a lossless JPEG2000 image via FFmpeg, and the resulting sequence is
    assembled into an H.264 MP4 placed next to the input file.

    Parameters:
    -----------
    input_path : str or Path
        Path to the input TIF file

    fps : int, optional
        Frames per second for the video. Default is 10.

    cleanup_temp : bool, optional
        Whether to clean up temporary JP2 files. Default is True.

    Returns:
    --------
    str
        Path to the created MP4 file

    Raises:
    -------
    RuntimeError
        If the ffmpeg executable is not found on PATH.
    ValueError
        If the file cannot be opened by either tifffile or OpenCV, or if
        the TIFF has an unsupported shape.
    """
    input_path = Path(input_path)

    # Generate output MP4 path in the same folder
    output_path = input_path.with_suffix(".mp4")

    # Create a temporary directory for JP2 files
    temp_dir = Path(tempfile.mkdtemp(prefix="tif_to_jp2_"))

    try:
        # Read the TIFF file
        print(f"Reading {input_path}...")

        # tifffile yields RGB channel order; the OpenCV fallback yields BGR.
        # Track the source so color frames are converted to BGR exactly once
        # before cv2.imwrite (which expects BGR input).
        frames_are_bgr = False
        try:
            # Try using tifffile which handles scientific imaging formats better
            with tifffile.TiffFile(input_path) as tif:
                # Check if it's a multi-page TIFF (Z stack or time series)
                if len(tif.pages) > 1:
                    # Read as a stack - this will handle TYX or ZYX format
                    stack = tifffile.imread(input_path)
                    print(f"Stack shape: {stack.shape}, dtype: {stack.dtype}")

                    # Check dimensions
                    if len(stack.shape) == 3:
                        # We have a 3D stack (T/Z, Y, X)
                        print(f"Detected 3D stack with shape {stack.shape}")
                        frames = stack
                        is_grayscale = True
                    elif len(stack.shape) == 4:
                        if stack.shape[3] == 3:  # (T/Z, Y, X, 3) - color
                            print(
                                f"Detected 4D color stack with shape {stack.shape}"
                            )
                            frames = stack
                            is_grayscale = False
                        else:
                            # We have a 4D stack (likely T, Z, Y, X)
                            print(
                                f"Detected 4D stack with shape {stack.shape}. Flattening first two dimensions."
                            )
                            # Flatten first two dimensions
                            t_dim, z_dim = stack.shape[0], stack.shape[1]
                            height, width = stack.shape[2], stack.shape[3]
                            frames = stack.reshape(
                                t_dim * z_dim, height, width
                            )
                            is_grayscale = True
                    else:
                        raise ValueError(
                            f"Unsupported TIFF shape: {stack.shape}"
                        )
                else:
                    # Single page TIFF
                    frame = tifffile.imread(input_path)
                    print(f"Detected single frame with shape {frame.shape}")
                    if len(frame.shape) == 2:  # (Y, X) - grayscale
                        frames = np.array([frame])
                        is_grayscale = True
                    elif (
                        len(frame.shape) == 3 and frame.shape[2] == 3
                    ):  # (Y, X, 3) - color
                        frames = np.array([frame])
                        is_grayscale = False
                    else:
                        raise ValueError(
                            f"Unsupported frame shape: {frame.shape}"
                        )

            # Print min/max/mean values to help diagnose
            sample_frame = frames[0]
            print(
                f"Sample frame - min: {np.min(sample_frame)}, max: {np.max(sample_frame)}, "
                f"mean: {np.mean(sample_frame):.2f}, dtype: {sample_frame.dtype}"
            )

        except (
            OSError,
            tifffile.TiffFileError,
            ValueError,
            FileNotFoundError,
            MemoryError,
        ) as e:
            print(f"Error reading with tifffile: {e}")
            print("Falling back to OpenCV...")

            # Try with OpenCV as fallback
            cap = cv2.VideoCapture(str(input_path))
            if not cap.isOpened():
                raise ValueError(
                    f"Could not open file {input_path} with either tifffile or OpenCV"
                ) from e

            frames = []
            while True:
                ret, frame = cap.read()
                if not ret:
                    break
                frames.append(frame)

            frames = np.array(frames)
            is_grayscale = len(frames[0].shape) == 2 or frames[0].shape[2] == 1
            # cv2.VideoCapture already delivers frames in BGR order
            frames_are_bgr = True
            cap.release()

        # Get the number of frames
        num_frames = len(frames)
        print(f"Processing {num_frames} frames...")

        # Check if ffmpeg is available
        if not shutil.which("ffmpeg"):
            raise RuntimeError("FFmpeg is required but was not found.")

        # Process each frame and save as lossless JP2
        jp2_paths = []

        for i in range(num_frames):
            # Get the frame
            frame = frames[i].copy()

            # For analysis and debugging
            if i == 0 or i == num_frames - 1:
                print(f"Frame {i} shape: {frame.shape}, dtype: {frame.dtype}")
                print(
                    f"Frame {i} stats - min: {np.min(frame)}, max: {np.max(frame)}, mean: {np.mean(frame):.2f}"
                )

            # Improved handling for float32 and other types - prioritize conversion to uint8
            if frame.dtype != np.uint8:
                # Get actual data range
                min_val, max_val = np.min(frame), np.max(frame)

                # For float32 and other types, convert directly to uint8
                if (
                    np.issubdtype(frame.dtype, np.floating)
                    or min_val < max_val
                ):
                    # Scale to full uint8 range [0, 255]; the epsilon keeps
                    # a constant float frame from dividing by zero
                    frame = np.clip(
                        (frame - min_val)
                        * 255.0
                        / (max_val - min_val + 1e-10),
                        0,
                        255,
                    ).astype(np.uint8)
                else:
                    # If min equals max (constant image), create a mid-gray image
                    frame = np.full_like(frame, 128, dtype=np.uint8)

                # Report conversion stats for debugging
                if i == 0 or i == num_frames - 1:
                    print(
                        f"After conversion - min: {np.min(frame)}, max: {np.max(frame)}, "
                        f"mean: {np.mean(frame):.2f}, dtype: {frame.dtype}"
                    )

            # cv2.imwrite expects BGR order: expand grayscale frames to
            # 3 channels, and swap RGB frames that came from tifffile
            if is_grayscale and len(frame.shape) == 2:
                bgr_frame = cv2.cvtColor(frame, cv2.COLOR_GRAY2BGR)
            elif not is_grayscale and not frames_are_bgr:
                # Fix: RGB frames were previously written unconverted, which
                # swapped the red and blue channels in the resulting video
                bgr_frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
            else:
                bgr_frame = frame

            # Save frame as intermediate PNG
            png_path = temp_dir / f"frame_{i:06d}.png"
            cv2.imwrite(str(png_path), bgr_frame)

            # Use FFmpeg to convert PNG to lossless JPEG2000
            jp2_path = temp_dir / f"frame_{i:06d}.jp2"
            jp2_paths.append(jp2_path)

            # FFmpeg command for lossless JP2 conversion
            cmd = [
                "ffmpeg",
                "-y",
                "-i",
                str(png_path),
                "-codec",
                "jpeg2000",
                "-vf",
                "pad=ceil(iw/2)*2:ceil(ih/2)*2",  # width and height are required to be even numbers
                "-pix_fmt",
                (
                    "rgb24"
                    if not is_grayscale or len(bgr_frame.shape) == 3
                    else "gray"
                ),
                "-compression_level",
                "0",  # Lossless setting
                str(jp2_path),
            ]

            try:
                subprocess.run(
                    cmd,
                    check=True,
                    capture_output=True,
                )
            except subprocess.CalledProcessError as e:
                print(
                    f"FFmpeg JP2 encoding error: {e.stderr.decode() if e.stderr else 'Unknown error'}"
                )
                # Fallback to PNG if JP2 encoding fails
                print(f"Falling back to PNG for frame {i}")
                jp2_paths[-1] = png_path

            # Delete the PNG file if JP2 was successful and not the same as fallback
            if jp2_paths[-1] != png_path and png_path.exists():
                png_path.unlink()

            # Report progress
            if (i + 1) % 50 == 0 or i == 0 or i == num_frames - 1:
                print(f"Processed {i+1}/{num_frames} frames")

        # Use FFmpeg to create MP4 from JP2/PNG frames
        print(f"Creating MP4 file from {len(jp2_paths)} frames...")

        # Get the extension of the first frame to determine input pattern
        ext = jp2_paths[0].suffix

        # FFmpeg's %06d sequence input matches a single extension; if a
        # partial JP2-encoding failure produced a mixed JP2/PNG sequence,
        # frames with the other extension would be silently skipped
        if any(p.suffix != ext for p in jp2_paths):
            print(
                "Warning: mixed frame formats in temporary directory; "
                f"only '{ext}' frames will be assembled into the MP4"
            )

        cmd = [
            "ffmpeg",
            "-framerate",
            str(fps),
            "-i",
            str(temp_dir / f"frame_%06d{ext}"),
            "-c:v",
            "libx264",
            "-profile:v",
            "high",
            "-crf",
            "17",  # High quality
            "-pix_fmt",
            "yuv420p",  # Compatible colorspace
            "-y",
            str(output_path),
        ]

        try:
            subprocess.run(cmd, check=True, capture_output=True)
            print(f"Successfully created MP4: {output_path}")
        except subprocess.CalledProcessError as e:
            print(
                f"FFmpeg MP4 creation error: {e.stderr.decode() if e.stderr else 'Unknown error'}"
            )
            raise

        # Single exit point: the duplicate return that previously followed
        # the try/finally was unreachable and has been removed.
        return str(output_path)

    finally:
        # Clean up temporary directory
        if cleanup_temp:
            shutil.rmtree(temp_dir)
        else:
            print(f"Temporary files saved in: {temp_dir}")
|
|
@@ -282,7 +282,7 @@ if SKIMAGE_AVAILABLE:
|
|
|
282
282
|
|
|
283
283
|
# Get region properties
|
|
284
284
|
if intensity_image:
|
|
285
|
-
# Use the same image as both label and intensity image
|
|
285
|
+
# Use the same image as both label and intensity image # this is wrong
|
|
286
286
|
regions = skimage.measure.regionprops(
|
|
287
287
|
label_image, intensity_image=image
|
|
288
288
|
)
|
|
@@ -425,3 +425,33 @@ def binary_to_labels(image: np.ndarray) -> np.ndarray:
|
|
|
425
425
|
label_image = skimage.measure.label(label_image, connectivity=2)
|
|
426
426
|
|
|
427
427
|
return label_image
|
|
428
|
+
|
|
429
|
+
|
|
430
|
+
@BatchProcessingRegistry.register(
    name="Convert to 8-bit (uint8)",
    suffix="_uint8",
    description="Convert image data to 8-bit (uint8) format with proper scaling",
)
def convert_to_uint8(image: np.ndarray) -> np.ndarray:
    """
    Convert image data to 8-bit (uint8) format with proper scaling.

    This function handles any input image dimensions (including TZYX) and properly
    rescales data to the 0-1 range before conversion to uint8. Ideal for scientific
    imaging data with arbitrary value ranges.

    Parameters:
    -----------
    image : numpy.ndarray
        Input image array of any numerical dtype

    Returns:
    --------
    numpy.ndarray
        8-bit image with shape preserved and values properly scaled
    """
    # Guard: a constant-valued image has no intensity range to stretch, and
    # rescale_intensity would divide by a zero (max - min) range, producing
    # NaNs that img_as_ubyte cannot convert. Map such images to zeros.
    if np.min(image) == np.max(image):
        return np.zeros(image.shape, dtype=np.uint8)

    # Rescale to 0-1 range (works for any input range, negative or positive)
    img_rescaled = skimage.exposure.rescale_intensity(image, out_range=(0, 1))

    # Convert the rescaled image to uint8 (scales [0, 1] floats to [0, 255])
    return skimage.img_as_ubyte(img_rescaled)
|