napari-tmidas 0.2.1__py3-none-any.whl → 0.2.4__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- napari_tmidas/__init__.py +35 -5
- napari_tmidas/_crop_anything.py +1458 -499
- napari_tmidas/_env_manager.py +76 -0
- napari_tmidas/_file_conversion.py +1646 -1131
- napari_tmidas/_file_selector.py +1464 -223
- napari_tmidas/_label_inspection.py +83 -8
- napari_tmidas/_processing_worker.py +309 -0
- napari_tmidas/_reader.py +6 -10
- napari_tmidas/_registry.py +15 -14
- napari_tmidas/_roi_colocalization.py +1221 -84
- napari_tmidas/_tests/test_crop_anything.py +123 -0
- napari_tmidas/_tests/test_env_manager.py +89 -0
- napari_tmidas/_tests/test_file_selector.py +90 -0
- napari_tmidas/_tests/test_grid_view_overlay.py +193 -0
- napari_tmidas/_tests/test_init.py +98 -0
- napari_tmidas/_tests/test_intensity_label_filter.py +222 -0
- napari_tmidas/_tests/test_label_inspection.py +86 -0
- napari_tmidas/_tests/test_processing_basic.py +500 -0
- napari_tmidas/_tests/test_processing_worker.py +142 -0
- napari_tmidas/_tests/test_regionprops_analysis.py +547 -0
- napari_tmidas/_tests/test_registry.py +135 -0
- napari_tmidas/_tests/test_scipy_filters.py +168 -0
- napari_tmidas/_tests/test_skimage_filters.py +259 -0
- napari_tmidas/_tests/test_split_channels.py +217 -0
- napari_tmidas/_tests/test_spotiflow.py +87 -0
- napari_tmidas/_tests/test_tyx_display_fix.py +142 -0
- napari_tmidas/_tests/test_ui_utils.py +68 -0
- napari_tmidas/_tests/test_widget.py +30 -0
- napari_tmidas/_tests/test_windows_basic.py +66 -0
- napari_tmidas/_ui_utils.py +57 -0
- napari_tmidas/_version.py +16 -3
- napari_tmidas/_widget.py +41 -4
- napari_tmidas/processing_functions/basic.py +557 -20
- napari_tmidas/processing_functions/careamics_env_manager.py +72 -99
- napari_tmidas/processing_functions/cellpose_env_manager.py +415 -112
- napari_tmidas/processing_functions/cellpose_segmentation.py +132 -191
- napari_tmidas/processing_functions/colocalization.py +513 -56
- napari_tmidas/processing_functions/grid_view_overlay.py +703 -0
- napari_tmidas/processing_functions/intensity_label_filter.py +422 -0
- napari_tmidas/processing_functions/regionprops_analysis.py +1280 -0
- napari_tmidas/processing_functions/sam2_env_manager.py +53 -69
- napari_tmidas/processing_functions/sam2_mp4.py +274 -195
- napari_tmidas/processing_functions/scipy_filters.py +403 -8
- napari_tmidas/processing_functions/skimage_filters.py +424 -212
- napari_tmidas/processing_functions/spotiflow_detection.py +949 -0
- napari_tmidas/processing_functions/spotiflow_env_manager.py +591 -0
- napari_tmidas/processing_functions/timepoint_merger.py +334 -86
- napari_tmidas/processing_functions/trackastra_tracking.py +24 -5
- {napari_tmidas-0.2.1.dist-info → napari_tmidas-0.2.4.dist-info}/METADATA +92 -39
- napari_tmidas-0.2.4.dist-info/RECORD +63 -0
- napari_tmidas/_tests/__init__.py +0 -0
- napari_tmidas-0.2.1.dist-info/RECORD +0 -38
- {napari_tmidas-0.2.1.dist-info → napari_tmidas-0.2.4.dist-info}/WHEEL +0 -0
- {napari_tmidas-0.2.1.dist-info → napari_tmidas-0.2.4.dist-info}/entry_points.txt +0 -0
- {napari_tmidas-0.2.1.dist-info → napari_tmidas-0.2.4.dist-info}/licenses/LICENSE +0 -0
- {napari_tmidas-0.2.1.dist-info → napari_tmidas-0.2.4.dist-info}/top_level.txt +0 -0
napari_tmidas/processing_functions/timepoint_merger.py

@@ -20,6 +20,23 @@ from skimage.io import imread
 
 from napari_tmidas._registry import BatchProcessingRegistry
 
+# Global set to track which folders have been processed in the current session
+# This prevents redundant processing when the function is called for each file
+_PROCESSED_FOLDERS = set()
+
+
+def reset_timepoint_merger_cache():
+    """
+    Reset the cache of processed folders.
+
+    Call this function if you want to reprocess folders that were already
+    processed in the current session. This is automatically managed during
+    normal batch processing, but can be called manually if needed.
+    """
+    global _PROCESSED_FOLDERS
+    _PROCESSED_FOLDERS.clear()
+    print("🔄 Timepoint merger cache cleared")
+
 
 def natural_sort_key(filename: str) -> List:
     """
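The per-session cache introduced above can also be cleared from user code between batch runs. A minimal sketch, assuming the package is installed and the module path matches the file list above:

    # Minimal sketch (not from the package docs): clear the per-session cache
    # so a folder that was already merged in this session is reprocessed.
    from napari_tmidas.processing_functions.timepoint_merger import (
        reset_timepoint_merger_cache,
    )

    reset_timepoint_merger_cache()  # prints "🔄 Timepoint merger cache cleared"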
@@ -74,7 +91,9 @@ def find_timepoint_images(
     return image_files
 
 
-def load_and_validate_images(
+def load_and_validate_images(
+    image_files: List[str], dimension_order: str = "auto"
+) -> Tuple[np.ndarray, str]:
     """
     Load all images and validate they have consistent dimensions.
 
@@ -82,6 +101,8 @@ def load_and_validate_images(image_files: List[str]) -> Tuple[np.ndarray, str]:
     -----------
     image_files : List[str]
         List of image file paths
+    dimension_order : str
+        Dimension order of input files: "auto", "YX", "ZYX", "CYX", "CZYX", or "TZYX"
 
     Returns:
     --------
@@ -97,70 +118,208 @@ def load_and_validate_images(image_files: List[str]) -> Tuple[np.ndarray, str]:
     )
 
     # Determine dimension order
-
-
-
-
-
-
-
-
+    is_4d_input = False
+    ndim = len(first_image.shape)
+
+    if dimension_order == "auto":
+        # Auto-detect based on shape
+        if ndim == 2:
+            # 2D image (YX)
+            detected_order = "YX"
+            output_order = "TYX"
+        elif ndim == 3:
+            # 3D image - assume ZYX (could also be CYX but we can't tell)
+            detected_order = "ZYX"
+            output_order = "TZYX"
+            print("⚠️ 3D images detected - assuming ZYX (Z-stack)")
+            print(
+                "   If this is CYX (color channels), set dimension_order='CYX'"
+            )
+        elif ndim == 4:
+            # 4D image - assume TZYX
+            detected_order = "TZYX"
+            output_order = "TZYX"
+            is_4d_input = True
+            print("⚠️ 4D images detected - assuming TZYX (time series)")
+            print(
+                "   If this is CZYX (color Z-stack), set dimension_order='CZYX'"
+            )
+        else:
+            raise ValueError(
+                f"Unsupported image dimensionality: {first_image.shape}"
+            )
     else:
-
-
-        )
+        # User specified the dimension order
+        detected_order = dimension_order.upper()
+        print(f"Using specified dimension order: {detected_order}")
+
+        # Validate the specified order matches the image shape
+        expected_ndim = len(detected_order)
+        if ndim != expected_ndim:
+            raise ValueError(
+                f"Dimension order '{detected_order}' expects {expected_ndim}D data, "
+                f"but images have shape {first_image.shape} ({ndim}D)"
+            )
+
+        # Determine output order based on input
+        if detected_order == "YX":
+            output_order = "TYX"
+        elif detected_order in ["ZYX", "CYX"]:
+            output_order = "T" + detected_order  # TZYX or TCYX
+        elif detected_order in ["CZYX", "TZYX"]:
+            output_order = (
+                "T" + detected_order
+            )  # TCZYX or TTZYX (will concatenate along T)
+            is_4d_input = True
+        else:
+            raise ValueError(f"Unsupported dimension order: {detected_order}")
+
+    expected_shape = first_image.shape
 
     # Pre-allocate array for all timepoints
-
-
-
-
+    if is_4d_input:
+        # For 4D input, we concatenate along time axis
+        if detected_order == "TZYX":
+            # Total timepoints = number of files × timepoints per file
+            total_timepoints = len(image_files) * first_image.shape[0]
+            stack_shape = (total_timepoints,) + first_image.shape[1:]
+            print(f"Concatenating {len(image_files)} files along time axis")
+            print(
+                f"  {len(image_files)} files × {first_image.shape[0]} timepoints = {total_timepoints} total"
+            )
+        elif detected_order == "CZYX":
+            # Treat as single timepoint with color channels
+            stack_shape = (len(image_files),) + first_image.shape
+            output_order = "TCZYX"
+            print("Creating time series of color Z-stacks")
+        else:
+            raise ValueError(f"Unexpected 4D order: {detected_order}")
 
-
-
+        print(
+            f"Creating time series with shape: {stack_shape} ({output_order})"
+        )
 
-
-
+        # Use the same dtype as the first image
+        time_series = np.zeros(stack_shape, dtype=first_image.dtype)
 
-
-
-
+        # Load all images and concatenate
+        if detected_order == "TZYX":
+            # Concatenating time series along T axis
+            current_t = 0
+            time_series[0 : first_image.shape[0]] = first_image
+            current_t += first_image.shape[0]
 
-
-
-
-
-
-
+            for i, image_file in enumerate(image_files[1:], 1):
+                try:
+                    image = imread(image_file)
+
+                    # Validate shape consistency
+                    if image.shape != expected_shape:
+                        raise ValueError(
+                            f"Image {os.path.basename(image_file)} has shape {image.shape}, "
+                            f"expected {expected_shape}. All images must have the same dimensions."
+                        )
+
+                    # Validate dtype consistency
+                    if image.dtype != first_image.dtype:
+                        print(
+                            f"Warning: Converting {os.path.basename(image_file)} from {image.dtype} to {first_image.dtype}"
+                        )
+                        image = image.astype(first_image.dtype)
+
+                    # Insert timepoints
+                    total_timepoints = len(image_files) * first_image.shape[0]
+                    next_t = current_t + image.shape[0]
+                    time_series[current_t:next_t] = image
+                    current_t = next_t
+
+                    if (i + 1) % 10 == 0 or i == len(image_files) - 1:
+                        print(
+                            f"Loaded {i + 1}/{len(image_files)} files ({current_t}/{total_timepoints} timepoints)"
+                        )
+
+                except Exception as e:
+                    raise ValueError(
+                        f"Error loading {image_file}: {str(e)}"
+                    ) from e
+        else:
+            # CZYX - just stack normally
+            time_series[0] = first_image
 
-
-
-
-            f"Warning: Converting {os.path.basename(image_file)} from {image.dtype} to {first_image.dtype}"
-        )
-        image = image.astype(first_image.dtype)
+            for i, image_file in enumerate(image_files[1:], 1):
+                try:
+                    image = imread(image_file)
 
-
+                    if image.shape != expected_shape:
+                        raise ValueError(
+                            f"Image {os.path.basename(image_file)} has shape {image.shape}, "
+                            f"expected {expected_shape}. All images must have the same dimensions."
+                        )
 
-
-
+                    if image.dtype != first_image.dtype:
+                        print(
+                            f"Warning: Converting {os.path.basename(image_file)} from {image.dtype} to {first_image.dtype}"
+                        )
+                        image = image.astype(first_image.dtype)
 
-
-        raise ValueError(f"Error loading {image_file}: {str(e)}") from e
+                    time_series[i] = image
 
-
-
+                    if (i + 1) % 10 == 0 or i == len(image_files) - 1:
+                        print(f"Loaded {i + 1}/{len(image_files)} files")
+
+                except Exception as e:
+                    raise ValueError(
+                        f"Error loading {image_file}: {str(e)}"
+                    ) from e
+    else:
+        # For 2D/3D input, add a new time dimension
+        stack_shape = (len(image_files),) + expected_shape
+        print(
+            f"Creating time series with shape: {stack_shape} ({output_order})"
+        )
+
+        # Use the same dtype as the first image
+        time_series = np.zeros(stack_shape, dtype=first_image.dtype)
+
+        # Load all images
+        time_series[0] = first_image
+
+        for i, image_file in enumerate(image_files[1:], 1):
+            try:
+                image = imread(image_file)
+
+                # Validate shape consistency
+                if image.shape != expected_shape:
+                    raise ValueError(
+                        f"Image {os.path.basename(image_file)} has shape {image.shape}, "
+                        f"expected {expected_shape}. All images must have the same dimensions."
+                    )
+
+                # Validate dtype consistency
+                if image.dtype != first_image.dtype:
+                    print(
+                        f"Warning: Converting {os.path.basename(image_file)} from {image.dtype} to {first_image.dtype}"
+                    )
+                    image = image.astype(first_image.dtype)
 
+                time_series[i] = image
 
-
-
+                if (i + 1) % 10 == 0 or i == len(image_files) - 1:
+                    print(f"Loaded {i + 1}/{len(image_files)} images")
+
+            except Exception as e:
+                raise ValueError(
+                    f"Error loading {image_file}: {str(e)}"
+                ) from e
+
+    print(f"Successfully loaded all {len(image_files)} timepoints")
+    return time_series, output_order
 
 
-# Advanced version with more options
 @BatchProcessingRegistry.register(
     name="Merge Timepoints",
     suffix="_merge_timeseries",
-    description="
+    description="Merge folder timepoints into time series. Processes each folder ONCE (skips redundant calls). Set thread count to 1!",
     parameters={
         "subsample_factor": {
             "type": int,
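A rough usage sketch of the extended loader, assuming the module path from the file list above; the folder, suffix, and CYX interpretation are hypothetical. Passing an explicit dimension_order sidesteps the ZYX/CYX ambiguity flagged in the auto-detect branch:

    import os

    from napari_tmidas.processing_functions.timepoint_merger import (
        find_timepoint_images,
        load_and_validate_images,
        natural_sort_key,
    )

    # Hypothetical folder of per-timepoint 3D CYX TIFFs
    files = find_timepoint_images("/data/experiment1", [".tif"])
    files.sort(key=lambda p: natural_sort_key(os.path.basename(p)))

    stack, order = load_and_validate_images(files, dimension_order="CYX")
    print(stack.shape, order)  # (n_files, C, Y, X), "TCYX"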
@@ -188,6 +347,17 @@ _processed_folders = set()
             "default": False,
             "description": "Use memory-efficient loading for very large datasets",
         },
+        "overwrite_existing": {
+            "type": bool,
+            "default": False,
+            "description": "Overwrite existing merged file if it exists",
+        },
+        "dimension_order": {
+            "type": str,
+            "default": "auto",
+            "choices": ["auto", "YX", "ZYX", "CYX", "CZYX", "TZYX"],
+            "description": "Dimension order of input files (auto-detect or specify manually)",
+        },
     },
 )
 def merge_timepoint_folder_advanced(
@@ -196,6 +366,8 @@ def merge_timepoint_folder_advanced(
     max_timepoints: int = 0,
     start_timepoint: int = 0,
     memory_efficient: bool = False,
+    overwrite_existing: bool = False,
+    dimension_order: str = "auto",
 ) -> np.ndarray:
     """
     Advanced timepoint merging with additional options for large datasets.
@@ -218,14 +390,14 @@ def merge_timepoint_folder_advanced(
         Starting timepoint index (0-based)
     memory_efficient : bool
         Use memory-efficient loading (loads images one at a time)
+    overwrite_existing : bool
+        Overwrite existing merged file if it exists
 
     Returns:
     --------
     numpy.ndarray
         Time series array with selected timepoints
     """
-    global _processed_folders
-
     # Get folder path and file suffix from batch processing context
     import inspect
 
@@ -256,25 +428,62 @@ def merge_timepoint_folder_advanced(
     if input_suffix is None:
         input_suffix = os.path.splitext(current_file)[1]
 
-    #
-
-    if
-
-
-
+    # Generate output filename with parameters in the name for uniqueness
+    param_suffix = ""
+    if subsample_factor > 1:
+        param_suffix += f"_sub{subsample_factor}"
+    if start_timepoint > 0:
+        param_suffix += f"_start{start_timepoint}"
+    if max_timepoints > 0:
+        param_suffix += f"_max{max_timepoints}"
+
+    output_filename = f"{folder_name}_merged_timepoints{param_suffix}.tif"
+    output_path = os.path.join(output_folder, output_filename)
+
+    # Create a unique key for this processing task (folder + parameters)
+    processing_key = f"{folder_path}|{param_suffix}|{dimension_order}"
+
+    # Check if this folder has already been processed in this session
+    if processing_key in _PROCESSED_FOLDERS:
+        print(f"✅ Folder already processed in this session: {folder_name}")
+        print("   Skipping to avoid redundant processing")
+        return image
+
+    # Check if output file already exists
+    if os.path.exists(output_path) and not overwrite_existing:
+        print(f"🔵 Merged file already exists: {output_filename}")
+        print(f"   Full path: {output_path}")
+        print("   Skipping this folder. To reprocess:")
+        print("   - Delete the existing file, or")
+        print("   - Use a different output folder, or")
+        print("   - Enable 'overwrite_existing' parameter")
+        # Mark as processed so we don't check again for other files in this folder
+        _PROCESSED_FOLDERS.add(processing_key)
         return image
 
-
+    # If we're here and the file exists, we're overwriting
+    if os.path.exists(output_path):
+        print(f"⚠️ Overwriting existing file: {output_filename}")
 
-    print(f"🔄
+    print(f"🔄 PROCESSING FOLDER: {folder_name}")
+    print(f"📁 Output will be: {output_filename}")
     print(f"Using file suffix: {input_suffix}")
 
     # Use the same suffix from the batch processing widget
-
+    # Split comma-separated suffixes into a list
+    if isinstance(input_suffix, str):
+        extensions = [s.strip() for s in input_suffix.split(",") if s.strip()]
+    else:
+        extensions = [input_suffix]
 
     # Find all timepoint images
     try:
         image_files = find_timepoint_images(folder_path, extensions)
+
+        # Exclude the output file if it exists in the folder (BEFORE sorting)
+        image_files = [f for f in image_files if f != output_path]
+
+        # Now sort the remaining files
         image_files.sort(key=lambda x: natural_sort_key(os.path.basename(x)))
 
         print(f"Found {len(image_files)} total timepoints")
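To make the new naming scheme concrete, a small worked example; the folder name and parameter values are hypothetical:

    # Mirrors the filename construction in the hunk above
    folder_name = "exp1"
    subsample_factor, start_timepoint, max_timepoints = 2, 10, 0

    param_suffix = ""
    if subsample_factor > 1:
        param_suffix += f"_sub{subsample_factor}"
    if start_timepoint > 0:
        param_suffix += f"_start{start_timepoint}"
    if max_timepoints > 0:
        param_suffix += f"_max{max_timepoints}"

    print(f"{folder_name}_merged_timepoints{param_suffix}.tif")
    # -> exp1_merged_timepoints_sub2_start10.tif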
@@ -300,7 +509,6 @@ def merge_timepoint_folder_advanced(
         # Apply timepoint selection
         if start_timepoint > 0:
             if start_timepoint >= len(image_files):
-                _processed_folders.discard(advanced_key)
                 raise ValueError(
                     f"start_timepoint ({start_timepoint}) >= total timepoints ({len(image_files)})"
                 )
@@ -322,7 +530,6 @@ def merge_timepoint_folder_advanced(
             print(f"Limited to {max_timepoints} timepoints")
 
         if len(image_files) < 1:
-            _processed_folders.discard(advanced_key)
             raise ValueError("No timepoints selected after applying filters")
 
         print(f"Final selection: {len(image_files)} timepoints")
@@ -339,7 +546,25 @@ def merge_timepoint_folder_advanced(
 
             # Load first image to determine shape and dtype
             first_image = imread(image_files[0])
-
+
+            # Determine dimension handling based on user specification
+            if dimension_order == "auto":
+                # Auto-detect: 4D assumed to be TZYX
+                is_concatenate_time = len(first_image.shape) == 4
+            else:
+                # User-specified: only TZYX needs time concatenation
+                is_concatenate_time = dimension_order.upper() == "TZYX"
+
+            if is_concatenate_time:
+                # 4D TZYX input - concatenate along time axis
+                total_timepoints = len(image_files) * first_image.shape[0]
+                stack_shape = (total_timepoints,) + first_image.shape[1:]
+                print(
+                    f"4D TZYX input detected: {len(image_files)} files × {first_image.shape[0]} timepoints = {total_timepoints} total"
+                )
+            else:
+                # 2D/3D input or 4D CZYX - add time dimension
+                stack_shape = (len(image_files),) + first_image.shape
 
             # Create memory-mapped array if possible, otherwise regular array
             try:
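As a quick sanity check of the shape arithmetic above, a standalone numpy sketch; the file count and image shape are hypothetical:

    import numpy as np

    # Hypothetical: three 4D TZYX files, 5 timepoints each, 10 z-slices, 256x256 pixels
    first_image = np.zeros((5, 10, 256, 256), dtype=np.uint16)
    n_files = 3

    total_timepoints = n_files * first_image.shape[0]           # 15
    stack_shape = (total_timepoints,) + first_image.shape[1:]   # (15, 10, 256, 256)
    print(total_timepoints, stack_shape)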
@@ -357,21 +582,45 @@ def merge_timepoint_folder_advanced(
                 time_series = np.zeros(stack_shape, dtype=first_image.dtype)
                 print(f"Created regular array: {stack_shape}")
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+            # Handle 4D TZYX vs other formats differently
+            if is_concatenate_time:
+                # 4D TZYX: concatenate along time axis
+                current_t = 0
+                time_series[0 : first_image.shape[0]] = first_image
+                current_t += first_image.shape[0]
+
+                # Load remaining images one by one
+                for i, image_file in enumerate(image_files[1:], 1):
+                    if i % 50 == 0:
+                        print(
+                            f"Loading file {i+1}/{len(image_files)} ({current_t}/{total_timepoints} timepoints)"
+                        )
+
+                    img = imread(image_file)
+                    if img.shape != first_image.shape:
+                        raise ValueError(
+                            f"Shape mismatch at file {i}: {img.shape} vs {first_image.shape}"
+                        )
+
+                    next_t = current_t + img.shape[0]
+                    time_series[current_t:next_t] = img
+                    current_t = next_t
+            else:
+                # 2D/3D: simple stacking
+                time_series[0] = first_image
+
+                # Load remaining images one by one
+                for i, image_file in enumerate(image_files[1:], 1):
+                    if i % 50 == 0:
+                        print(f"Loading timepoint {i+1}/{len(image_files)}")
+
+                    img = imread(image_file)
+                    if img.shape != first_image.shape:
+                        raise ValueError(
+                            f"Shape mismatch at timepoint {i}: {img.shape} vs {first_image.shape}"
+                        )
+
+                    time_series[i] = img
 
             # Convert back to regular array if using memmap
             if isinstance(time_series, np.memmap):
@@ -380,13 +629,12 @@ def merge_timepoint_folder_advanced(
                 time_series = result
         else:
             # Use standard loading
-            time_series = load_and_validate_images(
+            time_series = load_and_validate_images(
+                image_files, dimension_order
+            )[0]
 
         # Save the advanced time series
-
-        output_path = os.path.join(output_folder, output_filename)
-
-        print(f"💾 Saving advanced time series to: {output_path}")
+        print(f"💾 Saving time series to: {output_path}")
 
         size_gb = time_series.nbytes / (1024**3)
         use_bigtiff = size_gb > 2.0
@@ -398,20 +646,20 @@ def merge_timepoint_folder_advanced(
             bigtiff=use_bigtiff,
         )
 
-        print("✅ Successfully saved
+        print("✅ Successfully saved time series!")
         print(f"📁 Output file: {output_filename}")
         print(f"📊 File size: {size_gb:.2f} GB")
         print(f"📐 Final shape: {time_series.shape}")
 
+        # Mark this folder as processed to avoid redundant processing
+        _PROCESSED_FOLDERS.add(processing_key)
+
         # IMPORTANT: Return the original image unchanged so the batch processor
         # doesn't save individual processed files. The merged file is already saved above.
         return image
 
     except Exception as e:
-
-        raise ValueError(
-            f"Error in advanced timepoint merging: {str(e)}"
-        ) from e
+        raise ValueError(f"Error in timepoint merging: {str(e)}") from e
 
 
 # Command-line utility function
@@ -467,7 +715,7 @@ def merge_timepoints_cli():
         image_files = image_files[: args.max_timepoints]
 
     # Load and save
-    result = load_and_validate_images(image_files)[0]
+    result = load_and_validate_images(image_files, "auto")[0]
     tifffile.imwrite(args.output_file, result, compression="zlib")
 
     print(f"Successfully saved time series to {args.output_file}")
napari_tmidas/processing_functions/trackastra_tracking.py

@@ -252,16 +252,35 @@ def trackastra_tracking(
 
     temp_dir = Path(os.path.dirname(img_path))
 
-    # Save the mask data
-    mask_path = img_path.replace(".tif", "_labels.tif")
     # Create the tracking script
     script_path = temp_dir / "run_tracking.py"
-
-
+    # Save the mask data
+    # For label images, use the original path as mask_path
+    if label_pattern in os.path.basename(img_path):
+        mask_path = img_path
+        # Find corresponding raw image by removing the label pattern
+        raw_base = os.path.basename(img_path).replace(label_pattern, "")
+        raw_path = os.path.join(os.path.dirname(img_path), raw_base + ".tif")
+        if not os.path.exists(raw_path):
+            print(f"Warning: Could not find raw image for {img_path}")
+            raw_path = img_path  # Fallback to using label as input
+    else:
+        # For raw images, find the corresponding label image
+        raw_path = img_path
+        base_name = os.path.basename(img_path).replace(".tif", "")
+        mask_path = os.path.join(
+            os.path.dirname(img_path), base_name + label_pattern
+        )
+        if not os.path.exists(mask_path):
+            print(f"No label file found for {img_path}")
+            return image
+
+    output_path = temp_dir / os.path.basename(mask_path).replace(
+        label_pattern, "_tracked.tif"
     )
 
     script_content = create_trackastra_script(
-        str(
+        str(raw_path), str(mask_path), model, mode, str(output_path)
     )
 
     with open(script_path, "w") as f: