napari-tmidas 0.2.2__py3-none-any.whl → 0.2.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (54)
  1. napari_tmidas/__init__.py +35 -5
  2. napari_tmidas/_crop_anything.py +1520 -609
  3. napari_tmidas/_env_manager.py +76 -0
  4. napari_tmidas/_file_conversion.py +1646 -1131
  5. napari_tmidas/_file_selector.py +1455 -216
  6. napari_tmidas/_label_inspection.py +83 -8
  7. napari_tmidas/_processing_worker.py +309 -0
  8. napari_tmidas/_reader.py +6 -10
  9. napari_tmidas/_registry.py +2 -2
  10. napari_tmidas/_roi_colocalization.py +1221 -84
  11. napari_tmidas/_tests/test_crop_anything.py +123 -0
  12. napari_tmidas/_tests/test_env_manager.py +89 -0
  13. napari_tmidas/_tests/test_grid_view_overlay.py +193 -0
  14. napari_tmidas/_tests/test_init.py +98 -0
  15. napari_tmidas/_tests/test_intensity_label_filter.py +222 -0
  16. napari_tmidas/_tests/test_label_inspection.py +86 -0
  17. napari_tmidas/_tests/test_processing_basic.py +500 -0
  18. napari_tmidas/_tests/test_processing_worker.py +142 -0
  19. napari_tmidas/_tests/test_regionprops_analysis.py +547 -0
  20. napari_tmidas/_tests/test_registry.py +70 -2
  21. napari_tmidas/_tests/test_scipy_filters.py +168 -0
  22. napari_tmidas/_tests/test_skimage_filters.py +259 -0
  23. napari_tmidas/_tests/test_split_channels.py +217 -0
  24. napari_tmidas/_tests/test_spotiflow.py +87 -0
  25. napari_tmidas/_tests/test_tyx_display_fix.py +142 -0
  26. napari_tmidas/_tests/test_ui_utils.py +68 -0
  27. napari_tmidas/_tests/test_widget.py +30 -0
  28. napari_tmidas/_tests/test_windows_basic.py +66 -0
  29. napari_tmidas/_ui_utils.py +57 -0
  30. napari_tmidas/_version.py +16 -3
  31. napari_tmidas/_widget.py +41 -4
  32. napari_tmidas/processing_functions/basic.py +557 -20
  33. napari_tmidas/processing_functions/careamics_env_manager.py +72 -99
  34. napari_tmidas/processing_functions/cellpose_env_manager.py +415 -112
  35. napari_tmidas/processing_functions/cellpose_segmentation.py +132 -191
  36. napari_tmidas/processing_functions/colocalization.py +513 -56
  37. napari_tmidas/processing_functions/grid_view_overlay.py +703 -0
  38. napari_tmidas/processing_functions/intensity_label_filter.py +422 -0
  39. napari_tmidas/processing_functions/regionprops_analysis.py +1280 -0
  40. napari_tmidas/processing_functions/sam2_env_manager.py +53 -69
  41. napari_tmidas/processing_functions/sam2_mp4.py +274 -195
  42. napari_tmidas/processing_functions/scipy_filters.py +403 -8
  43. napari_tmidas/processing_functions/skimage_filters.py +424 -212
  44. napari_tmidas/processing_functions/spotiflow_detection.py +949 -0
  45. napari_tmidas/processing_functions/spotiflow_env_manager.py +591 -0
  46. napari_tmidas/processing_functions/timepoint_merger.py +334 -86
  47. {napari_tmidas-0.2.2.dist-info → napari_tmidas-0.2.4.dist-info}/METADATA +70 -30
  48. napari_tmidas-0.2.4.dist-info/RECORD +63 -0
  49. napari_tmidas/_tests/__init__.py +0 -0
  50. napari_tmidas-0.2.2.dist-info/RECORD +0 -40
  51. {napari_tmidas-0.2.2.dist-info → napari_tmidas-0.2.4.dist-info}/WHEEL +0 -0
  52. {napari_tmidas-0.2.2.dist-info → napari_tmidas-0.2.4.dist-info}/entry_points.txt +0 -0
  53. {napari_tmidas-0.2.2.dist-info → napari_tmidas-0.2.4.dist-info}/licenses/LICENSE +0 -0
  54. {napari_tmidas-0.2.2.dist-info → napari_tmidas-0.2.4.dist-info}/top_level.txt +0 -0
napari_tmidas/processing_functions/basic.py
@@ -3,45 +3,439 @@
 Basic image processing functions
 """
 import concurrent.futures
+import inspect
 import os
 import traceback
+import warnings

-import dask.array as da
 import numpy as np
-import tifffile

 from napari_tmidas._registry import BatchProcessingRegistry

+# Lazy imports for optional heavy dependencies
+try:
+    import dask.array as da
+
+    _HAS_DASK = True
+except ImportError:
+    da = None
+    _HAS_DASK = False
+
+try:
+    import tifffile
+
+    _HAS_TIFFFILE = True
+except ImportError:
+    tifffile = None
+    _HAS_TIFFFILE = False
+
+
+def _to_array(data: np.ndarray) -> np.ndarray:
+    arr = np.asarray(data)
+    if arr.ndim == 0:
+        raise ValueError("Label images must have at least one dimension")
+    return arr
+
+
+def _nonzero_bounds(arr: np.ndarray) -> list[tuple[int, int]]:
+    if not np.any(arr):
+        return [(0, size) for size in arr.shape]
+
+    bounds: list[tuple[int, int]] = []
+    dims = range(arr.ndim)
+    for axis in dims:
+        reduction_axes = tuple(i for i in dims if i != axis)
+        axis_any = np.any(arr, axis=reduction_axes)
+        nonzero_idx = np.flatnonzero(axis_any)
+        if nonzero_idx.size == 0:
+            bounds.append((0, arr.shape[axis]))
+        else:
+            bounds.append((int(nonzero_idx[0]), int(nonzero_idx[-1]) + 1))
+    return bounds
+
+
+def _match_ndim(reference: np.ndarray, candidate: np.ndarray) -> np.ndarray:
+    result = np.asarray(candidate)
+    if result.ndim > reference.ndim:
+        axes = [idx for idx, size in enumerate(result.shape) if size == 1]
+        while axes and result.ndim > reference.ndim:
+            result = np.squeeze(result, axis=axes.pop())
+    if result.ndim > reference.ndim:
+        raise ValueError(
+            "Unable to align label images with differing numbers of non-singleton dimensions"
+        )
+    while result.ndim < reference.ndim:
+        result = result[np.newaxis, ...]
+    return result
+
+
+def _align_candidate(
+    reference: np.ndarray, candidate: np.ndarray
+) -> np.ndarray:
+    candidate = _match_ndim(reference, candidate)
+    if candidate.shape == reference.shape:
+        return candidate
+
+    aligned = np.zeros_like(reference)
+    if not np.any(candidate):
+        return aligned
+
+    ref_bounds = _nonzero_bounds(reference)
+    cand_bounds = _nonzero_bounds(candidate)
+
+    shifts: list[int] = []
+    for axis in range(reference.ndim):
+        ref_min, ref_max = ref_bounds[axis]
+        cand_min, cand_max = cand_bounds[axis]
+        ref_center = (ref_min + ref_max) / 2.0
+        cand_center = (cand_min + cand_max) / 2.0
+        shifts.append(int(round(ref_center - cand_center)))
+
+    target_slices: list[slice] = []
+    source_slices: list[slice] = []
+    for axis, shift in enumerate(shifts):
+        ref_len = reference.shape[axis]
+        cand_len = candidate.shape[axis]
+        ref_start = max(0, shift)
+        cand_start = max(0, -shift)
+        length = min(ref_len - ref_start, cand_len - cand_start)
+        if length <= 0:
+            return aligned
+        target_slices.append(slice(ref_start, ref_start + length))
+        source_slices.append(slice(cand_start, cand_start + length))
+
+    aligned[tuple(target_slices)] = candidate[tuple(source_slices)]
+    return aligned
+

 @BatchProcessingRegistry.register(
     name="Labels to Binary",
     suffix="_binary",
-    description="Convert multi-label images to binary masks (all non-zero labels become 1)",
+    description="Convert a label image to a binary mask (255 for non-zero, 0 otherwise)",
+    parameters={},
 )
 def labels_to_binary(image: np.ndarray) -> np.ndarray:
+    arr = _to_array(image)
+    result = np.zeros(arr.shape, dtype=np.uint8)
+    np.copyto(result, 255, where=arr != 0)
+    return result
+
+
+@BatchProcessingRegistry.register(
+    name="Invert Binary Labels",
+    suffix="_inverted",
+    description="Invert a binary label image (non-zero becomes 0, zero becomes 255)",
+    parameters={},
+)
+def invert_binary_labels(image: np.ndarray) -> np.ndarray:
+    arr = _to_array(image)
+    result = np.zeros(arr.shape, dtype=np.uint8)
+    np.copyto(result, 255, where=arr == 0)
+    return result
+
+
+@BatchProcessingRegistry.register(
+    name="Filter Label by ID",
+    suffix="_filtered",
+    description="Keep only the specified label ID, set all other labels to background (0)",
+    parameters={
+        "label_id": {
+            "type": int,
+            "default": 1,
+            "min": 1,
+            "description": "Label ID to keep (all others become background)",
+        }
+    },
+)
+def filter_label_by_id(image: np.ndarray, label_id: int = 1) -> np.ndarray:
     """
-    Convert multi-label images to binary masks.
+    Filter a label image to keep only the specified label ID.
+    All other label IDs are set to background (0).
+
+    Parameters
+    ----------
+    image : np.ndarray
+        Input label image
+    label_id : int
+        The label ID to keep (default: 1)
+
+    Returns
+    -------
+    np.ndarray
+        Filtered label image with only the specified label ID preserved
+    """
+    arr = _to_array(image)
+    result = np.where(arr == label_id, arr, 0).astype(arr.dtype)
+    return result

-    This function takes a label image (where different regions have different label values)
-    and converts it to a binary mask (where all labeled regions have a value of 1 and
-    background has a value of 0).

-    Parameters:
-    -----------
-    image : numpy.ndarray
-        Input label image array
+@BatchProcessingRegistry.register(
+    name="Mirror Labels",
+    suffix="_mirrored",
+    description="Mirror labels at their largest slice area along an axis, keeping original image shape",
+    parameters={
+        "axis": {
+            "type": int,
+            "default": 0,
+            "description": "Axis along which to mirror the labels",
+        }
+    },
+)
+def mirror_labels(image: np.ndarray, axis: int = 0) -> np.ndarray:
+    arr = _to_array(image)
+    if arr.ndim == 0:
+        raise ValueError("Cannot mirror a scalar")
+    if not isinstance(axis, int):
+        raise TypeError("axis must be an integer")
+    if axis >= arr.ndim or axis < -arr.ndim:
+        raise ValueError(
+            f"Axis {axis} is out of bounds for an image with {arr.ndim} dimensions"
+        )

-    Returns:
-    --------
+    axis = axis % arr.ndim
+
+    # Find the slice with the largest area (most non-zero pixels)
+    reduction_axes = tuple(i for i in range(arr.ndim) if i != axis)
+    slice_areas = np.sum(arr != 0, axis=reduction_axes, dtype=np.int64)
+
+    if slice_areas.size == 0 or np.max(slice_areas) == 0:
+        # No labels to mirror, return copy
+        return arr.copy()
+
+    # Find the index of the slice with maximum area
+    max_area_idx = int(np.argmax(slice_areas))
+
+    # Create result array (same shape as input)
+    result = np.zeros_like(arr)
+
+    # Get max label value for offset
+    max_label = int(np.max(arr)) if arr.size else 0
+
+    # Mirror labels from the max_area_idx position
+    for i in range(arr.shape[axis]):
+        # Calculate the mirrored position relative to max_area_idx
+        mirrored_i = 2 * max_area_idx - i
+
+        # Create slicers for current position i and mirrored position
+        slicer_i = [slice(None)] * arr.ndim
+        slicer_i[axis] = i
+        slicer_i = tuple(slicer_i)
+
+        # If mirrored position is within bounds, copy and offset the labels
+        if 0 <= mirrored_i < arr.shape[axis]:
+            slicer_mirrored = [slice(None)] * arr.ndim
+            slicer_mirrored[axis] = mirrored_i
+            slicer_mirrored = tuple(slicer_mirrored)
+
+            # Copy mirrored slice with offset labels
+            mirrored_slice = arr[slicer_mirrored]
+            result[slicer_i] = np.where(
+                mirrored_slice != 0, mirrored_slice + max_label, 0
+            )
+
+    return result.astype(arr.dtype, copy=False)
+
+
+@BatchProcessingRegistry.register(
+    name="Intersect Label Images",
+    suffix="_intersected",
+    description="Compute the voxel-wise intersection of paired label images identified by suffix",
+    parameters={
+        "primary_suffix": {
+            "type": str,
+            "default": "_a.tif",
+            "description": "Suffix (including extension) of the primary label image",
+        },
+        "secondary_suffix": {
+            "type": str,
+            "default": "_b.tif",
+            "description": "Suffix (including extension) of the paired label image",
+        },
+    },
+)
+def intersect_label_images(
+    image: np.ndarray,
+    primary_suffix: str = "_a.tif",
+    secondary_suffix: str = "_b.tif",
+) -> np.ndarray:
+    """Return the intersection of two paired label images.
+
+    The function expects two label files that share a base name but differ by suffix.
+    When overlap occurs, the label IDs from the primary image are retained anywhere both
+    volumes contain non-zero voxels. Each pair is processed exactly once by treating the
+    file with ``primary_suffix`` as the active entry point; secondary label files are
+    skipped and left unchanged.
+    """
+
+    if not primary_suffix or not secondary_suffix:
+        raise ValueError(
+            "Both primary_suffix and secondary_suffix must be provided"
+        )
+
+    def _load_label_file(path: str) -> np.ndarray:
+        ext = os.path.splitext(path)[1].lower()
+        if ext == ".npy":
+            return np.load(path)
+        if _HAS_TIFFFILE and ext in {".tif", ".tiff", ".ome.tif", ".ome.tiff"}:
+            return tifffile.imread(path)
+
+        try:
+            from skimage.io import imread
+        except (
+            ImportError
+        ) as exc:  # pragma: no cover - optional dependency path
+            raise ImportError(
+                "Install 'tifffile' or 'scikit-image' to load paired label images"
+            ) from exc
+
+        return imread(path)
+
+    current_file = None
+    for frame_info in inspect.stack():
+        frame_locals = frame_info.frame.f_locals
+        if "filepath" in frame_locals:
+            current_file = frame_locals["filepath"]
+            break
+
+    if current_file is None:
+        raise ValueError(
+            "Could not determine current file path for paired label lookup"
+        )
+
+    current_file = os.fspath(current_file)
+    folder_path = os.path.dirname(current_file)
+    filename = os.path.basename(current_file)
+
+    if filename.endswith(primary_suffix):
+        base_name = filename[: -len(primary_suffix)]
+        paired_suffix = secondary_suffix
+    elif filename.endswith(secondary_suffix):
+        warnings.warn(
+            (
+                f"Skipping secondary label image '{filename}'; only files ending with "
+                f"'{primary_suffix}' are processed by the 'Intersect Label Images' function."
+            ),
+            stacklevel=2,
+        )
+        return None
+    else:
+        raise ValueError(
+            f"Filename '{filename}' does not end with either '{primary_suffix}' or '{secondary_suffix}'"
+        )
+
+    paired_name = base_name + paired_suffix
+    paired_path = os.path.abspath(os.path.join(folder_path, paired_name))
+
+    if not os.path.exists(paired_path):
+        raise FileNotFoundError(f"Paired label image not found: {paired_path}")
+
+    current_array = _to_array(image)
+    paired_array = _to_array(_load_label_file(paired_path))
+
+    paired_aligned = _align_candidate(current_array, paired_array)
+
+    overlap_mask = (current_array != 0) & (paired_aligned != 0)
+    if not np.any(overlap_mask):
+        result_dtype = np.promote_types(
+            current_array.dtype, paired_aligned.dtype
+        )
+        return np.zeros(current_array.shape, dtype=result_dtype)
+
+    result_dtype = np.promote_types(current_array.dtype, paired_aligned.dtype)
+    result = np.zeros(current_array.shape, dtype=result_dtype)
+    np.copyto(
+        result,
+        current_array.astype(result_dtype, copy=False),
+        where=overlap_mask,
+    )
+
+    return result
+
+
+@BatchProcessingRegistry.register(
+    name="Keep Slice Range by Area",
+    suffix="_area_range",
+    description="Zero out label content outside the min/max area slice range (preserves image shape for alignment)",
+    parameters={
+        "axis": {
+            "type": int,
+            "default": 0,
+            "description": "Axis index representing the slice dimension (negative values count from the end)",
+        }
+    },
+)
+def keep_slice_range_by_area(image: np.ndarray, axis: int = 0) -> np.ndarray:
+    """Keep label content only between the minimum-area and maximum-area slices (inclusive).
+
+    The per-slice area is measured as the number of non-zero pixels in the slice. When all slices
+    share the same area, the original volume is returned unchanged. This function preserves the
+    original image dimensions but zeros out label content outside the detected range, ensuring
+    alignment with corresponding image data is maintained.
+
+    Parameters
+    ----------
+    image:
+        3D (or higher dimensional) label image as a NumPy array.
+    axis:
+        Axis index corresponding to the slice dimension that should be evaluated.
+
+    Returns
+    -------
     numpy.ndarray
-        Binary mask with 1 for all non-zero labels and 0 for background
+        Volume with the same shape as input, but with label content zeroed outside the
+        minimum and maximum area slice range (inclusive).
     """
-    # Make a copy of the input image to avoid modifying the original
-    binary_mask = image.copy()

-    binary_mask = (binary_mask > 0).astype(np.uint32)
+    if image.ndim < 3:
+        raise ValueError(
+            "Slice range trimming requires an array with at least 3 dimensions"
+        )
+    if not isinstance(axis, int):
+        raise TypeError("axis must be provided as an integer")
+    if axis >= image.ndim or axis < -image.ndim:
+        raise ValueError(
+            f"Axis {axis} is out of bounds for an image with {image.ndim} dimensions"
+        )
+
+    axis = axis % image.ndim
+
+    if image.shape[axis] == 0:
+        raise ValueError(
+            "Cannot determine slice range on an axis with zero length"
+        )
+
+    reduction_axes = tuple(i for i in range(image.ndim) if i != axis)
+    # Count non-zero pixels per slice to determine occupied area per slice
+    slice_areas = np.sum(image != 0, axis=reduction_axes, dtype=np.int64)
+
+    if slice_areas.size == 0:
+        return image.copy()
+
+    if slice_areas.min() == slice_areas.max():
+        return image.copy()
+
+    min_idx = int(np.argmin(slice_areas))
+    max_idx = int(np.argmax(slice_areas))
+
+    start = min(min_idx, max_idx)
+    end = max(min_idx, max_idx)

-    return binary_mask
+    # Create a copy of the full image to preserve shape
+    result = image.copy()
+
+    # Zero out slices before the start
+    if start > 0:
+        before_slicer = [slice(None)] * image.ndim
+        before_slicer[axis] = slice(0, start)
+        result[tuple(before_slicer)] = 0
+
+    # Zero out slices after the end
+    if end < image.shape[axis] - 1:
+        after_slicer = [slice(None)] * image.ndim
+        after_slicer[axis] = slice(end + 1, None)
+        result[tuple(after_slicer)] = 0
+
+    return result


 @BatchProcessingRegistry.register(
@@ -168,7 +562,7 @@ def max_z_projection_tzyx(image: np.ndarray) -> np.ndarray:

 @BatchProcessingRegistry.register(
     name="Split Color Channels",
-    suffix="_split_color_channels",
+    suffix="_split",
     description="Splits the color channels of the image",
     parameters={
         "num_channels": {
@@ -313,6 +707,9 @@ def split_channels(
     # Process output format
     result_channels = []
     for i, channel_img in enumerate(channels):
+        # Remove the channel dimension (which now has size 1 after split)
+        channel_img = np.squeeze(channel_img, axis=channel_axis)
+
         # Get original axes without channel
         axes_without_channel = axes.copy()
         del axes_without_channel[channel_axis]
@@ -368,6 +765,140 @@ def split_channels(
     return np.stack(result_channels, axis=0)


+@BatchProcessingRegistry.register(
+    name="Merge Color Channels",
+    suffix="_merged_colors",
+    description="Merges separate channel images from a folder into a single multi-channel image",
+    parameters={
+        "channel_substring": {
+            "type": str,
+            "default": "_channel_",
+            "description": "Substring before channel number",
+        },
+    },
+)
+def merge_channels(
+    image: np.ndarray,
+    channel_substring: str = "_channel_",
+) -> np.ndarray:
+    """
+    Merge multiple single-channel images from a folder into one multi-channel image.
+
+    Identifies channel files by finding a substring followed by a 1-2 digit number.
+    Adds channels as the last dimension regardless of input dimensionality.
+
+    Args:
+        image: Current image being processed
+        channel_substring: Substring that appears before channel number in filenames
+
+    Returns:
+        Multi-channel image with channels as last dimension
+    """
+    # Get file context from batch processing
+    import inspect
+    import re
+
+    from skimage.io import imread
+
+    current_file = None
+
+    for frame_info in inspect.stack():
+        frame_locals = frame_info.frame.f_locals
+        if "filepath" in frame_locals:
+            current_file = frame_locals["filepath"]
+            break
+
+    if current_file is None:
+        raise ValueError("Could not determine current file path")
+
+    folder_path = os.path.dirname(current_file)
+    filename = os.path.basename(current_file)
+
+    # Create regex pattern to find channel substring followed by 1-2 digits
+    pattern = re.compile(rf"({re.escape(channel_substring)})(\d{{1,2}})")
+    match = pattern.search(filename)
+
+    if not match:
+        print(
+            f"⚠️ No channel pattern '{channel_substring}[number]' found in filename"
+        )
+        return image
+
+    # Extract base name and channel number
+    channel_num = int(match.group(2))
+    base_name = filename[: match.start()] + filename[match.end() :]
+
+    print(f"\n📐 Current image shape: {image.shape}")
+    print(f"✅ Found channel {channel_num} in file: {filename}")
+
+    # Find all related channel files
+    all_files = os.listdir(folder_path)
+    channel_files = {}
+
+    for file in all_files:
+        file_match = pattern.search(file)
+        if file_match:
+            # Check if base name matches (excluding channel part)
+            file_base = file[: file_match.start()] + file[file_match.end() :]
+            if file_base == base_name:
+                ch_num = int(file_match.group(2))
+                channel_files[ch_num] = os.path.join(folder_path, file)
+
+    # Sort by channel number
+    sorted_channels = sorted(channel_files.keys())
+    num_channels = len(sorted_channels)
+
+    if num_channels < 2:
+        print(
+            f"⚠️ Only found {num_channels} channel(s). Need at least 2 for merging."
+        )
+        return image
+
+    # Determine which channel acts as the primary trigger for merging
+    primary_channel = sorted_channels[0]
+
+    if channel_num != primary_channel:
+        print(
+            f"ℹ️ Channel {channel_num} is not the primary channel ({primary_channel}); skipping merge for this file."
+        )
+        return image
+
+    print(f"📊 Found {num_channels} channels: {sorted_channels}")
+
+    # Load all channels in order
+    # First channel is the current image
+    channels = []
+    for ch_num in sorted_channels:
+        if ch_num == channel_num:
+            # Use the already loaded image for current channel
+            channels.append(image)
+        else:
+            # Load other channel files
+            channel_path = channel_files[ch_num]
+            channel_data = imread(channel_path)
+
+            if channel_data.shape != image.shape:
+                raise ValueError(
+                    f"Channel {ch_num} has different shape: {channel_data.shape} vs {image.shape}"
+                )
+
+            channels.append(channel_data)
+
+        print(
+            f" Channel {ch_num}: {os.path.basename(channel_files[ch_num])}"
+        )
+
+    # Stack channels as last dimension
+    merged = np.stack(channels, axis=-1)
+
+    print(
+        f"✨ Merged shape: {merged.shape} (channels added as last dimension)"
+    )
+
+    # Return merged array so downstream steps receive the combined channels
+    return merged
+
+
 @BatchProcessingRegistry.register(
     name="RGB to Labels",
     suffix="_labels",
@@ -542,7 +1073,13 @@ def split_tzyx_stack(
 # Monkey patch ProcessingWorker.process_file to handle parallel TZYX splitting
 try:
     # Import tifffile here to ensure it's available for the monkey patch
-    import tifffile
+    if not _HAS_TIFFFILE:
+        try:
+            import tifffile
+
+            _HAS_TIFFFILE = True
+        except ImportError:
+            pass  # tifffile not available, skip monkey patch

     from napari_tmidas._file_selector import ProcessingWorker
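
Note on the new label utilities: the hunks above add several standalone helpers to napari_tmidas/processing_functions/basic.py. As a rough orientation, the sketch below shows how a few of them might be exercised directly on a toy array, outside the napari batch pipeline. It assumes that BatchProcessingRegistry.register returns the wrapped function unchanged and that the helpers are importable from napari_tmidas.processing_functions.basic; treat it as an illustrative sketch rather than documented API usage.

    import numpy as np

    from napari_tmidas.processing_functions.basic import (
        filter_label_by_id,
        keep_slice_range_by_area,
        labels_to_binary,
    )

    # Toy 3D label volume: a small object on slice 1 and a large one on slice 3.
    labels = np.zeros((5, 8, 8), dtype=np.uint16)
    labels[1, 2:4, 2:4] = 1
    labels[3, 1:7, 1:7] = 2

    binary = labels_to_binary(labels)                   # uint8 mask, 255 wherever any label exists
    only_two = filter_label_by_id(labels, label_id=2)   # keeps label 2, zeroes everything else
    trimmed = keep_slice_range_by_area(labels, axis=0)  # zeroes slices outside the min/max-area range
    print(binary.dtype, int(only_two.max()), trimmed.shape)

In the plugin itself these same callables are registered for batch use through the name, suffix, and parameters metadata shown in the decorators above.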