napari-tmidas 0.2.2__py3-none-any.whl → 0.2.5__py3-none-any.whl

This diff shows the changes between publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the packages as they appear in their respective public registries.
Files changed (54)
  1. napari_tmidas/__init__.py +35 -5
  2. napari_tmidas/_crop_anything.py +1520 -609
  3. napari_tmidas/_env_manager.py +76 -0
  4. napari_tmidas/_file_conversion.py +1646 -1131
  5. napari_tmidas/_file_selector.py +1455 -216
  6. napari_tmidas/_label_inspection.py +83 -8
  7. napari_tmidas/_processing_worker.py +309 -0
  8. napari_tmidas/_reader.py +6 -10
  9. napari_tmidas/_registry.py +2 -2
  10. napari_tmidas/_roi_colocalization.py +1221 -84
  11. napari_tmidas/_tests/test_crop_anything.py +123 -0
  12. napari_tmidas/_tests/test_env_manager.py +89 -0
  13. napari_tmidas/_tests/test_grid_view_overlay.py +193 -0
  14. napari_tmidas/_tests/test_init.py +98 -0
  15. napari_tmidas/_tests/test_intensity_label_filter.py +222 -0
  16. napari_tmidas/_tests/test_label_inspection.py +86 -0
  17. napari_tmidas/_tests/test_processing_basic.py +500 -0
  18. napari_tmidas/_tests/test_processing_worker.py +142 -0
  19. napari_tmidas/_tests/test_regionprops_analysis.py +547 -0
  20. napari_tmidas/_tests/test_registry.py +70 -2
  21. napari_tmidas/_tests/test_scipy_filters.py +168 -0
  22. napari_tmidas/_tests/test_skimage_filters.py +259 -0
  23. napari_tmidas/_tests/test_split_channels.py +217 -0
  24. napari_tmidas/_tests/test_spotiflow.py +87 -0
  25. napari_tmidas/_tests/test_tyx_display_fix.py +142 -0
  26. napari_tmidas/_tests/test_ui_utils.py +68 -0
  27. napari_tmidas/_tests/test_widget.py +30 -0
  28. napari_tmidas/_tests/test_windows_basic.py +66 -0
  29. napari_tmidas/_ui_utils.py +57 -0
  30. napari_tmidas/_version.py +16 -3
  31. napari_tmidas/_widget.py +41 -4
  32. napari_tmidas/processing_functions/basic.py +557 -20
  33. napari_tmidas/processing_functions/careamics_env_manager.py +72 -99
  34. napari_tmidas/processing_functions/cellpose_env_manager.py +415 -112
  35. napari_tmidas/processing_functions/cellpose_segmentation.py +132 -191
  36. napari_tmidas/processing_functions/colocalization.py +513 -56
  37. napari_tmidas/processing_functions/grid_view_overlay.py +703 -0
  38. napari_tmidas/processing_functions/intensity_label_filter.py +422 -0
  39. napari_tmidas/processing_functions/regionprops_analysis.py +1280 -0
  40. napari_tmidas/processing_functions/sam2_env_manager.py +53 -69
  41. napari_tmidas/processing_functions/sam2_mp4.py +274 -195
  42. napari_tmidas/processing_functions/scipy_filters.py +403 -8
  43. napari_tmidas/processing_functions/skimage_filters.py +424 -212
  44. napari_tmidas/processing_functions/spotiflow_detection.py +949 -0
  45. napari_tmidas/processing_functions/spotiflow_env_manager.py +591 -0
  46. napari_tmidas/processing_functions/timepoint_merger.py +334 -86
  47. {napari_tmidas-0.2.2.dist-info → napari_tmidas-0.2.5.dist-info}/METADATA +71 -30
  48. napari_tmidas-0.2.5.dist-info/RECORD +63 -0
  49. napari_tmidas/_tests/__init__.py +0 -0
  50. napari_tmidas-0.2.2.dist-info/RECORD +0 -40
  51. {napari_tmidas-0.2.2.dist-info → napari_tmidas-0.2.5.dist-info}/WHEEL +0 -0
  52. {napari_tmidas-0.2.2.dist-info → napari_tmidas-0.2.5.dist-info}/entry_points.txt +0 -0
  53. {napari_tmidas-0.2.2.dist-info → napari_tmidas-0.2.5.dist-info}/licenses/LICENSE +0 -0
  54. {napari_tmidas-0.2.2.dist-info → napari_tmidas-0.2.5.dist-info}/top_level.txt +0 -0
@@ -10,43 +10,448 @@ New functions can be added to the processing registry by decorating them with
10
10
  as the first argument, and any additional keyword arguments for parameters.
11
11
  """
12
12
 
13
+ from __future__ import annotations
14
+
13
15
  import concurrent.futures
14
16
  import os
15
17
  import sys
16
- from typing import Any, Dict, List
18
+ from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
17
19
 
18
- import napari
19
20
  import numpy as np
20
- import tifffile
21
- from magicgui import magicgui
22
- from qtpy.QtCore import Qt, QThread, Signal
23
- from qtpy.QtWidgets import (
24
- QComboBox,
25
- QDoubleSpinBox,
26
- QFileDialog,
27
- QFormLayout,
28
- QHBoxLayout,
29
- QHeaderView,
30
- QLabel,
31
- QLineEdit,
32
- QProgressBar,
33
- QPushButton,
34
- QSpinBox,
35
- QTableWidget,
36
- QTableWidgetItem,
37
- QVBoxLayout,
38
- QWidget,
39
- )
40
- from skimage.io import imread
21
+
22
+ # Lazy imports for optional heavy dependencies
23
+ if TYPE_CHECKING:
24
+ import napari
25
+ import tifffile
26
+ import zarr
27
+ from magicgui import magicgui
28
+ from qtpy.QtCore import Qt, QThread, Signal
29
+ from qtpy.QtWidgets import (
30
+ QCheckBox,
31
+ QComboBox,
32
+ QDoubleSpinBox,
33
+ QFormLayout,
34
+ QHBoxLayout,
35
+ QHeaderView,
36
+ QLabel,
37
+ QLineEdit,
38
+ QProgressBar,
39
+ QPushButton,
40
+ QSpinBox,
41
+ QTableWidget,
42
+ QTableWidgetItem,
43
+ QVBoxLayout,
44
+ QWidget,
45
+ )
46
+ from skimage.io import imread
47
+
48
+ try:
49
+ import napari
50
+
51
+ _HAS_NAPARI = True
52
+ except ImportError:
53
+ napari = None
54
+ _HAS_NAPARI = False
55
+
56
+ try:
57
+ import tifffile
58
+
59
+ _HAS_TIFFFILE = True
60
+ except ImportError:
61
+ tifffile = None
62
+ _HAS_TIFFFILE = False
63
+
64
+ try:
65
+ import zarr
66
+
67
+ _HAS_ZARR = True
68
+ except ImportError:
69
+ zarr = None
70
+ _HAS_ZARR = False
71
+
72
+ try:
73
+ from magicgui import magicgui
74
+
75
+ _HAS_MAGICGUI = True
76
+ except ImportError:
77
+ # Create stub decorator
78
+ def magicgui(*args, **kwargs):
79
+ def decorator(func):
80
+ return func
81
+
82
+ if len(args) == 1 and callable(args[0]) and not kwargs:
83
+ return args[0]
84
+ return decorator
85
+
86
+ _HAS_MAGICGUI = False
87
+
88
+ try:
89
+ from qtpy.QtCore import Qt, QThread, Signal
90
+ from qtpy.QtWidgets import (
91
+ QCheckBox,
92
+ QComboBox,
93
+ QDoubleSpinBox,
94
+ QFormLayout,
95
+ QHBoxLayout,
96
+ QHeaderView,
97
+ QLabel,
98
+ QLineEdit,
99
+ QMessageBox,
100
+ QProgressBar,
101
+ QPushButton,
102
+ QSpinBox,
103
+ QTableWidget,
104
+ QTableWidgetItem,
105
+ QVBoxLayout,
106
+ QWidget,
107
+ )
108
+
109
+ _HAS_QTPY = True
110
+ except ImportError:
111
+ Qt = QThread = Signal = None
112
+ QCheckBox = QComboBox = QDoubleSpinBox = QFormLayout = QHBoxLayout = None
113
+ QHeaderView = QLabel = QLineEdit = QMessageBox = QProgressBar = (
114
+ QPushButton
115
+ ) = None
116
+ QSpinBox = QTableWidget = QTableWidgetItem = QVBoxLayout = QWidget = None
117
+ _HAS_QTPY = False
118
+
119
+ try:
120
+ from skimage.io import imread
121
+
122
+ _HAS_SKIMAGE = True
123
+ except ImportError:
124
+ imread = None
125
+ _HAS_SKIMAGE = False
126
+
127
+ # Create stub base classes when dependencies are missing
128
+ if not _HAS_QTPY:
129
+ # Create minimal stubs to allow class definitions
130
+ class QTableWidget:
131
+ pass
132
+
133
+ class QThread:
134
+ pass
135
+
136
+ class QWidget:
137
+ pass
138
+
139
+ def Signal(*args):
140
+ return None
141
+
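The guarded imports above let the module be imported in headless or minimal environments and defer failures to the point of use. A minimal sketch of the same pattern, using tifffile as the optional dependency (the save_array helper below is illustrative, not part of the package):

    from typing import Any

    try:
        import tifffile  # optional heavy dependency
        _HAS_TIFFFILE = True
    except ImportError:
        tifffile = None
        _HAS_TIFFFILE = False

    def save_array(path: str, data: Any) -> None:
        # Fail with an actionable message at call time instead of at import time.
        if not _HAS_TIFFFILE:
            raise RuntimeError("tifffile is required for saving: pip install tifffile")
        tifffile.imwrite(path, data)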
41
142
 
42
143
  # Import registry and processing functions
43
144
  from napari_tmidas._registry import BatchProcessingRegistry
145
+ from napari_tmidas._ui_utils import add_browse_button_to_folder_field
44
146
 
45
147
  sys.path.append("src/napari_tmidas")
46
148
  from napari_tmidas.processing_functions import (
47
149
  discover_and_load_processing_functions,
48
150
  )
49
151
 
152
+ # Import cancellation functions for subprocess-based processing
153
+ try:
154
+ from napari_tmidas.processing_functions.cellpose_env_manager import (
155
+ cancel_cellpose_processing,
156
+ )
157
+ except ImportError:
158
+ cancel_cellpose_processing = None
159
+
160
+ # Check for OME-Zarr support
161
+ try:
162
+ from napari_ome_zarr import napari_get_reader
163
+
164
+ OME_ZARR_AVAILABLE = True
165
+ print("napari-ome-zarr found - enhanced Zarr support enabled")
166
+ except ImportError:
167
+ OME_ZARR_AVAILABLE = False
168
+ print(
169
+ "Tip: Install napari-ome-zarr for better Zarr support: pip install napari-ome-zarr"
170
+ )
171
+
172
+ try:
173
+ import dask.array as da
174
+
175
+ DASK_AVAILABLE = True
176
+ except ImportError:
177
+ DASK_AVAILABLE = False
178
+ print(
179
+ "Tip: Install dask for better performance with large datasets: pip install dask"
180
+ )
181
+
182
+
183
+ def is_label_image(image: np.ndarray) -> bool:
184
+ """
185
+ Determine if an image should be treated as a label image based on its dtype.
186
+
187
+ This function uses the same logic as Napari's guess_labels() function,
188
+ checking if the dtype is one of the integer types commonly used for labels.
189
+
190
+ Parameters:
191
+ -----------
192
+ image : np.ndarray
193
+ The image array to check
194
+
195
+ Returns:
196
+ --------
197
+ bool
198
+ True if the image dtype suggests it's a label image, False otherwise
199
+ """
200
+ if hasattr(image, "dtype"):
201
+ return image.dtype in (np.int32, np.uint32, np.int64, np.uint64)
202
+ return False
203
+
204
+
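A quick usage sketch of the dtype heuristic above, assuming is_label_image is imported from this module and using synthetic arrays. Note that uint8/uint16 masks are not flagged, which is why label outputs elsewhere in this file are cast to uint32 before saving:

    import numpy as np

    intensity = np.random.rand(64, 64).astype(np.float32)  # float data -> treated as an image
    masks = np.zeros((64, 64), dtype=np.uint32)             # 32/64-bit integers -> treated as labels

    assert not is_label_image(intensity)
    assert is_label_image(masks)
    assert not is_label_image(masks.astype(np.uint16))      # uint16 masks fall through to "image"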
205
+ def load_zarr_with_napari_ome_zarr(
206
+ filepath: str, verbose: bool = True
207
+ ) -> Optional[List[Tuple]]:
208
+ """
209
+ Load zarr using napari-ome-zarr reader with enhanced error handling
210
+ """
211
+ if not OME_ZARR_AVAILABLE:
212
+ return None
213
+
214
+ try:
215
+ # Try multiple approaches to get the reader
216
+ reader_func = napari_get_reader(filepath)
217
+ if reader_func is None:
218
+ if verbose:
219
+ print(f"napari-ome-zarr: No reader available for {filepath}")
220
+ return None
221
+
222
+ # Try to read the data
223
+ layer_data_list = reader_func(filepath)
224
+
225
+ if layer_data_list and len(layer_data_list) > 0:
226
+ if verbose:
227
+ print(
228
+ f"napari-ome-zarr: Successfully loaded {len(layer_data_list)} layers"
229
+ )
230
+
231
+ # Enhance layer metadata
232
+ enhanced_layers = []
233
+ for i, (data, add_kwargs, layer_type) in enumerate(
234
+ layer_data_list
235
+ ):
236
+ # Ensure proper naming
237
+ if "name" not in add_kwargs or not add_kwargs["name"]:
238
+ basename = os.path.basename(filepath)
239
+ if layer_type == "image":
240
+ add_kwargs["name"] = f"C{i+1}: {basename}"
241
+ elif layer_type == "labels":
242
+ add_kwargs["name"] = f"Labels{i+1}: {basename}"
243
+ else:
244
+ add_kwargs["name"] = (
245
+ f"{layer_type.title()}{i+1}: {basename}"
246
+ )
247
+
248
+ # Set appropriate blending for multi-channel images
249
+ if layer_type == "image" and len(layer_data_list) > 1:
250
+ add_kwargs["blending"] = "additive"
251
+
252
+ # Ensure proper colormap assignment for multi-channel
253
+ if layer_type == "image" and "colormap" not in add_kwargs:
254
+ channel_colormaps = [
255
+ "red",
256
+ "green",
257
+ "blue",
258
+ "cyan",
259
+ "magenta",
260
+ "yellow",
261
+ ]
262
+ add_kwargs["colormap"] = channel_colormaps[
263
+ i % len(channel_colormaps)
264
+ ]
265
+
266
+ enhanced_layers.append((data, add_kwargs, layer_type))
267
+
268
+ return enhanced_layers
269
+ else:
270
+ if verbose:
271
+ print(
272
+ f"napari-ome-zarr: Reader returned empty layer list for {filepath}"
273
+ )
274
+ return None
275
+
276
+ except (ImportError, ValueError, TypeError, OSError) as e:
277
+ if verbose:
278
+ print(f"napari-ome-zarr: Failed to load {filepath}: {e}")
279
+ import traceback
280
+
281
+ traceback.print_exc()
282
+ return None
283
+
284
+
285
+ def load_zarr_basic(filepath: str) -> Union[np.ndarray, Any]:
286
+ """
287
+ Basic zarr loading with dask support as fallback
288
+ """
289
+ try:
290
+ root = zarr.open(filepath, mode="r")
291
+
292
+ # Handle zarr groups vs single arrays
293
+ if hasattr(root, "arrays"):
294
+ arrays_list = list(root.arrays())
295
+ if not arrays_list:
296
+ raise ValueError(f"No arrays found in zarr group: {filepath}")
297
+
298
+ # Try to find the main data array
299
+ # Look for arrays named '0', 'data', or take the first one
300
+ main_array = None
301
+ for name, array in arrays_list:
302
+ if name in ["0", "data"]:
303
+ main_array = array
304
+ break
305
+
306
+ if main_array is None:
307
+ main_array = arrays_list[0][1]
308
+
309
+ zarr_array = main_array
310
+ else:
311
+ zarr_array = root
312
+
313
+ # Convert to dask array for lazy loading if available
314
+ if DASK_AVAILABLE:
315
+ print(f"Loading zarr as dask array with shape: {zarr_array.shape}")
316
+ return da.from_zarr(zarr_array)
317
+ else:
318
+ print(
319
+ f"Loading zarr as numpy array with shape: {zarr_array.shape}"
320
+ )
321
+ return np.array(zarr_array)
322
+
323
+ except (ValueError, TypeError, OSError) as e:
324
+ print(f"Error in basic zarr loading for {filepath}: {e}")
325
+ raise
326
+
327
+
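A minimal sketch of this fallback path, assuming zarr and dask are installed; the "example.zarr" path is illustrative:

    import dask.array as da
    import zarr

    root = zarr.open("example.zarr", mode="r")
    # Groups expose (name, array) pairs; a bare array is used directly.
    arr = next(iter(root.arrays()))[1] if hasattr(root, "arrays") else root
    lazy = da.from_zarr(arr)        # chunked, lazy view; nothing is read yet
    print(lazy.shape, lazy.dtype)
    first = lazy[0].compute()       # only the chunks backing the first slice are read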
328
+ def is_ome_zarr(filepath: str) -> bool:
329
+ """
330
+ Check if a zarr file is OME-Zarr format by looking for OME metadata
331
+ """
332
+ try:
333
+ if not os.path.exists(filepath):
334
+ return False
335
+
336
+ root = zarr.open(filepath, mode="r")
337
+
338
+ if hasattr(root, "attrs") and (
339
+ "ome" in root.attrs
340
+ or "omero" in root.attrs
341
+ or "multiscales" in root.attrs
342
+ ):
343
+ return True
344
+
345
+ # Check for .zattrs file with OME metadata
346
+ zattrs_path = os.path.join(filepath, ".zattrs")
347
+ if os.path.exists(zattrs_path):
348
+ import json
349
+
350
+ try:
351
+ with open(zattrs_path) as f:
352
+ attrs = json.load(f)
353
+ if (
354
+ "ome" in attrs
355
+ or "omero" in attrs
356
+ or "multiscales" in attrs
357
+ ):
358
+ return True
359
+ except (OSError, json.JSONDecodeError):
360
+ pass
361
+
362
+ return False
363
+
364
+ except (ValueError, TypeError, OSError):
365
+ return False
366
+
367
+
368
+ def get_zarr_info(filepath: str) -> dict:
369
+ """Get detailed information about a zarr dataset"""
370
+ info = {
371
+ "is_ome_zarr": False,
372
+ "is_multiscale": False,
373
+ "num_arrays": 0,
374
+ "arrays": [],
375
+ "shape": None,
376
+ "dtype": None,
377
+ "chunks": None,
378
+ "has_labels": False,
379
+ "resolution_levels": 0,
380
+ }
381
+
382
+ try:
383
+ root = zarr.open(filepath, mode="r")
384
+ info["is_ome_zarr"] = is_ome_zarr(filepath)
385
+
386
+ if hasattr(root, "arrays"):
387
+ arrays_list = list(root.arrays())
388
+ info["num_arrays"] = len(arrays_list)
389
+ info["arrays"] = [name for name, _ in arrays_list]
390
+
391
+ if (
392
+ info["is_ome_zarr"]
393
+ and hasattr(root, "attrs")
394
+ and "multiscales" in root.attrs
395
+ ):
396
+ info["is_multiscale"] = True
397
+ multiscales = root.attrs["multiscales"]
398
+ if multiscales and len(multiscales) > 0:
399
+ datasets = multiscales[0].get("datasets", [])
400
+ info["resolution_levels"] = len(datasets)
401
+
402
+ if arrays_list:
403
+ first_array = arrays_list[0][1]
404
+ info["shape"] = first_array.shape
405
+ info["dtype"] = str(first_array.dtype)
406
+ info["chunks"] = first_array.chunks
407
+
408
+ info["has_labels"] = "labels" in info["arrays"]
409
+
410
+ else:
411
+ info["num_arrays"] = 1
412
+ info["shape"] = root.shape
413
+ info["dtype"] = str(root.dtype)
414
+ info["chunks"] = root.chunks
415
+
416
+ except (ValueError, TypeError, OSError) as e:
417
+ print(f"Error getting zarr info for {filepath}: {e}")
418
+
419
+ return info
420
+
421
+
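A usage sketch (the path is hypothetical): the returned dictionary can be inspected before deciding how to open the dataset:

    info = get_zarr_info("/data/embryo.zarr")
    print(info["is_ome_zarr"], info["shape"], info["dtype"], info["chunks"])
    if info["is_multiscale"]:
        print(f"{info['resolution_levels']} pyramid levels; highest-resolution shape {info['shape']}")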
422
+ def load_image_file(filepath: str) -> Union[np.ndarray, List, Any]:
423
+ """
424
+ Load image from file, supporting both TIFF and Zarr formats with proper metadata handling
425
+ """
426
+ if filepath.lower().endswith(".zarr"):
427
+
428
+ # Try to use napari-ome-zarr reader first for proper metadata handling
429
+ if OME_ZARR_AVAILABLE:
430
+ try:
431
+ layer_data_list = load_zarr_with_napari_ome_zarr(filepath)
432
+ if layer_data_list:
433
+ print(
434
+ f"Loaded {len(layer_data_list)} layers from OME-Zarr"
435
+ )
436
+ return layer_data_list
437
+ except (ImportError, ValueError, TypeError, OSError) as e:
438
+ print(
439
+ f"napari-ome-zarr reader failed: {e}, falling back to basic zarr loading"
440
+ )
441
+
442
+ # Fallback to basic zarr loading with dask
443
+ return load_zarr_basic(filepath)
444
+ else:
445
+ # Use tifffile for TIFF files to preserve dimension order
446
+ # (skimage.io.imread may transpose dimensions)
447
+ if _HAS_TIFFFILE and (
448
+ filepath.lower().endswith(".tif")
449
+ or filepath.lower().endswith(".tiff")
450
+ ):
451
+ return tifffile.imread(filepath)
452
+ else:
453
+ return imread(filepath)
454
+
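Because the loader returns either a single array or a list of napari layer tuples, callers branch on the return type. A short usage sketch with an illustrative path:

    result = load_image_file("/data/sample.zarr")   # or a .tif path
    if isinstance(result, list):
        # napari-ome-zarr produced (data, add_kwargs, layer_type) tuples
        for data, add_kwargs, layer_type in result:
            print(layer_type, getattr(data, "shape", None), add_kwargs.get("name"))
    else:
        print("single array:", result.shape, result.dtype)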
50
455
 
51
456
  class ProcessedFilesTableWidget(QTableWidget):
52
457
  """
@@ -65,9 +470,9 @@ class ProcessedFilesTableWidget(QTableWidget):
65
470
  # Track file mappings
66
471
  self.file_pairs = {}
67
472
 
68
- # Currently loaded images
69
- self.current_original_image = None
70
- self.current_processed_image = None
473
+ # Currently loaded images (can be multiple for multi-channel)
474
+ self.current_original_images = []
475
+ self.current_processed_images = []
71
476
 
72
477
  # For tracking multi-output files
73
478
  self.multi_output_files = {}
@@ -108,15 +513,6 @@ class ProcessedFilesTableWidget(QTableWidget):
108
513
  def update_processed_files(self, processing_info: List[Dict]):
109
514
  """
110
515
  Update table with processed files
111
-
112
- Args:
113
- processing_info: List of dictionaries containing:
114
- {
115
- 'original_file': original filepath,
116
- 'processed_file': processed filepath (single output)
117
- - OR -
118
- 'processed_files': list of processed filepaths (multi-output)
119
- }
120
516
  """
121
517
  for item in processing_info:
122
518
  original_file = item["original_file"]
@@ -214,9 +610,57 @@ class ProcessedFilesTableWidget(QTableWidget):
214
610
  if filepath:
215
611
  self._load_processed_image(filepath)
216
612
 
613
+ def _clear_current_images(self, image_list):
614
+ """Helper to clear a list of current images"""
615
+ for img_layer in image_list:
616
+ try:
617
+ if img_layer in self.viewer.layers:
618
+ self.viewer.layers.remove(img_layer)
619
+ else:
620
+ # Try by name if reference doesn't work
621
+ layer_names = [layer.name for layer in self.viewer.layers]
622
+ if img_layer.name in layer_names:
623
+ self.viewer.layers.remove(img_layer.name)
624
+ except (KeyError, ValueError, AttributeError) as e:
625
+ print(f"Warning: Could not remove layer: {e}")
626
+ image_list.clear()
627
+
628
+ def _should_enable_3d_view(self, data):
629
+ """
630
+ Check if 3D view should be enabled based on data dimensions.
631
+
632
+ Conservative approach: Only enable 3D view for clearly spatial 3D data (Z-stacks),
633
+ not for time series which should use 2D view with time slider.
634
+ """
635
+ if not hasattr(data, "shape") or len(data.shape) < 3:
636
+ return False
637
+
638
+ shape = data.shape
639
+
640
+ # If first dimension is channels (2-4), check remaining dims
641
+ if shape[0] >= 2 and shape[0] <= 4:
642
+ meaningful_dims = shape[1:]
643
+ else:
644
+ meaningful_dims = shape
645
+
646
+ # Only enable 3D view for data with 4+ dimensions (like TZYX, CZYX)
647
+ # or 3D data with many slices (likely a Z-stack, not time series)
648
+ if len(meaningful_dims) >= 4:
649
+ # TZYX or similar - check Z dimension
650
+ z_dim = meaningful_dims[1] if len(meaningful_dims) >= 4 else 1
651
+ return z_dim > 1
652
+ elif len(meaningful_dims) == 3:
653
+ # Could be ZYX (spatial) or TYX (temporal)
654
+ # Only enable 3D for many slices (likely Z-stack)
655
+ # 10+ slices suggests Z-stack, fewer suggests time series
656
+ first_dim = meaningful_dims[0]
657
+ return first_dim > 10
658
+
659
+ return False
660
+
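The comments above encode a shape heuristic; restated below as a standalone sketch for clarity (it mirrors the method, it is not a separate API):

    def should_enable_3d(shape):
        # Mirror of _should_enable_3d_view, written as a free function for illustration.
        if len(shape) < 3:
            return False
        dims = shape[1:] if 2 <= shape[0] <= 4 else shape   # drop a leading 2-4 sized channel axis
        if len(dims) >= 4:
            return dims[1] > 1          # e.g. (T, Z, Y, X): enable 3D when Z > 1
        if len(dims) == 3:
            return dims[0] > 10         # many slices -> Z-stack; few -> treat as time series
        return False

    assert should_enable_3d((3, 50, 512, 512))        # CZYX with 50 z-slices -> 3D
    assert not should_enable_3d((5, 512, 512))        # TYX with 5 timepoints -> stay 2D
    assert should_enable_3d((2, 40, 25, 256, 256))    # C=2, then TZYX with Z=25 -> 3D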
217
661
  def _load_original_image(self, filepath: str):
218
662
  """
219
- Load original image into viewer
663
+ Load original image into viewer with proper multi-channel support using napari-ome-zarr
220
664
  """
221
665
  # Ensure filepath is valid
222
666
  if not filepath or not os.path.exists(filepath):
@@ -224,60 +668,223 @@ class ProcessedFilesTableWidget(QTableWidget):
224
668
  self.viewer.status = f"Error: File not found: {filepath}"
225
669
  return
226
670
 
227
- # Remove existing original layer if it exists
228
- if self.current_original_image is not None:
229
- try:
230
- # Check if the layer is still in the viewer
231
- if self.current_original_image in self.viewer.layers:
232
- self.viewer.layers.remove(self.current_original_image)
233
- else:
234
- # If not found by reference, try by name
235
- layer_names = [layer.name for layer in self.viewer.layers]
236
- if self.current_original_image.name in layer_names:
237
- self.viewer.layers.remove(
238
- self.current_original_image.name
239
- )
240
- except (KeyError, ValueError) as e:
241
- print(
242
- f"Warning: Could not remove previous original layer: {e}"
243
- )
244
-
245
- # Reset the current original image reference
246
- self.current_original_image = None
671
+ # Remove existing original layers
672
+ self._clear_current_images(self.current_original_images)
247
673
 
248
674
  # Load new image
249
675
  try:
250
676
  # Display status while loading
251
677
  self.viewer.status = f"Loading {os.path.basename(filepath)}..."
252
678
 
253
- image = imread(filepath)
254
- # remove singletons
255
- image = np.squeeze(image)
256
- # check if label image by checking file name
257
- is_label = "labels" in os.path.basename(
258
- filepath
259
- ) or "semantic" in os.path.basename(filepath)
679
+ # For zarr files, use viewer.open() with the napari-ome-zarr plugin directly
680
+ if filepath.lower().endswith(".zarr") and OME_ZARR_AVAILABLE:
681
+ print("Using viewer.open() with napari-ome-zarr plugin")
682
+
683
+ # Use napari's built-in open method with the plugin
684
+ # This is exactly what napari does when you open a zarr file
685
+ try:
686
+ layers = self.viewer.open(
687
+ filepath, plugin="napari-ome-zarr"
688
+ )
689
+
690
+ # Track the added layers
691
+ if layers:
692
+ if isinstance(layers, list):
693
+ self.current_original_images.extend(layers)
694
+ else:
695
+ self.current_original_images.append(layers)
696
+
697
+ # Check if we should enable 3D view
698
+ if len(self.current_original_images) > 0:
699
+ first_layer = self.current_original_images[0]
700
+ if hasattr(
701
+ first_layer, "data"
702
+ ) and self._should_enable_3d_view(
703
+ first_layer.data
704
+ ):
705
+ self.viewer.dims.ndisplay = 3
706
+ print(
707
+ f"Switched to 3D view for data with shape: {first_layer.data.shape}"
708
+ )
709
+
710
+ self.viewer.status = f"Loaded {len(self.current_original_images)} layers from {os.path.basename(filepath)}"
711
+ return
712
+ else:
713
+ print(
714
+ "napari-ome-zarr returned no layers, falling back to manual loading"
715
+ )
716
+ except (ImportError, ValueError, TypeError, OSError) as e:
717
+ print(
718
+ f"napari-ome-zarr failed: {e}, falling back to manual loading"
719
+ )
720
+
721
+ # Fallback for non-zarr files or if napari-ome-zarr fails
722
+ # Load image using the unified loader function
723
+ image_data = load_image_file(filepath)
724
+
725
+ # Handle multi-layer data from OME-Zarr or enhanced basic loading
726
+ if isinstance(image_data, list):
727
+ # Channel-specific colormaps: R, G, B, then additional colors
728
+ channel_colormaps = [
729
+ "red",
730
+ "green",
731
+ "blue",
732
+ "cyan",
733
+ "magenta",
734
+ "yellow",
735
+ "orange",
736
+ "purple",
737
+ "pink",
738
+ "gray",
739
+ ]
740
+
741
+ # This is from napari-ome-zarr reader or enhanced basic loading - add each layer separately
742
+ for layer_idx, layer_info in enumerate(image_data):
743
+ # Handle different formats of layer_info
744
+ if isinstance(layer_info, tuple) and len(layer_info) == 3:
745
+ # Format: (data, add_kwargs, layer_type)
746
+ data, add_kwargs, layer_type = layer_info
747
+ elif (
748
+ isinstance(layer_info, tuple) and len(layer_info) == 2
749
+ ):
750
+ # Format: (data, add_kwargs) - assume image type
751
+ data, add_kwargs = layer_info
752
+ layer_type = "image"
753
+ else:
754
+ # Just data - create minimal kwargs
755
+ data = layer_info
756
+ add_kwargs = {}
757
+ layer_type = "image"
758
+
759
+ base_filename = os.path.basename(filepath)
760
+
761
+ if layer_type == "image":
762
+ # Check if this is a multi-channel image that needs to be split using channel_axis
763
+ if hasattr(data, "shape") and len(data.shape) >= 3:
764
+ # Look for a channel dimension (small dimension, typically <= 10)
765
+ potential_channel_dims = []
766
+ for dim_idx, dim_size in enumerate(data.shape):
767
+ if dim_size <= 10 and dim_size > 1:
768
+ potential_channel_dims.append(
769
+ (dim_idx, dim_size)
770
+ )
771
+
772
+ # If we found a potential channel dimension, use napari's channel_axis
773
+ if potential_channel_dims:
774
+ # Use the first potential channel dimension
775
+ channel_axis, num_channels = (
776
+ potential_channel_dims[0]
777
+ )
778
+ print(
779
+ f"Using napari channel_axis={channel_axis} for {num_channels} channels"
780
+ )
781
+
782
+ # Let napari handle channel splitting automatically with proper colormaps
783
+ layers = self.viewer.add_image(
784
+ data,
785
+ channel_axis=channel_axis,
786
+ name=f"Original: {base_filename}",
787
+ blending="additive",
788
+ )
789
+
790
+ # Track all the layers napari created
791
+ if isinstance(layers, list):
792
+ self.current_original_images.extend(layers)
793
+ else:
794
+ self.current_original_images.append(layers)
795
+
796
+ continue # Skip the normal single-layer processing
797
+
798
+ # Normal single-layer processing (no channel splitting needed)
799
+ # Override/set colormap for proper channel assignment
800
+ if "colormap" not in add_kwargs:
801
+ add_kwargs["colormap"] = (
802
+ channel_colormaps[layer_idx]
803
+ if layer_idx < len(channel_colormaps)
804
+ else "gray"
805
+ )
806
+
807
+ if "blending" not in add_kwargs:
808
+ add_kwargs["blending"] = (
809
+ "additive" # Enable proper multi-channel blending
810
+ )
811
+
812
+ # Ensure proper naming
813
+ if "name" not in add_kwargs or not add_kwargs["name"]:
814
+ add_kwargs["name"] = (
815
+ f"C{layer_idx+1}: {base_filename}"
816
+ )
817
+
818
+ layer = self.viewer.add_image(data, **add_kwargs)
819
+ self.current_original_images.append(layer)
820
+
821
+ elif layer_type == "labels":
822
+ if "name" not in add_kwargs or not add_kwargs["name"]:
823
+ add_kwargs["name"] = (
824
+ f"Labels{layer_idx+1}: {base_filename}"
825
+ )
826
+
827
+ layer = self.viewer.add_labels(data, **add_kwargs)
828
+ self.current_original_images.append(layer)
829
+
830
+ # Switch to 3D view if data has meaningful 3D dimensions
831
+ if len(self.current_original_images) > 0:
832
+ # Get the first layer's data safely
833
+ first_layer = self.current_original_images[0]
834
+ if hasattr(first_layer, "data"):
835
+ first_layer_data = first_layer.data
836
+ if self._should_enable_3d_view(first_layer_data):
837
+ self.viewer.dims.ndisplay = 3
838
+ print(
839
+ f"Switched to 3D view for data with shape: {first_layer_data.shape}"
840
+ )
841
+
842
+ self.viewer.status = f"Loaded {len(self.current_original_images)} channels from {os.path.basename(filepath)}"
843
+ return
844
+
845
+ # Handle single image data (TIFF or simple zarr)
846
+ image = image_data
847
+
848
+ # Remove singletons if it's a numpy array
849
+ if hasattr(image, "squeeze") and not hasattr(image, "chunks"):
850
+ image = np.squeeze(image)
851
+
852
+ # Don't automatically split channels - let napari handle with sliders
853
+ # This avoids confusion between channels (C) and time (T) dimensions
854
+ # Users can manually split if needed using the "Split Color Channels" function
855
+ base_filename = os.path.basename(filepath)
856
+ # check if label image by checking image dtype
857
+ is_label = is_label_image(image)
858
+
260
859
  if is_label:
261
- image = image.astype(np.uint32)
262
- self.current_original_image = self.viewer.add_labels(
263
- image, name=f"Labels: {os.path.basename(filepath)}"
860
+ if hasattr(image, "astype"):
861
+ image = image.astype(np.uint32)
862
+ layer = self.viewer.add_labels(
863
+ image, name=f"Labels: {base_filename}"
264
864
  )
265
865
  else:
266
- self.current_original_image = self.viewer.add_image(
267
- image, name=f"Original: {os.path.basename(filepath)}"
866
+ layer = self.viewer.add_image(
867
+ image, name=f"Original: {base_filename}"
268
868
  )
269
869
 
270
- # Update status with success message
271
- self.viewer.status = f"Loaded {os.path.basename(filepath)}"
870
+ self.current_original_images.append(layer)
871
+
872
+ # Don't automatically switch to 3D view - let user decide
873
+ # napari will show appropriate sliders for all dimensions
874
+
875
+ self.viewer.status = f"Loaded {base_filename}"
272
876
 
273
- except (ValueError, TypeError, OSError, tifffile.TiffFileError) as e:
877
+ except (ValueError, TypeError, OSError, ImportError) as e:
274
878
  print(f"Error loading original image {filepath}: {e}")
879
+ import traceback
880
+
881
+ traceback.print_exc()
275
882
  self.viewer.status = f"Error processing {filepath}: {e}"
276
883
 
277
884
  def _load_processed_image(self, filepath: str):
278
885
  """
279
- Load processed image into viewer, distinguishing labels by filename pattern
280
- and ensure it's always shown on top
886
+ Load processed image into viewer with multi-channel support and ensure it's always shown on top
887
+ Also handles points data from spot detection functions.
281
888
  """
282
889
  # Ensure filepath is valid
283
890
  if not filepath or not os.path.exists(filepath):
@@ -285,70 +892,320 @@ class ProcessedFilesTableWidget(QTableWidget):
285
892
  self.viewer.status = f"Error: File not found: {filepath}"
286
893
  return
287
894
 
288
- # Remove existing processed layer if it exists
289
- if self.current_processed_image is not None:
895
+ # Remove existing processed layers
896
+ self._clear_current_images(self.current_processed_images)
897
+
898
+ # Special handling for .npy files (likely points data from spot detection)
899
+ if filepath.lower().endswith(".npy"):
290
900
  try:
291
- # Check if the layer is still in the viewer
292
- if self.current_processed_image in self.viewer.layers:
293
- self.viewer.layers.remove(self.current_processed_image)
901
+ data = np.load(filepath)
902
+
903
+ # Check if this is points data
904
+ if (
905
+ isinstance(data, np.ndarray)
906
+ and data.ndim == 2
907
+ and data.shape[1] in [2, 3] # 2D or 3D coordinates
908
+ and data.dtype in [np.float32, np.float64]
909
+ ): # Coordinate data
910
+
911
+ print(f"Loading points data: {data.shape} points")
912
+
913
+ # Determine if 2D or 3D points
914
+ is_3d = data.shape[1] == 3
915
+
916
+ # Set appropriate point properties
917
+ point_properties = {
918
+ "size": 8,
919
+ "symbol": "ring",
920
+ "opacity": 1,
921
+ "face_color": [1.0, 0.5, 0.2],
922
+ "border_color": [1.0, 0.5, 0.2],
923
+ }
924
+
925
+ if is_3d:
926
+ point_properties["out_of_slice_display"] = True
927
+
928
+ # Add points layer
929
+ points_layer = self.viewer.add_points(
930
+ data,
931
+ name=f"Spots ({os.path.basename(filepath)})",
932
+ **point_properties,
933
+ )
934
+
935
+ # Track the layer
936
+ self.current_processed_images = [points_layer]
937
+
938
+ self.viewer.status = f"Loaded {len(data)} spots from {os.path.basename(filepath)}"
939
+ print(
940
+ f"Successfully loaded {len(data)} spots as points layer"
941
+ )
942
+ return
943
+
294
944
  else:
295
- # If not found by reference, try by name
296
- layer_names = [layer.name for layer in self.viewer.layers]
297
- if self.current_processed_image.name in layer_names:
298
- self.viewer.layers.remove(
299
- self.current_processed_image.name
300
- )
301
- except (KeyError, ValueError) as e:
302
- print(
303
- f"Warning: Could not remove previous processed layer: {e}"
304
- )
945
+ print(
946
+ "NPY file doesn't contain points data, treating as image"
947
+ )
948
+ # Fall through to regular image loading
305
949
 
306
- # Reset the current processed image reference
307
- self.current_processed_image = None
950
+ except (OSError, ValueError, AttributeError) as e:
951
+ print(f"Error loading NPY file as points: {e}")
952
+ # Fall through to regular image loading
308
953
 
309
- # Load new image
954
+ # Load new image (original logic)
310
955
  try:
311
956
  # Display status while loading
312
957
  self.viewer.status = f"Loading {os.path.basename(filepath)}..."
313
958
 
314
- image = imread(filepath)
315
- # remove singletons
316
- image = np.squeeze(image)
317
- filename = os.path.basename(filepath)
959
+ # For zarr files, use viewer.open() with the napari-ome-zarr plugin directly
960
+ if filepath.lower().endswith(".zarr") and OME_ZARR_AVAILABLE:
961
+ print(
962
+ "Using viewer.open() with napari-ome-zarr plugin for processed image"
963
+ )
964
+
965
+ # Use napari's built-in open method with the plugin
966
+ try:
967
+ layers = self.viewer.open(
968
+ filepath, plugin="napari-ome-zarr"
969
+ )
970
+
971
+ # Track the added layers and rename them as processed
972
+ if layers:
973
+ if isinstance(layers, list):
974
+ for layer in layers:
975
+ layer.name = f"Processed {layer.name}"
976
+ self.current_processed_images.append(layer)
977
+ else:
978
+ layers.name = f"Processed {layers.name}"
979
+ self.current_processed_images.append(layers)
980
+
981
+ # Switch to 3D view if data has meaningful 3D dimensions
982
+ if len(self.current_processed_images) > 0:
983
+ first_layer = self.current_processed_images[0]
984
+ if hasattr(first_layer, "data"):
985
+ first_layer_data = first_layer.data
986
+ if self._should_enable_3d_view(
987
+ first_layer_data
988
+ ):
989
+ self.viewer.dims.ndisplay = 3
990
+ print(
991
+ f"Switched to 3D view for processed data with shape: {first_layer_data.shape}"
992
+ )
993
+
994
+ # Move all processed layers to top
995
+ for layer in self.current_processed_images:
996
+ if layer in self.viewer.layers:
997
+ layer_index = self.viewer.layers.index(layer)
998
+ if layer_index < len(self.viewer.layers) - 1:
999
+ self.viewer.layers.move(
1000
+ layer_index,
1001
+ len(self.viewer.layers) - 1,
1002
+ )
1003
+
1004
+ self.viewer.status = f"Loaded {len(self.current_processed_images)} processed layers from {os.path.basename(filepath)}"
1005
+ return
1006
+ else:
1007
+ print(
1008
+ "napari-ome-zarr returned no layers for processed image, falling back"
1009
+ )
1010
+ except (ImportError, ValueError, TypeError, OSError) as e:
1011
+ print(
1012
+ f"napari-ome-zarr failed for processed image: {e}, falling back"
1013
+ )
318
1014
 
319
- # Check if filename contains label indicators
320
- is_label = "labels" in filename or "semantic" in filename
1015
+ # Fallback for non-zarr files or if napari-ome-zarr fails
1016
+ # Load image using the unified loader function
1017
+ image_data = load_image_file(filepath)
1018
+
1019
+ # Handle multi-layer data from OME-Zarr or enhanced basic loading
1020
+ if isinstance(image_data, list):
1021
+ # Channel-specific colormaps: R, G, B, then additional colors
1022
+ channel_colormaps = [
1023
+ "red",
1024
+ "green",
1025
+ "blue",
1026
+ "cyan",
1027
+ "magenta",
1028
+ "yellow",
1029
+ "orange",
1030
+ "purple",
1031
+ "pink",
1032
+ "gray",
1033
+ ]
1034
+
1035
+ # This is from napari-ome-zarr reader or enhanced basic loading - add each layer separately
1036
+ for layer_idx, layer_info in enumerate(image_data):
1037
+ # Handle different formats of layer_info
1038
+ if isinstance(layer_info, tuple) and len(layer_info) == 3:
1039
+ # Format: (data, add_kwargs, layer_type)
1040
+ data, add_kwargs, layer_type = layer_info
1041
+ elif (
1042
+ isinstance(layer_info, tuple) and len(layer_info) == 2
1043
+ ):
1044
+ # Format: (data, add_kwargs) - assume image type
1045
+ data, add_kwargs = layer_info
1046
+ layer_type = "image"
1047
+ else:
1048
+ # Just data - create minimal kwargs
1049
+ data = layer_info
1050
+ add_kwargs = {}
1051
+ layer_type = "image"
1052
+
1053
+ # Ensure proper naming and colormaps for processed images
1054
+ filename = os.path.basename(filepath)
1055
+
1056
+ if layer_type == "image":
1057
+ # Check if this is a multi-channel image that needs to be split using channel_axis
1058
+ if hasattr(data, "shape") and len(data.shape) >= 3:
1059
+ # Look for a channel dimension (small dimension, typically <= 10)
1060
+ potential_channel_dims = []
1061
+ for dim_idx, dim_size in enumerate(data.shape):
1062
+ if dim_size <= 10 and dim_size > 1:
1063
+ potential_channel_dims.append(
1064
+ (dim_idx, dim_size)
1065
+ )
1066
+
1067
+ # If we found a potential channel dimension, use napari's channel_axis
1068
+ if potential_channel_dims:
1069
+ # Use the first potential channel dimension
1070
+ channel_axis, num_channels = (
1071
+ potential_channel_dims[0]
1072
+ )
1073
+ print(
1074
+ f"Using napari channel_axis={channel_axis} for {num_channels} processed channels"
1075
+ )
1076
+
1077
+ # Let napari handle channel splitting automatically with proper colormaps
1078
+ layers = self.viewer.add_image(
1079
+ data,
1080
+ channel_axis=channel_axis,
1081
+ name=f"Processed: {filename}",
1082
+ blending="additive",
1083
+ )
1084
+
1085
+ # Track all the layers napari created
1086
+ if isinstance(layers, list):
1087
+ self.current_processed_images.extend(
1088
+ layers
1089
+ )
1090
+ else:
1091
+ self.current_processed_images.append(
1092
+ layers
1093
+ )
1094
+
1095
+ continue # Skip the normal single-layer processing
1096
+
1097
+ # Normal single-layer processing (no channel splitting needed)
1098
+ # Override/set colormap for proper channel assignment
1099
+ if "colormap" not in add_kwargs:
1100
+ add_kwargs["colormap"] = (
1101
+ channel_colormaps[layer_idx]
1102
+ if layer_idx < len(channel_colormaps)
1103
+ else "gray"
1104
+ )
1105
+
1106
+ if "blending" not in add_kwargs:
1107
+ add_kwargs["blending"] = "additive"
1108
+
1109
+ # Ensure proper naming for processed images
1110
+ if "name" not in add_kwargs or not add_kwargs["name"]:
1111
+ add_kwargs["name"] = (
1112
+ f"Processed C{layer_idx+1}: {filename}"
1113
+ )
1114
+ elif not add_kwargs["name"].startswith("Processed"):
1115
+ add_kwargs["name"] = (
1116
+ f"Processed {add_kwargs['name']}"
1117
+ )
1118
+
1119
+ layer = self.viewer.add_image(data, **add_kwargs)
1120
+ self.current_processed_images.append(layer)
1121
+
1122
+ elif layer_type == "labels":
1123
+ if "name" not in add_kwargs or not add_kwargs["name"]:
1124
+ add_kwargs["name"] = (
1125
+ f"Processed Labels{layer_idx+1}: {filename}"
1126
+ )
1127
+ elif not add_kwargs["name"].startswith("Processed"):
1128
+ add_kwargs["name"] = (
1129
+ f"Processed {add_kwargs['name']}"
1130
+ )
1131
+
1132
+ layer = self.viewer.add_labels(data, **add_kwargs)
1133
+ self.current_processed_images.append(layer)
1134
+
1135
+ # Switch to 3D view if data has meaningful 3D dimensions
1136
+ if len(self.current_processed_images) > 0:
1137
+ # Get the first layer's data safely
1138
+ first_layer = self.current_processed_images[0]
1139
+ if hasattr(first_layer, "data"):
1140
+ first_layer_data = first_layer.data
1141
+ if self._should_enable_3d_view(first_layer_data):
1142
+ self.viewer.dims.ndisplay = 3
1143
+ print(
1144
+ f"Switched to 3D view for processed data with shape: {first_layer_data.shape}"
1145
+ )
1146
+
1147
+ # Move all processed layers to top
1148
+ for layer in self.current_processed_images:
1149
+ if layer in self.viewer.layers:
1150
+ layer_index = self.viewer.layers.index(layer)
1151
+ if layer_index < len(self.viewer.layers) - 1:
1152
+ self.viewer.layers.move(
1153
+ layer_index, len(self.viewer.layers) - 1
1154
+ )
1155
+
1156
+ self.viewer.status = f"Loaded {len(self.current_processed_images)} processed channels from {os.path.basename(filepath)}"
1157
+ return
1158
+
1159
+ # Handle single image data
1160
+ image = image_data
1161
+
1162
+ # Remove singletons if it's a numpy array
1163
+ if hasattr(image, "squeeze") and not hasattr(image, "chunks"):
1164
+ image = np.squeeze(image)
1165
+
1166
+ # Don't automatically split channels - let napari handle with sliders
1167
+ # This avoids confusion between channels (C) and time (T) dimensions
1168
+ filename = os.path.basename(filepath)
1169
+ # Check if image dtype indicates labels
1170
+ is_label = is_label_image(image)
321
1171
 
322
1172
  # Add the layer using the appropriate method
323
1173
  if is_label:
324
1174
  # Ensure it's an appropriate dtype for labels
325
- if not np.issubdtype(image.dtype, np.integer):
1175
+ if hasattr(image, "astype") and not np.issubdtype(
1176
+ image.dtype, np.integer
1177
+ ):
326
1178
  image = image.astype(np.uint32)
327
1179
 
328
- self.current_processed_image = self.viewer.add_labels(
329
- image, name=f"Labels: {filename}"
1180
+ layer = self.viewer.add_labels(
1181
+ image, name=f"Processed Labels: {filename}"
330
1182
  )
331
1183
  else:
332
- self.current_processed_image = self.viewer.add_image(
1184
+ layer = self.viewer.add_image(
333
1185
  image, name=f"Processed: {filename}"
334
1186
  )
335
1187
 
1188
+ self.current_processed_images.append(layer)
1189
+
1190
+ # Don't automatically switch to 3D view - let user decide
1191
+ # napari will show appropriate sliders for all dimensions
1192
+
336
1193
  # Move the processed layer to the top of the stack
337
- # Get the index of the current processed layer
338
- layer_index = self.viewer.layers.index(
339
- self.current_processed_image
340
- )
341
- # Move it to the top (last position in the list)
342
- if layer_index < len(self.viewer.layers) - 1:
343
- self.viewer.layers.move(
344
- layer_index, len(self.viewer.layers) - 1
345
- )
1194
+ if layer in self.viewer.layers:
1195
+ layer_index = self.viewer.layers.index(layer)
1196
+ if layer_index < len(self.viewer.layers) - 1:
1197
+ self.viewer.layers.move(
1198
+ layer_index, len(self.viewer.layers) - 1
1199
+ )
346
1200
 
347
1201
  # Update status with success message
348
1202
  self.viewer.status = f"Loaded {filename} (moved to top layer)"
349
1203
 
350
- except (ValueError, TypeError, OSError, tifffile.TiffFileError) as e:
1204
+ except (ValueError, TypeError, OSError, ImportError) as e:
351
1205
  print(f"Error loading processed image {filepath}: {e}")
1206
+ import traceback
1207
+
1208
+ traceback.print_exc()
352
1209
  self.viewer.status = f"Error processing {filepath}: {e}"
353
1210
 
354
1211
  def _load_image(self, filepath: str):
@@ -398,6 +1255,11 @@ class ParameterWidget(QWidget):
398
1255
  widget.setDecimals(3)
399
1256
  if default_value is not None:
400
1257
  widget.setValue(default_value)
1258
+ elif param_type is bool:
1259
+ # Use checkbox for boolean parameters
1260
+ widget = QCheckBox()
1261
+ if default_value is not None:
1262
+ widget.setChecked(bool(default_value))
401
1263
  else:
402
1264
  # Default to text input for other types
403
1265
  widget = QLineEdit(
@@ -418,6 +1280,8 @@ class ParameterWidget(QWidget):
418
1280
 
419
1281
  if isinstance(widget, (QSpinBox, QDoubleSpinBox)):
420
1282
  values[param_name] = widget.value()
1283
+ elif isinstance(widget, QCheckBox):
1284
+ values[param_name] = widget.isChecked()
421
1285
  else:
422
1286
  # For text inputs, try to convert to the appropriate type
423
1287
  try:
@@ -436,10 +1300,13 @@ class ParameterWidget(QWidget):
436
1300
  "label": "Select Folder",
437
1301
  "value": "",
438
1302
  },
439
- input_suffix={"label": "File Suffix (Example: .tif)", "value": ""},
1303
+ input_suffix={
1304
+ "label": "File Suffix (Example: .tif,.zarr)",
1305
+ "value": ".tif,.zarr",
1306
+ },
440
1307
  )
441
1308
  def file_selector(
442
- viewer: napari.Viewer, input_folder: str, input_suffix: str = ".tif"
1309
+ viewer: napari.Viewer, input_folder: str, input_suffix: str = ".tif,.zarr"
443
1310
  ) -> List[str]:
444
1311
  """
445
1312
  Find files in a specified input folder with a given suffix and prepare for batch processing.
@@ -449,12 +1316,16 @@ def file_selector(
449
1316
  viewer.status = f"Invalid input folder: {input_folder}"
450
1317
  return []
451
1318
 
452
- # Find matching files
453
- matching_files = [
454
- os.path.join(input_folder, f)
455
- for f in os.listdir(input_folder)
456
- if f.endswith(input_suffix)
457
- ]
1319
+ # Parse multiple suffixes
1320
+ suffixes = [s.strip() for s in input_suffix.split(",") if s.strip()]
1321
+ if not suffixes:
1322
+ suffixes = [".tif"] # Fallback to tif if no valid suffixes
1323
+
1324
+ # Find matching files with multiple suffix support
1325
+ matching_files = []
1326
+ for f in os.listdir(input_folder):
1327
+ if any(f.endswith(suffix) for suffix in suffixes):
1328
+ matching_files.append(os.path.join(input_folder, f))
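A quick check of the comma-separated suffix handling introduced here, with illustrative values:

    input_suffix = " .tif, .zarr ,"                                   # raw user input
    suffixes = [s.strip() for s in input_suffix.split(",") if s.strip()]
    print(suffixes)                                                   # ['.tif', '.zarr']

    files = ["a.tif", "b.zarr", "c.czi", "d.tiff"]
    print([f for f in files if any(f.endswith(s) for s in suffixes)])
    # ['a.tif', 'b.zarr']  ('.tif' does not match '.tiff'; list both if needed)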
458
1329
 
459
1330
  # Create a results widget with batch processing option
460
1331
  results_widget = FileResultsWidget(
@@ -475,56 +1346,11 @@ def file_selector(
475
1346
  return matching_files
476
1347
 
477
1348
 
478
- # Modify the file_selector widget to add a browse button after it's created
479
- def _add_browse_button_to_selector(file_selector_widget):
480
- """
481
- Add a browse button to the file selector widget
482
- """
483
- # Get the container widget that holds the input_folder widget
484
- container = file_selector_widget.native
485
-
486
- # Create a browse button
487
- browse_button = QPushButton("Browse...")
488
-
489
- # Get access to the input_folder widget
490
- input_folder_widget = file_selector_widget.input_folder.native
491
-
492
- # Get the parent of the input_folder widget
493
- parent_layout = input_folder_widget.parentWidget().layout()
494
-
495
- # Create a container for input field and browse button
496
- container_widget = QWidget()
497
- h_layout = QHBoxLayout(container_widget)
498
- h_layout.setContentsMargins(0, 0, 0, 0)
499
-
500
- # Add the input field to our container
501
- h_layout.addWidget(input_folder_widget)
502
-
503
- # Add the browse button
504
- h_layout.addWidget(browse_button)
505
-
506
- # Replace the input field with our container
507
- layout_index = parent_layout.indexOf(input_folder_widget)
508
- parent_layout.removeWidget(input_folder_widget)
509
- parent_layout.insertWidget(layout_index, container_widget)
510
-
511
- # Connect button to browse action
512
- def browse_folder():
513
- folder = QFileDialog.getExistingDirectory(
514
- container,
515
- "Select Folder",
516
- file_selector_widget.input_folder.value or os.path.expanduser("~"),
517
- )
518
- if folder:
519
- file_selector_widget.input_folder.value = folder
520
-
521
- browse_button.clicked.connect(browse_folder)
522
-
523
- return file_selector_widget
524
-
525
-
526
1349
  # Create a modified file_selector with browse button
527
- file_selector = _add_browse_button_to_selector(file_selector)
1350
+ if _HAS_MAGICGUI and _HAS_QTPY:
1351
+ file_selector = add_browse_button_to_folder_field(
1352
+ file_selector, "input_folder"
1353
+ )
528
1354
 
529
1355
 
530
1356
  # Processing worker for multithreading
@@ -608,13 +1434,54 @@ class ProcessingWorker(QThread):
608
1434
  self.processing_finished.emit()
609
1435
 
610
1436
  def process_file(self, filepath):
611
- """Process a single file with support for large TIFF files and removal of all singleton dimensions"""
1437
+ """Process a single file with support for large TIFF and Zarr files"""
612
1438
  try:
613
- # Load the image
614
- image = imread(filepath)
615
- image_dtype = image.dtype
1439
+ # Load the image using the unified loader
1440
+ image_data = load_image_file(filepath)
616
1441
 
617
- print(f"Original image shape: {image.shape}, dtype: {image_dtype}")
1442
+ # Handle multi-layer data from OME-Zarr - extract first layer for processing
1443
+ if isinstance(image_data, list):
1444
+ print(
1445
+ f"Processing first layer of multi-layer file: {filepath}"
1446
+ )
1447
+ # Take the first image layer
1448
+ for data, add_kwargs, layer_type in image_data:
1449
+ if layer_type == "image":
1450
+ image = data
1451
+ # Extract metadata if available
1452
+ if isinstance(add_kwargs, dict):
1453
+ metadata = add_kwargs.get("metadata", {})
1454
+ if "axes" in metadata:
1455
+ print(f"Zarr axes: {metadata['axes']}")
1456
+ if "channel_axis" in metadata:
1457
+ print(
1458
+ f"Channel axis: {metadata['channel_axis']}"
1459
+ )
1460
+ break
1461
+ else:
1462
+ # No image layer found, take first available
1463
+ image = image_data[0][0]
1464
+ else:
1465
+ image = image_data
1466
+
1467
+ # Store original dtype for saving
1468
+ if hasattr(image, "dtype"):
1469
+ image_dtype = image.dtype
1470
+ else:
1471
+ image_dtype = np.float32
1472
+
1473
+ # Get shape information for different array types
1474
+ if hasattr(image, "shape"):
1475
+ shape_info = f"{image.shape}"
1476
+ elif hasattr(image, "__array__"):
1477
+ # For array-like objects
1478
+ try:
1479
+ arr = np.asarray(image)
1480
+ shape_info = f"{arr.shape} (converted from array-like)"
1481
+ except (ValueError, TypeError, AttributeError):
1482
+ shape_info = "unknown (array conversion failed)"
1483
+ else:
1484
+ shape_info = "unknown (no shape attribute)"
618
1485
 
619
1486
  # Check if this is a folder-processing function that shouldn't save individual files
620
1487
  function_name = getattr(
@@ -624,30 +1491,261 @@ class ProcessingWorker(QThread):
624
1491
  "timepoint" in function_name.lower()
625
1492
  or "merge" in function_name.lower()
626
1493
  or "folder" in function_name.lower()
1494
+ or "grid" in function_name.lower()
627
1495
  )
628
1496
 
1497
+ # Convert dask array to numpy for processing functions that don't support dask
1498
+ if hasattr(image, "chunks") and hasattr(image, "compute"):
1499
+ print("Converting dask array to numpy for processing...")
1500
+ # For very large arrays, we might want to process in chunks
1501
+ try:
1502
+ image = image.compute()
1503
+ except MemoryError:
1504
+ print(
1505
+ "Memory error computing dask array, trying chunked processing..."
1506
+ )
1507
+ # Could implement chunked processing here if needed
1508
+ raise
1509
+
629
1510
  # Apply processing with parameters
630
- processed_image = self.processing_func(image, **self.param_values)
1511
+ # For zarr files, pass the original filepath to enable optimized processing
1512
+ if filepath.lower().endswith(".zarr"):
1513
+ # Add filepath for zarr-aware processing functions
1514
+ processing_params = {
1515
+ **self.param_values,
1516
+ "_source_filepath": filepath,
1517
+ }
1518
+ else:
1519
+ processing_params = self.param_values
1520
+
1521
+ processed_result = self.processing_func(image, **processing_params)
1522
+
1523
+ if processed_result is None:
1524
+ # Allow processing functions to signal that this file should be skipped
1525
+ # Suppress message for grid_overlay since it's expected to return None for most files
1526
+ if not is_folder_function:
1527
+ print(
1528
+ "Processing function returned None; skipping save for this file."
1529
+ )
1530
+ return None
1531
+
1532
+ # Check if result is points data (for spot detection functions)
1533
+ if (
1534
+ isinstance(processed_result, np.ndarray)
1535
+ and processed_result.ndim == 2
1536
+ and processed_result.shape[1] in [2, 3] # 2D or 3D coordinates
1537
+ and processed_result.dtype in [np.float32, np.float64]
1538
+ ): # Coordinate data
1539
+
1540
+ print(f"Detected points data: {processed_result.shape} points")
1541
+
1542
+ # Save points as numpy array
1543
+ filename = os.path.basename(filepath)
1544
+ name, _ = os.path.splitext(filename)
1545
+ points_filename = f"{name}_spots.npy"
1546
+ points_filepath = os.path.join(
1547
+ self.output_folder, points_filename
1548
+ )
1549
+
1550
+ np.save(points_filepath, processed_result)
1551
+ print(f"Saved points to: {points_filepath}")
1552
+
1553
+ # Also save as CSV if requested
1554
+ if hasattr(self, "param_values") and self.param_values.get(
1555
+ "output_csv", False
1556
+ ):
1557
+ csv_filename = f"{name}_spots.csv"
1558
+ csv_filepath = os.path.join(
1559
+ self.output_folder, csv_filename
1560
+ )
1561
+
1562
+ try:
1563
+ # Try to save as CSV with pandas
1564
+ import pandas as pd
1565
+
1566
+ columns = (
1567
+ ["y", "x"]
1568
+ if processed_result.shape[1] == 2
1569
+ else ["z", "y", "x"]
1570
+ )
1571
+ df = pd.DataFrame(processed_result, columns=columns)
1572
+ df.to_csv(csv_filepath, index=False)
1573
+ print(f"Saved CSV to: {csv_filepath}")
1574
+ except ImportError:
1575
+ # Fallback to numpy if pandas not available
1576
+ np.savetxt(
1577
+ csv_filepath,
1578
+ processed_result,
1579
+ delimiter=",",
1580
+ header=(
1581
+ "y,x"
1582
+ if processed_result.shape[1] == 2
1583
+ else "z,y,x"
1584
+ ),
1585
+ comments="",
1586
+ )
1587
+ print(f"Saved CSV (numpy fallback) to: {csv_filepath}")
1588
+
1589
+ return {
1590
+ "original_file": filepath,
1591
+ "processed_file": points_filepath,
1592
+ }
1593
+
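A sketch of the coordinate-vs-image test applied above, on synthetic arrays; any (N, 2) or (N, 3) float array passes it, which is why point outputs get the distinct "_spots.npy" suffix:

    import numpy as np

    def looks_like_points(a):
        # Same test as above: an (N, 2) or (N, 3) float array is treated as coordinates.
        return (
            isinstance(a, np.ndarray)
            and a.ndim == 2
            and a.shape[1] in (2, 3)
            and a.dtype in (np.float32, np.float64)
        )

    spots = (np.random.rand(200, 3) * 100).astype(np.float32)   # synthetic (z, y, x) detections
    image = np.random.rand(64, 64).astype(np.float32)

    assert looks_like_points(spots)
    assert not looks_like_points(image)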
1594
+ # Handle functions that return multiple outputs (e.g., channel splitting, layer subdivision)
1595
+ if (
1596
+ isinstance(processed_result, (list, tuple))
1597
+ and len(processed_result) > 1
1598
+ ):
1599
+ # Multiple outputs - save each as separate file
1600
+ processed_files = []
1601
+ base_name = os.path.splitext(os.path.basename(filepath))[0]
1602
+
1603
+ # Check if this is a layer subdivision function (returns 3 outputs)
1604
+ if (
1605
+ len(processed_result) == 3
1606
+ and self.output_suffix == "_layer"
1607
+ ):
1608
+ layer_names = [
1609
+ "_inner",
1610
+ "_middle",
1611
+ "_outer",
1612
+ ]
1613
+ for idx, (img, layer_name) in enumerate(
1614
+ zip(processed_result, layer_names)
1615
+ ):
1616
+ if not isinstance(img, np.ndarray):
1617
+ continue
1618
+
1619
+ # Remove singleton dimensions
1620
+ img = np.squeeze(img)
1621
+
1622
+ # Generate output filename with layer name
1623
+ output_filename = f"{base_name}{layer_name}.tif"
1624
+ output_path = os.path.join(
1625
+ self.output_folder, output_filename
1626
+ )
1627
+
1628
+ print(
1629
+ f"Layer {idx + 1} ({layer_name}) shape: {img.shape}"
1630
+ )
1631
+
1632
+ # Calculate approx file size in GB
1633
+ size_gb = img.size * img.itemsize / (1024**3)
1634
+ print(f"Estimated file size: {size_gb:.2f} GB")
1635
+
1636
+ # Check data range
1637
+ data_min = np.min(img) if img.size > 0 else 0
1638
+ data_max = np.max(img) if img.size > 0 else 0
1639
+ print(
1640
+ f"Layer {idx + 1} data range: {data_min} to {data_max}"
1641
+ )
1642
+
1643
+ # For very large files, use BigTIFF format
1644
+ use_bigtiff = size_gb > 2.0
1645
+
1646
+ # Layer subdivision outputs should always be saved as uint32
1647
+ # to ensure Napari auto-detects them as labels
1648
+ save_dtype = np.uint32
1649
+
1650
+ print(
1651
+ f"Saving layer {layer_name} as {save_dtype.__name__} with bigtiff={use_bigtiff}"
1652
+ )
1653
+ tifffile.imwrite(
1654
+ output_path,
1655
+ img.astype(save_dtype),
1656
+ compression="zlib",
1657
+ bigtiff=use_bigtiff,
1658
+ )
1659
+
1660
+ processed_files.append(output_path)
1661
+ else:
1662
+ # Default behavior for other multi-output functions (e.g., channel splitting)
1663
+ for idx, img in enumerate(processed_result):
1664
+ if not isinstance(img, np.ndarray):
1665
+ continue
1666
+
1667
+ # Remove singleton dimensions
1668
+ img = np.squeeze(img)
1669
+
1670
+ # Generate output filename
1671
+ output_filename = (
1672
+ f"{base_name}_ch{idx + 1}{self.output_suffix}"
1673
+ )
1674
+ output_path = os.path.join(
1675
+ self.output_folder, output_filename
1676
+ )
1677
+
1678
+ print(f"Output {idx + 1} shape: {img.shape}")
1679
+
1680
+ # Calculate approx file size in GB
1681
+ size_gb = img.size * img.itemsize / (1024**3)
1682
+ print(f"Estimated file size: {size_gb:.2f} GB")
1683
+
1684
+ # Check data range
1685
+ data_min = np.min(img) if img.size > 0 else 0
1686
+ data_max = np.max(img) if img.size > 0 else 0
1687
+ print(
1688
+ f"Output {idx + 1} data range: {data_min} to {data_max}"
1689
+ )
1690
+
1691
+ # For very large files, use BigTIFF format
1692
+ use_bigtiff = size_gb > 2.0
1693
+
1694
+ # Check if this is a label image based on dtype
1695
+ is_label = is_label_image(img)
1696
+
1697
+ if is_label:
1698
+ # For labels, always use uint32 to ensure Napari recognizes them
1699
+ # Napari auto-detects labels based on dtype (int32/uint32/int64/uint64)
1700
+ save_dtype = np.uint32
1701
+
1702
+ print(
1703
+ f"Label image detected, saving as {save_dtype.__name__} with bigtiff={use_bigtiff}"
1704
+ )
1705
+ tifffile.imwrite(
1706
+ output_path,
1707
+ img.astype(save_dtype),
1708
+ compression="zlib",
1709
+ bigtiff=use_bigtiff,
1710
+ )
1711
+ else:
1712
+ print(
1713
+ f"Regular image, saving with dtype {image_dtype} and bigtiff={use_bigtiff}"
1714
+ )
1715
+ tifffile.imwrite(
1716
+ output_path,
1717
+ img.astype(image_dtype),
1718
+ compression="zlib",
1719
+ bigtiff=use_bigtiff,
1720
+ )
1721
+
1722
+ processed_files.append(output_path)
1723
+
1724
+ return {
1725
+ "original_file": filepath,
1726
+ "processed_files": processed_files,
1727
+ }
1728
+
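The 2.0 GB cutoff used above guards against classic TIFF's 4 GB offset limit; a worked size estimate with an illustrative shape:

    import numpy as np

    shape, dtype = (80, 4, 2048, 2048), np.uint16               # e.g. a TCYX stack
    size_gb = np.prod(shape) * np.dtype(dtype).itemsize / (1024 ** 3)
    use_bigtiff = size_gb > 2.0
    print(f"~{size_gb:.2f} GB -> bigtiff={use_bigtiff}")        # ~2.50 GB -> bigtiff=True
    # The worker then writes with:
    #   tifffile.imwrite(output_path, img.astype(save_dtype), compression="zlib", bigtiff=use_bigtiff)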
1729
+ # Handle as image data (original logic)
1730
+ processed_image = processed_result
631
1731
 
632
1732
  print(
633
1733
  f"Processed image shape before removing singletons: {processed_image.shape}, dtype: {processed_image.dtype}"
634
1734
  )
635
1735
 
636
- # For folder functions, check if the output is the same as input (indicating no individual file should be saved)
1736
+ # For folder functions, check if the output is the same as input
637
1737
  if is_folder_function:
638
- # If the function returns the original image unchanged, it means it handled saving internally
639
1738
  if np.array_equal(processed_image, image):
640
1739
  print(
641
1740
  "Folder function returned unchanged image - skipping individual file save"
642
1741
  )
643
- return None # Return None to indicate no file should be created
1742
+ return None
644
1743
  else:
645
1744
  print(
646
1745
  "Folder function returned different data - will save individual file"
647
1746
  )
648
1747
 
649
1748
  # Remove ALL singleton dimensions from the processed image
650
- # This will keep only dimensions with size > 1
651
1749
  processed_image = np.squeeze(processed_image)
652
1750
 
653
1751
  print(
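Note on the multi-output save path added above: each array returned by a multi-output processing function is squeezed, its size is estimated to decide between classic TIFF and BigTIFF, and label outputs are forced to uint32 before a zlib-compressed write. A minimal stand-alone sketch of that save step (the helper name and output file name are illustrative, not part of the plugin):

    # Illustrative sketch of the save path shown in the diff above; the helper
    # name and output file name are made up for this example.
    import numpy as np
    import tifffile

    def save_output(img: np.ndarray, output_path: str, as_label: bool) -> None:
        img = np.squeeze(img)                              # drop singleton dimensions
        size_gb = img.size * img.itemsize / (1024**3)      # rough uncompressed size estimate
        use_bigtiff = size_gb > 2.0                        # BigTIFF above ~2 GB
        save_dtype = np.uint32 if as_label else img.dtype  # labels always saved as uint32
        tifffile.imwrite(
            output_path,
            img.astype(save_dtype),
            compression="zlib",
            bigtiff=use_bigtiff,
        )

    save_output(np.zeros((1, 256, 256), dtype=np.uint16), "layer_labels.tif", as_label=True)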
@@ -657,27 +1755,75 @@ class ProcessingWorker(QThread):
  # Generate new filename base
  filename = os.path.basename(filepath)
  name, ext = os.path.splitext(filename)
- if name.endswith(self.input_suffix):
+
+ # Handle multiple input suffixes for filename generation
+ input_suffixes = [
+ s.strip() for s in self.input_suffix.split(",") if s.strip()
+ ]
+ matched_suffix = ""
+ for suffix in input_suffixes:
+ suffix_clean = suffix.replace(
+ ".", ""
+ ) # Remove dot for comparison
+ if name.endswith(suffix_clean):
+ matched_suffix = suffix_clean
+ break
+
+ if matched_suffix:
  new_filename_base = (
- name[: -len(self.input_suffix)] + self.output_suffix
+ name[: -len(matched_suffix)] + self.output_suffix
  )
  else:
  new_filename_base = name + self.output_suffix

+ # For zarr input, default to .tif output unless processing function specifies otherwise
+ if filepath.lower().endswith(".zarr") and ext == ".zarr":
+ ext = ".tif"
+
  # Check if the first dimension should be treated as channels
- # If processed_image has more dimensions than the original image,
- # assume the first dimension represents channels
- is_multi_channel = (processed_image.ndim > image.ndim - 1) or (
- processed_image.ndim == image.ndim
- and processed_image.shape[0] <= 10
+ # Respect dimension_order hint if provided, otherwise use heuristic (2-4 channels for RGB/RGBA)
+ dimension_order_hint = processing_params.get(
+ "dimension_order", "Auto"
  )

- if (
- is_multi_channel and processed_image.shape[0] <= 10
- ): # Reasonable number of channels
+ # Only split if dimension_order indicates channels (CYX, TCYX, etc. with C first)
+ # or if Auto and shape suggests channels (2-4)
+ is_multi_channel = False
+ if dimension_order_hint in [
+ "CYX",
+ "CZYX",
+ "TCYX",
+ "ZCYX",
+ "TZCYX",
+ ]:
+ # User explicitly said first dim is channels - split it
+ is_multi_channel = (
+ processed_image.ndim > 2 and processed_image.shape[0] > 1
+ )
+ print(
+ f"dimension_order='{dimension_order_hint}' indicates channels, will split {processed_image.shape[0]} channels"
+ )
+ elif dimension_order_hint in ["TYX", "ZYX", "TZYX"]:
+ # User explicitly said it's NOT channels (time or Z) - don't split
+ is_multi_channel = False
+ print(
+ f"dimension_order='{dimension_order_hint}' indicates time/Z dimension, will NOT split channels"
+ )
+ elif dimension_order_hint == "Auto":
+ # Auto mode: use old heuristic (2-4 suggests channels)
+ is_multi_channel = (
+ processed_image.ndim > 2
+ and processed_image.shape[0] <= 4
+ and processed_image.shape[0] > 1
+ )
+ if is_multi_channel:
+ print(
+ f"Auto mode: shape[0]={processed_image.shape[0]} <= 4, assuming channels"
+ )
+
+ if is_multi_channel:
  # Save each channel as a separate image
  processed_files = []
-
  num_channels = processed_image.shape[0]
  print(
  f"Treating first dimension as channels. Saving {num_channels} separate channel files"
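The channel-splitting rule introduced above can be restated compactly: an explicit C-first dimension order forces a split, an explicit T/Z order forbids it, and Auto falls back to the 2-4 leading-planes heuristic (note that the shipped list also includes orders such as TCYX, where C is not the leading axis; the sketch below simply mirrors the diff as written). The helper name is hypothetical:

    # Condensed restatement of the is_multi_channel decision above; the helper name is hypothetical.
    def should_split_channels(shape: tuple, hint: str = "Auto") -> bool:
        if hint in ("CYX", "CZYX", "TCYX", "ZCYX", "TZCYX"):
            return len(shape) > 2 and shape[0] > 1      # hint says the first axis holds channels
        if hint in ("TYX", "ZYX", "TZYX"):
            return False                                # first axis is time or Z: never split
        # "Auto": 2-4 leading planes are assumed to be channels (RGB/RGBA-like)
        return len(shape) > 2 and 1 < shape[0] <= 4

    assert should_split_channels((3, 512, 512), "CYX") is True
    assert should_split_channels((120, 512, 512), "TYX") is False
    assert should_split_channels((120, 512, 512), "Auto") is False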
@@ -709,22 +1855,16 @@ class ProcessingWorker(QThread):
  )
  print(f"Channel {i} data range: {data_min} to {data_max}")

- # For very large files, we need to use BigTIFF format
- use_bigtiff = (
- size_gb > 2.0
- ) # Use BigTIFF for files over 2GB
+ # For very large files, use BigTIFF format
+ use_bigtiff = size_gb > 2.0

- if (
- "labels" in channel_filename
- or "semantic" in channel_filename
- ):
- # Choose appropriate integer type based on data range
- if data_max <= 255:
- save_dtype = np.uint8
- elif data_max <= 65535:
- save_dtype = np.uint16
- else:
- save_dtype = np.uint32
+ # Check if this is a label image based on dtype
+ is_label = is_label_image(channel_image)
+
+ if is_label:
+ # For labels, always use uint32 to ensure Napari recognizes them
+ # Napari auto-detects labels based on dtype (int32/uint32/int64/uint64)
+ save_dtype = np.uint32

  print(
  f"Label image detected, saving as {save_dtype.__name__} with bigtiff={use_bigtiff}"
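The hunks above and below replace the old filename-keyword test ("labels"/"semantic" in the name) with a dtype-based is_label_image() check. That helper's implementation is not part of this hunk; a plausible minimal version, given only as an assumption, would look like:

    # Assumed sketch only: the real is_label_image() in napari-tmidas is not shown
    # in this hunk and may use additional criteria.
    import numpy as np

    def is_label_image(img: np.ndarray) -> bool:
        # Integer-typed arrays are treated as label masks; float arrays as intensity images.
        return np.issubdtype(img.dtype, np.integer)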
@@ -736,12 +1876,9 @@ class ProcessingWorker(QThread):
  bigtiff=use_bigtiff,
  )
  else:
- # Handle large images with bigtiff format
  print(
  f"Regular image channel, saving with dtype {image_dtype} and bigtiff={use_bigtiff}"
  )
-
- # Save with original dtype and bigtiff format if needed
  tifffile.imwrite(
  channel_filepath,
  channel_image.astype(image_dtype),
@@ -751,7 +1888,6 @@ class ProcessingWorker(QThread):
  bigtiff=use_bigtiff,
  )

- # Return processing info
  return {
  "original_file": filepath,
  "processed_files": processed_files,
@@ -770,8 +1906,8 @@ class ProcessingWorker(QThread):
  )
  print(f"Estimated file size: {size_gb:.2f} GB")

- # For very large files, we need to use BigTIFF format
- use_bigtiff = size_gb > 2.0 # Use BigTIFF for files over 2GB
+ # For very large files, use BigTIFF format
+ use_bigtiff = size_gb > 2.0

  # Check data range
  data_min = (
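The BigTIFF rule is now identical in every branch: estimate the in-memory size and switch formats above 2 GB, since classic TIFF uses 32-bit offsets and cannot exceed 4 GB. A quick worked example of that estimate:

    # Worked example of the size estimate that drives the BigTIFF switch.
    import numpy as np

    shape, dtype = (50, 4, 2048, 2048), np.uint16              # T, C, Y, X
    size_gb = np.prod(shape) * np.dtype(dtype).itemsize / (1024**3)
    print(f"{size_gb:.2f} GB -> bigtiff={size_gb > 2.0}")      # 1.56 GB -> bigtiff=False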
@@ -782,13 +1918,11 @@ class ProcessingWorker(QThread):
  )
  print(f"Data range: {data_min} to {data_max}")

- if (
- "labels" in new_filename_base
- or "semantic" in new_filename_base
- ):
+ # Check if this is a label image based on dtype
+ is_label = is_label_image(processed_image)

+ if is_label:
  save_dtype = np.uint32
-
  print(
  f"Saving label image as {save_dtype.__name__} with bigtiff={use_bigtiff}"
  )
@@ -809,7 +1943,6 @@ class ProcessingWorker(QThread):
  bigtiff=use_bigtiff,
  )

- # Return processing info
  return {
  "original_file": filepath,
  "processed_file": new_filepath,
@@ -872,6 +2005,44 @@ class FileResultsWidget(QWidget):

  # Create processing function selector
  processing_layout = QVBoxLayout()
+
+ # Add dimension order selector FIRST (before function selector)
+ dim_order_layout = QHBoxLayout()
+ dim_order_label = QLabel("Dimension Order (optional hint):")
+ dim_order_label.setToolTip(
+ "Help processing functions interpret multi-dimensional data.\n"
+ "• Auto: Let function decide (default)\n"
+ "• YX: 2D image\n"
+ "• CYX: Channels first (e.g., RGB)\n"
+ "• TYX: Time series\n"
+ "• ZYX: Z-stack\n"
+ "• TCYX, TZYX, etc.: Combined dimensions\n"
+ "\nNote: Not all functions use this hint."
+ )
+ dim_order_layout.addWidget(dim_order_label)
+
+ self.dimension_order = QComboBox()
+ self.dimension_order.addItems(
+ [
+ "Auto",
+ "YX",
+ "CYX",
+ "TYX",
+ "ZYX",
+ "TCYX",
+ "TZYX",
+ "ZCYX",
+ "TZCYX",
+ ]
+ )
+ self.dimension_order.setToolTip(
+ "Dimension interpretation hint for processing functions"
+ )
+ dim_order_layout.addWidget(self.dimension_order)
+ dim_order_layout.addStretch()
+ processing_layout.addLayout(dim_order_layout)
+
+ # Now add processing function selector
  processing_label = QLabel("Select Processing Function:")
  processing_layout.addWidget(processing_label)

@@ -971,6 +2142,7 @@ class FileResultsWidget(QWidget):
  "folder" in function_name.lower()
  or "timepoint" in function_name.lower()
  or "merge" in function_name.lower()
+ or "grid" in function_name.lower()
  or "folder" in description.lower()
  or "cellpose" in description.lower()
  or "careamics" in description.lower()
@@ -1053,6 +2225,17 @@ class FileResultsWidget(QWidget):
  processing_func = function_info["func"]
  output_suffix = function_info["suffix"]

+ # Ensure grid overlay cache is reset before each new run
+ if getattr(processing_func, "__name__", "") == "create_grid_overlay":
+ try:
+ from napari_tmidas.processing_functions.grid_view_overlay import (
+ reset_grid_cache,
+ )
+
+ reset_grid_cache()
+ except ImportError:
+ pass
+
  # Get parameter values if available
  param_values = {}
  if hasattr(self, "param_widget_instance") and hasattr(
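The reset above implies that grid_view_overlay keeps module-level state between runs (at least a reset_grid_cache() helper and, further below, a _grid_output_path variable). That module is not part of this hunk; a minimal sketch of the implied state, offered only as an assumption:

    # Assumed sketch of the module-level state implied by reset_grid_cache() and
    # _grid_output_path; the real grid_view_overlay module may differ.
    from typing import Optional

    _grid_output_path: Optional[str] = None   # path of the most recently written grid overlay

    def reset_grid_cache() -> None:
        """Forget the previous run's output so the next grid starts clean."""
        global _grid_output_path
        _grid_output_path = None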
@@ -1060,6 +2243,12 @@ class FileResultsWidget(QWidget):
  ):
  param_values = self.param_widget_instance.get_parameter_values()

+ # Add dimension order hint if not "Auto"
+ if hasattr(self, "dimension_order"):
+ dim_order = self.dimension_order.currentText()
+ if dim_order != "Auto":
+ param_values["dimension_order"] = dim_order
+
  # Determine output folder
  output_folder = self.output_folder.text().strip()
  if not output_folder:
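The hint is only forwarded when it differs from "Auto", so existing processing functions keep their current behaviour. Assuming the parameter dict is passed on to the processing function as keyword arguments (which this hunk does not show), a function that wants the hint could read it as below; the function itself is made up:

    # Hypothetical processing function consuming the forwarded "dimension_order" hint;
    # not an actual napari-tmidas function.
    import numpy as np

    def max_project_z(image: np.ndarray, dimension_order: str = "Auto") -> np.ndarray:
        # Project along Z only when the hint identifies a Z axis; otherwise pass through.
        if dimension_order == "ZYX" and image.ndim == 3:
            return image.max(axis=0)
        if dimension_order == "TZYX" and image.ndim == 4:
            return image.max(axis=1)
        return image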
@@ -1147,6 +2336,52 @@ class FileResultsWidget(QWidget):
  f"Completed processing {len(self.processed_files_info)} files"
  )

+ # For grid overlay function, load and display the result
+ if hasattr(self, "processing_selector"):
+ function_name = self.processing_selector.currentText()
+ if "grid" in function_name.lower():
+ # Import here to avoid circular dependency
+ try:
+ from napari_tmidas.processing_functions.grid_view_overlay import (
+ _grid_output_path,
+ )
+
+ if _grid_output_path:
+ import tifffile
+
+ # Load TIF image
+ grid_image = tifffile.imread(_grid_output_path)
+
+ # Add to viewer
+ self.viewer.add_image(
+ grid_image,
+ name=f"Grid Overlay ({len(self.file_list)} pairs)",
+ rgb=True,
+ )
+ print("\n✨ Grid overlay added to napari viewer!")
+
+ # Show message box with output location
+ msg = QMessageBox(self)
+ msg.setIcon(QMessageBox.Information)
+ msg.setWindowTitle("Grid Overlay Complete")
+ msg.setText(
+ f"Grid overlay created successfully!\n\nProcessed {len(self.file_list)} image pairs"
+ )
+ msg.setInformativeText(
+ f"Saved to:\n{_grid_output_path}"
+ )
+ msg.setStandardButtons(QMessageBox.Ok)
+ msg.exec_()
+
+ # Reset the grid cache for next run
+ from napari_tmidas.processing_functions.grid_view_overlay import (
+ reset_grid_cache,
+ )
+
+ reset_grid_cache()
+ except (FileNotFoundError, OSError, ValueError) as e:
+ print(f"Could not load grid overlay: {e}")
+
  def processing_error(self, filepath, error_msg):
  """Handle processing errors"""
  print(f"Error processing {filepath}: {error_msg}")
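Displaying the finished grid uses plain napari and tifffile API: read the RGB TIFF and add it as an image layer with rgb=True. A stand-alone version of that step, with a placeholder path:

    # Stand-alone version of the viewer step above; the path is a placeholder.
    import napari
    import tifffile

    viewer = napari.Viewer()
    grid_image = tifffile.imread("grid_overlay.tif")   # expected H x W x 3 RGB array
    viewer.add_image(grid_image, name="Grid Overlay", rgb=True)
    napari.run()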
@@ -1154,6 +2389,10 @@ class FileResultsWidget(QWidget):

  def cancel_processing(self):
  """Cancel the current processing operation"""
+ # Cancel any running cellpose subprocesses
+ if cancel_cellpose_processing:
+ cancel_cellpose_processing()
+
  if self.worker and self.worker.isRunning():
  self.worker.stop()
  self.worker.wait() # Wait for the thread to finish
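cancel_processing() now also stops any running cellpose subprocess (guarded, since that helper may not be importable) before asking the worker thread to stop and waiting for it. The stop()/wait() pair implies the usual cooperative-cancellation idiom, sketched generically below; this is not the plugin's actual ProcessingWorker:

    # Generic cooperative-cancellation idiom implied by worker.stop()/worker.wait();
    # not the plugin's ProcessingWorker.
    from qtpy.QtCore import QThread

    class CancellableWorker(QThread):
        def __init__(self, files):
            super().__init__()
            self.files = files
            self._stop_requested = False

        def stop(self):
            self._stop_requested = True   # polled by run() between files

        def run(self):
            for path in self.files:
                if self._stop_requested:
                    break
                # ... process one file at `path` ...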