napari-ome-arrow 0.0.4__py3-none-any.whl → 0.0.5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,711 @@
1
+ """Stack pattern and scale helpers for the OME-Arrow reader."""
2
+
3
+ from __future__ import annotations
4
+
5
+ import os
6
+ import re
7
+ import warnings
8
+ from collections import Counter
9
+ from collections.abc import Sequence
10
+ from concurrent.futures import ThreadPoolExecutor
11
+ from functools import lru_cache
12
+ from pathlib import Path
13
+ from typing import Any
14
+
15
+ import numpy as np
16
+ from ome_arrow.core import OMEArrow
17
+ from ome_arrow.ingest import from_stack_pattern_path
18
+
19
+
20
+ def _collect_stack_files(
21
+ paths: Sequence[str],
22
+ ) -> tuple[list[Path], Path] | None:
23
+ """Collect stack candidates and their shared folder if applicable.
24
+
25
+ Args:
26
+ paths: Input paths passed by napari.
27
+
28
+ Returns:
29
+ A tuple of (files, folder) or None if stack detection fails.
30
+ """
31
+ if any(any(c in p for c in "<>*") for p in paths):
32
+ return None
33
+
34
+ if len(paths) == 1:
35
+ candidate = Path(paths[0])
36
+ if (
37
+ candidate.exists()
38
+ and candidate.is_dir()
39
+ and candidate.suffix.lower() not in {".zarr", ".ome.zarr"}
40
+ ):
41
+ files = sorted(p for p in candidate.iterdir() if p.is_file())
42
+ if len(files) > 1:
43
+ return files, candidate
44
+ return None
45
+
46
+ path_objs = [Path(p) for p in paths]
47
+ if not all(p.exists() and p.is_file() for p in path_objs):
48
+ return None
49
+
50
+ parents = {p.parent for p in path_objs}
51
+ if len(parents) != 1:
52
+ return None
53
+
54
+ return sorted(path_objs), parents.pop()
55
+
56
+
57
def _suggest_stack_pattern(files: Sequence[Path], folder: Path) -> str:
    """Suggest a stack pattern string for a set of files.

    Heuristics are tried in order: an explicit Z/ZS token, aligned numeric
    tokens shared by every name, a single trailing number, and finally an
    escaped common-prefix/suffix regex with ``.*`` in between.

    Args:
        files: Files to analyze for a stack pattern.
        folder: Folder that contains the files.

    Returns:
        Suggested stack pattern string.
    """
    if not files:
        # Nothing to inspect; fall back to "match anything" in the folder.
        return str(folder / ".*")

    # Prefer the most common suffix to avoid mixing file types.
    suffix_counts = Counter(p.suffix.lower() for p in files)
    preferred_suffix = (
        suffix_counts.most_common(1)[0][0] if suffix_counts else ""
    )
    candidates = [
        p for p in files if p.suffix.lower() == preferred_suffix
    ] or list(files)
    names = [p.name for p in candidates]

    def _unique_in_order(values: Sequence[str]) -> list[str]:
        # Preserve original ordering for channel tokens.
        seen = set()
        ordered: list[str] = []
        for value in values:
            if value in seen:
                continue
            seen.add(value)
            ordered.append(value)
        return ordered

    def _suggest_from_z_token(values: Sequence[str]) -> str | None:
        # Look for a Z token like "Z000" or "ZS000" and build a range.
        z_re = re.compile(
            r"^(?P<pre>.*?)(?P<zprefix>Z[S]?)(?P<znum>\d+)(?P<post>.*)$"
        )
        matches = [z_re.match(v) for v in values]
        if not all(m is not None for m in matches):
            # Every name must carry the Z token for this heuristic to apply.
            return None

        zprefixes = {m.group("zprefix") for m in matches}  # type: ignore[union-attr]
        if len(zprefixes) != 1:
            # Mixed "Z"/"ZS" prefixes cannot be expressed as one pattern.
            return None
        zprefix = next(iter(zprefixes))

        pre_parts = [m.group("pre") for m in matches]  # type: ignore[union-attr]
        post_parts = [m.group("post") for m in matches]  # type: ignore[union-attr]
        if len(set(post_parts)) != 1:
            return None
        post = post_parts[0]

        z_values = [m.group("znum") for m in matches]  # type: ignore[union-attr]
        z_width = max(len(v) for v in z_values)
        z_nums = [int(v) for v in z_values]
        z_unique = sorted(set(z_nums))
        # Contiguous indices compress to "<start-end>"; otherwise list them.
        if len(z_unique) == (max(z_unique) - min(z_unique) + 1):
            z_token = f"<{str(min(z_unique)).zfill(z_width)}-{str(max(z_unique)).zfill(z_width)}>"
        else:
            z_token = (
                "<" + ",".join(str(v).zfill(z_width) for v in z_unique) + ">"
            )

        # Separate channel tokens from Z tokens when possible.
        pre_prefix = os.path.commonprefix(pre_parts)
        # Common suffix computed as the reversed common prefix of reversals.
        pre_suffix = os.path.commonprefix([p[::-1] for p in pre_parts])[::-1]
        if pre_prefix:
            middle_parts = [p[len(pre_prefix) :] for p in pre_parts]
        else:
            middle_parts = pre_parts
        if pre_suffix:
            middle_parts = [
                p[: -len(pre_suffix)] if p.endswith(pre_suffix) else p
                for p in middle_parts
            ]
        channel_values = _unique_in_order(middle_parts)
        channel_token = ""
        if len(set(channel_values)) > 1:
            channel_token = "<" + ",".join(channel_values) + ">"
        elif channel_values and channel_values[0]:
            # A single non-empty middle token is kept as literal text.
            channel_token = channel_values[0]

        suggested = (
            f"{pre_prefix}{channel_token}{pre_suffix}{zprefix}{z_token}{post}"
        )
        return suggested

    suggestion = _suggest_from_z_token(names)
    if suggestion is not None:
        return str(folder / suggestion)

    # Generic numeric token scanning when no explicit Z token exists.
    # re.split with a capturing group alternates static text (even indices)
    # and digit runs (odd indices) for each name.
    split_names = [re.split(r"(\d+)", name) for name in names]
    if split_names and all(
        len(parts) == len(split_names[0]) for parts in split_names
    ):
        static_ok = True
        for idx in range(0, len(split_names[0]), 2):
            token = split_names[0][idx]
            if any(parts[idx] != token for parts in split_names):
                static_ok = False
                break
        if static_ok:
            pattern_parts: list[str] = []
            for idx, token in enumerate(split_names[0]):
                if idx % 2 == 0:
                    # Static text shared by every name.
                    pattern_parts.append(token)
                    continue
                values = [parts[idx] for parts in split_names]
                unique = sorted(set(values), key=lambda v: int(v))
                if len(unique) == 1:
                    pattern_parts.append(unique[0])
                    continue
                width = max(len(v) for v in unique)
                nums = [int(v) for v in unique]
                if len(unique) == (max(nums) - min(nums) + 1):
                    start = str(min(nums)).zfill(width)
                    end = str(max(nums)).zfill(width)
                    pattern_parts.append(f"<{start}-{end}>")
                else:
                    values_str = ",".join(str(n).zfill(width) for n in nums)
                    pattern_parts.append(f"<{values_str}>")
            pattern_name = "".join(pattern_parts)
            return str(folder / pattern_name)

    # Fall back to the last number in each name ((?!.*\d) anchors the
    # final digit run).
    matches = [re.search(r"(\d+)(?!.*\d)", name) for name in names]
    if all(m is not None for m in matches):
        prefix = names[0][: matches[0].start()]
        suffix = names[0][matches[0].end() :]
        if all(
            name[: m.start()] == prefix and name[m.end() :] == suffix
            for name, m in zip(names, matches, strict=False)
        ):
            nums = [int(m.group(1)) for m in matches if m is not None]
            width = max(len(m.group(1)) for m in matches if m is not None)
            start = str(min(nums)).zfill(width)
            end = str(max(nums)).zfill(width)
            pattern_name = f"{prefix}<{start}-{end}>{suffix}"
            return str(folder / pattern_name)

    # Last resort: escaped common prefix/suffix around a ".*" wildcard.
    common_prefix = os.path.commonprefix(names)
    common_suffix = os.path.commonprefix([n[::-1] for n in names])[::-1]
    prefix = re.escape(common_prefix)
    suffix = re.escape(common_suffix)
    pattern_name = f"{prefix}.*{suffix}"
    return str(folder / pattern_name)
205
+
206
+
207
+ def _stack_default_dim_for_pattern(pattern: str) -> str:
208
+ """Infer the default dimension token for a stack pattern.
209
+
210
+ Args:
211
+ pattern: Stack pattern string.
212
+
213
+ Returns:
214
+ Default dimension token ("Z" or "C").
215
+ """
216
+ dim_tokens = {"z", "zs", "sec", "fp", "focal", "focalplane"}
217
+ for idx, ch in enumerate(pattern):
218
+ if ch != "<":
219
+ continue
220
+ before = pattern[:idx]
221
+ match = re.search(r"([A-Za-z]+)$", before)
222
+ if not match:
223
+ continue
224
+ token = match.group(1).lower()
225
+ if token in dim_tokens:
226
+ return "C"
227
+ return "Z"
228
+
229
+
230
+ def _detect_dim_token(before_text: str) -> str | None:
231
+ """Detect a dimension token from text preceding a placeholder.
232
+
233
+ Args:
234
+ before_text: Text before the placeholder in the pattern.
235
+
236
+ Returns:
237
+ Dimension token ("C", "T", "Z", "S") or None if not detected.
238
+ """
239
+ match = re.search(r"([A-Za-z]+)$", before_text)
240
+ if not match:
241
+ return None
242
+ token = match.group(1).lower()
243
+ if token in {"c", "ch", "w", "wavelength"}:
244
+ return "C"
245
+ if token in {"t", "tl", "tp", "timepoint"}:
246
+ return "T"
247
+ if token in {"z", "zs", "sec", "fp", "focal", "focalplane"}:
248
+ return "Z"
249
+ if token in {"s", "sp", "series"}:
250
+ return "S"
251
+ return None
252
+
253
+
254
def _channel_names_from_pattern(
    pattern: str, default_dim_for_unspecified: str
) -> list[str] | None:
    """Extract channel names from a stack pattern if available.

    Args:
        pattern: Stack pattern string.
        default_dim_for_unspecified: Default dimension token to use.

    Returns:
        Channel names if a channel placeholder is found, otherwise None.
    """
    # Matches numeric ranges like "000-015" or "1-10:2" inside <...>.
    range_re = re.compile(r"^(?P<a>\d+)\-(?P<b>\d+)(?::(?P<step>\d+))?$")

    def _expand(raw: str) -> list[str] | None:
        # Support comma lists or numeric ranges within angle brackets.
        raw = raw.strip()
        if not raw:
            return None
        if "," in raw and not range_re.match(raw):
            return [piece.strip() for piece in raw.split(",") if piece.strip()]
        found = range_re.match(raw)
        if found is None:
            return None
        lo_txt, hi_txt = found.group("a"), found.group("b")
        stride = int(found.group("step") or "1")
        lo, hi = int(lo_txt), int(hi_txt)
        if hi < lo:
            return None
        # Zero-pad to the wider of the two endpoints.
        pad = max(len(lo_txt), len(hi_txt))
        return [str(v).zfill(pad) for v in range(lo, hi + 1, stride)]

    pos = 0
    while pos < len(pattern):
        if pattern[pos] != "<":
            pos += 1
            continue
        close = pattern.find(">", pos + 1)
        if close == -1:
            # Unterminated placeholder; stop scanning.
            break
        inside = pattern[pos + 1 : close]
        dim = _detect_dim_token(pattern[:pos]) or default_dim_for_unspecified
        if dim.upper() == "C":
            names = _expand(inside)
            if names:
                return names
        pos = close + 1
    return None
303
+
304
+
305
def _replace_channel_placeholder(
    pattern: str, channel_value: str, default_dim_for_unspecified: str
) -> str:
    """Replace the first channel placeholder in a pattern.

    Args:
        pattern: Stack pattern string.
        channel_value: Channel value to insert.
        default_dim_for_unspecified: Default dimension token to use.

    Returns:
        Pattern string with the channel placeholder replaced.
    """
    pieces: list[str] = []
    pos = 0
    done = False
    while pos < len(pattern):
        if pattern[pos] != "<":
            pieces.append(pattern[pos])
            pos += 1
            continue
        close = pattern.find(">", pos + 1)
        if close == -1:
            # Unterminated placeholder: keep the tail verbatim.
            pieces.append(pattern[pos:])
            break
        inside = pattern[pos + 1 : close]
        dim = _detect_dim_token(pattern[:pos]) or default_dim_for_unspecified
        if not done and dim.upper() == "C":
            # Only the first channel placeholder is substituted.
            pieces.append(channel_value)
            done = True
        else:
            pieces.append(f"<{inside}>")
        pos = close + 1
    return "".join(pieces)
339
+
340
+
341
+ def _files_from_pattern(pattern: str) -> list[Path]:
342
+ """List files that match a stack pattern.
343
+
344
+ Args:
345
+ pattern: Stack pattern string.
346
+
347
+ Returns:
348
+ Files matching the pattern, sorted by Z-like tokens when possible.
349
+ """
350
+ p = Path(pattern)
351
+ folder = p.parent
352
+ name = p.name
353
+ # Convert <...> placeholders to a wildcard regex.
354
+ regex = re.escape(name)
355
+ regex = re.sub(r"\\<[^>]+\\>", r"[^/]+", regex)
356
+ compiled = re.compile(f"^{regex}$")
357
+ candidates = [f for f in folder.iterdir() if f.is_file()]
358
+ matched = [f for f in candidates if compiled.match(f.name)]
359
+
360
+ def z_key(path: Path) -> tuple[int, str]:
361
+ # Prefer Z-like numeric ordering when present.
362
+ match = re.search(r"Z[S]?(\d+)", path.name)
363
+ return (int(match.group(1)) if match else -1, path.name)
364
+
365
+ return sorted(matched, key=z_key)
366
+
367
+
368
def _read_rgb_stack_pattern(pattern: str) -> tuple[np.ndarray, bool]:
    """Read a stack pattern using RGB-aware fallbacks.

    Args:
        pattern: Stack pattern string.

    Returns:
        Tuple of (stack array, is_rgb).

    Raises:
        ImportError: If required optional dependencies are missing.
        FileNotFoundError: If no files match the pattern.
        ValueError: If frame shapes are unsupported or inconsistent.
    """
    try:
        from bioio import BioImage
        from bioio_ome_tiff import Reader as OMEReader
        from bioio_tifffile import Reader as TiffReader
    except Exception as exc:  # pragma: no cover - optional dependency
        raise ImportError(
            "RGB stack fallback requires bioio and bioio_tifffile."
        ) from exc

    # Try to load a stack using image readers that can report RGB frames.
    files = _files_from_pattern(pattern)
    if not files:
        raise FileNotFoundError(f"No files matched pattern: {pattern}")

    # Reference shape of the first decoded frame; all frames must match.
    shape_ref: tuple[int, ...] | None = None

    def _read_frame(fpath: Path) -> tuple[np.ndarray, bool]:
        # Normalize RGB channel placement to last axis.
        # BUGFIX: Path.suffix only yields the final suffix (".tif" for
        # "x.ome.tif"), so the compound ".ome.tif"/".ome.tiff" extension
        # must be tested against the full file name — the old
        # `fpath.suffix.lower() in (".ome.tif", ".ome.tiff")` check was
        # always False and the OME reader was never chosen.
        reader = (
            OMEReader
            if fpath.name.lower().endswith((".ome.tif", ".ome.tiff"))
            else TiffReader
        )
        img = BioImage(image=str(fpath), reader=reader)
        arr = np.asarray(img.data)

        if arr.ndim == 2:
            # Plain grayscale frame.
            return arr, False
        if arr.ndim == 3:
            if arr.shape[-1] in (3, 4):
                # Already channels-last RGB(A).
                return arr, True
            if arr.shape[0] in (3, 4):
                # Channels-first RGB(A): move channels to the last axis.
                return np.moveaxis(arr, 0, -1), True
            raise ValueError(
                f"Unsupported 3D frame shape {arr.shape} for {fpath.name}"
            )
        raise ValueError(
            f"Unsupported frame dimensions {arr.shape} for {fpath.name}"
        )

    # Decode frames concurrently; file reading is I/O bound.
    with ThreadPoolExecutor(
        max_workers=min(8, max(1, len(files)))
    ) as executor:
        results = list(executor.map(_read_frame, files))

    frames: list[np.ndarray] = []
    rgb_flags: list[bool] = []
    for frame, is_rgb in results:
        if shape_ref is None:
            shape_ref = frame.shape
        elif frame.shape != shape_ref:
            raise ValueError(
                f"Shape mismatch for stack: {frame.shape} vs {shape_ref}"
            )
        frames.append(frame)
        rgb_flags.append(is_rgb)

    # A single RGB frame marks the whole stack as RGB.
    rgb = any(rgb_flags)

    stack = np.stack(frames, axis=0)
    return stack, rgb
443
+
444
+
445
def _prompt_stack_pattern(files: Sequence[Path], folder: Path) -> str | None:
    """Prompt for a stack pattern when multiple files are detected.

    Args:
        files: Stack candidate files.
        folder: Folder containing the files.

    Returns:
        A pattern string or None to skip stack parsing.
    """
    default_pattern = _suggest_stack_pattern(files, folder)

    try:
        from qtpy import QtWidgets
    except Exception:
        # Headless / Qt-free environments cannot show the dialog.
        warnings.warn(
            "Multiple files detected but Qt is not available; "
            "loading files individually.",
            stacklevel=2,
        )
        return None

    if QtWidgets.QApplication.instance() is None:
        warnings.warn(
            "Multiple files detected but no QApplication instance; "
            "loading files individually.",
            stacklevel=2,
        )
        return None

    prompt = (
        "Multiple files detected. Enter a stack pattern string to load as a 3D stack.\n"
        "Use <...> for indices (e.g. z<000-120> or c<111,222>), or a regex like .* for non-numbered files.\n"
        "If no dimension token (z/c/t) is present, Z is assumed for this stack.\n\n"
        "Edit if needed and press OK, or Cancel to load files individually."
    )
    entered, accepted = QtWidgets.QInputDialog.getText(
        None,
        "napari-ome-arrow: stack pattern",
        prompt,
        text=default_pattern,
    )
    if not accepted:
        return None
    cleaned = entered.strip()
    return cleaned if cleaned else None
492
+
493
+
494
+ def _parse_stack_scale(text: str) -> tuple[float, ...]:
495
+ """Parse a stack scale string into numeric values.
496
+
497
+ Args:
498
+ text: Comma- or space-separated scale values.
499
+
500
+ Returns:
501
+ Tuple of scale values.
502
+
503
+ Raises:
504
+ ValueError: If the scale is invalid or has the wrong length.
505
+ """
506
+ # Accept comma- or space-delimited values.
507
+ tokens = [t for t in re.split(r"[,\s]+", text.strip()) if t]
508
+ if len(tokens) not in (3, 5):
509
+ raise ValueError("Expected 3 values (Z,Y,X) or 5 values (T,C,Z,Y,X).")
510
+ values = tuple(float(v) for v in tokens)
511
+ if any(v <= 0 for v in values):
512
+ raise ValueError("Scale values must be positive.")
513
+ return values
514
+
515
+
516
+ def _format_stack_scale(values: Sequence[float]) -> str:
517
+ """Format scale values as a comma-separated string.
518
+
519
+ Args:
520
+ values: Scale values.
521
+
522
+ Returns:
523
+ String representation of the scale values.
524
+ """
525
+ return ",".join(f"{v:g}" for v in values)
526
+
527
+
528
+ def _scale_from_ome_arrow(
529
+ obj: OMEArrow,
530
+ ) -> tuple[float, float, float] | None:
531
+ """Extract Z/Y/X scale from OME-Arrow metadata.
532
+
533
+ Args:
534
+ obj: OMEArrow instance to inspect.
535
+
536
+ Returns:
537
+ (z, y, x) scale tuple, or None if unavailable or defaulted to 1.0.
538
+ """
539
+ try:
540
+ record = obj.data.as_py()
541
+ pixels_meta = record.get("pixels_meta", {}) if record else {}
542
+ if not any(
543
+ k in pixels_meta
544
+ for k in (
545
+ "physical_size_z",
546
+ "physical_size_y",
547
+ "physical_size_x",
548
+ )
549
+ ):
550
+ return None
551
+ z = float(pixels_meta.get("physical_size_z") or 1.0)
552
+ y = float(pixels_meta.get("physical_size_y") or 1.0)
553
+ x = float(pixels_meta.get("physical_size_x") or 1.0)
554
+ if z == 1.0 and y == 1.0 and x == 1.0:
555
+ return None
556
+ return z, y, x
557
+ except Exception:
558
+ return None
559
+
560
+
561
+ def _prompt_stack_scale(
562
+ sample_path: str, default_scale: Sequence[float] | None
563
+ ) -> tuple[float, ...] | None:
564
+ """Prompt for stack scale values when in a Qt context.
565
+
566
+ Args:
567
+ sample_path: Path used for prompt labeling.
568
+ default_scale: Default scale to prefill or return on cancel.
569
+
570
+ Returns:
571
+ Scale tuple or None if no override is provided.
572
+ """
573
+ try:
574
+ from qtpy import QtWidgets
575
+ except Exception:
576
+ return default_scale
577
+
578
+ app = QtWidgets.QApplication.instance()
579
+ if app is None:
580
+ return default_scale
581
+
582
+ default_text = (
583
+ _format_stack_scale(default_scale) if default_scale else "1.0,1.0,1.0"
584
+ )
585
+ detected_note = f"Detected: {default_text}\n" if default_scale else ""
586
+ label = (
587
+ f"Enter voxel spacing for '{Path(sample_path).name}' in microns.\n"
588
+ "Format: Z,Y,X (or T,C,Z,Y,X for full axis order).\n"
589
+ f"{detected_note}"
590
+ "Leave blank to keep the metadata/default values.\n"
591
+ "You can also set NAPARI_OME_ARROW_STACK_SCALE to prefill this."
592
+ )
593
+ text, ok = QtWidgets.QInputDialog.getText(
594
+ None,
595
+ "napari-ome-arrow: stack scale",
596
+ label,
597
+ text=default_text,
598
+ )
599
+ if not ok:
600
+ return default_scale
601
+ value = text.strip()
602
+ if not value:
603
+ return default_scale
604
+ try:
605
+ return _parse_stack_scale(value)
606
+ except ValueError as exc:
607
+ warnings.warn(
608
+ f"Invalid stack scale '{value}': {exc}. Using defaults instead.",
609
+ stacklevel=2,
610
+ )
611
+ return default_scale
612
+
613
+
614
+ @lru_cache(maxsize=128)
615
+ def _infer_stack_scale_from_pattern(
616
+ pattern: str, stack_default_dim: str | None
617
+ ) -> tuple[float, float, float] | None:
618
+ """Infer scale from a stack pattern using OME-Arrow metadata.
619
+
620
+ Args:
621
+ pattern: Stack pattern string.
622
+ stack_default_dim: Default dimension token for the stack.
623
+
624
+ Returns:
625
+ (z, y, x) scale tuple, or None if inference fails.
626
+ """
627
+ try:
628
+ # Use ome-arrow ingestion to infer physical size from a stack.
629
+ scalar = from_stack_pattern_path(
630
+ pattern,
631
+ default_dim_for_unspecified=stack_default_dim,
632
+ map_series_to="T",
633
+ clamp_to_uint16=True,
634
+ )
635
+ obj = OMEArrow(scalar)
636
+ except Exception:
637
+ return None
638
+ return _scale_from_ome_arrow(obj)
639
+
640
+
641
+ def _normalize_stack_scale(scale: Sequence[float]) -> tuple[float, ...]:
642
+ """Normalize a stack scale into TCZYX order.
643
+
644
+ Args:
645
+ scale: Scale values in Z,Y,X or T,C,Z,Y,X order.
646
+
647
+ Returns:
648
+ Normalized scale values in T,C,Z,Y,X order.
649
+
650
+ Raises:
651
+ ValueError: If the input has an unsupported length.
652
+ """
653
+ if len(scale) == 3:
654
+ z, y, x = scale
655
+ return (1.0, 1.0, z, y, x)
656
+ if len(scale) == 5:
657
+ return tuple(scale)
658
+ raise ValueError("Stack scale must have 3 or 5 values.")
659
+
660
+
661
def _scale_for_array(
    arr: np.ndarray,
    mode: str,
    add_kwargs: dict[str, Any],
    scale: Sequence[float],
) -> tuple[float, ...] | None:
    """Compute the scale tuple appropriate for a specific array and mode.

    Args:
        arr: Data array to be displayed.
        mode: "image" or "labels".
        add_kwargs: Layer kwargs, possibly including "channel_axis".
        scale: Scale values in Z,Y,X or T,C,Z,Y,X order.

    Returns:
        Scale tuple aligned to the array's dimensionality, or None.
    """
    # Normalize to TCZYX once, then pick the entries each layout needs.
    t_scale, c_scale, z_scale, y_scale, x_scale = _normalize_stack_scale(scale)
    channel_axis = add_kwargs.get("channel_axis")

    if arr.ndim == 5:
        if channel_axis is not None:
            # Drop the channel axis; napari splits channels before applying scale.
            return (t_scale, z_scale, y_scale, x_scale)
        return (t_scale, c_scale, z_scale, y_scale, x_scale)
    if arr.ndim == 4:
        if mode == "image" and channel_axis is not None:
            if channel_axis == 0:
                return (z_scale, y_scale, x_scale)
            return (c_scale, z_scale, y_scale, x_scale)
        return (t_scale, z_scale, y_scale, x_scale)
    if arr.ndim == 3:
        return (z_scale, y_scale, x_scale)
    if arr.ndim == 2:
        return (y_scale, x_scale)
    return None
+ return None