senoquant-1.0.0b1-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (148)
  1. senoquant/__init__.py +6 -0
  2. senoquant/_reader.py +7 -0
  3. senoquant/_widget.py +33 -0
  4. senoquant/napari.yaml +83 -0
  5. senoquant/reader/__init__.py +5 -0
  6. senoquant/reader/core.py +369 -0
  7. senoquant/tabs/__init__.py +15 -0
  8. senoquant/tabs/batch/__init__.py +10 -0
  9. senoquant/tabs/batch/backend.py +641 -0
  10. senoquant/tabs/batch/config.py +270 -0
  11. senoquant/tabs/batch/frontend.py +1283 -0
  12. senoquant/tabs/batch/io.py +326 -0
  13. senoquant/tabs/batch/layers.py +86 -0
  14. senoquant/tabs/quantification/__init__.py +1 -0
  15. senoquant/tabs/quantification/backend.py +228 -0
  16. senoquant/tabs/quantification/features/__init__.py +80 -0
  17. senoquant/tabs/quantification/features/base.py +142 -0
  18. senoquant/tabs/quantification/features/marker/__init__.py +5 -0
  19. senoquant/tabs/quantification/features/marker/config.py +69 -0
  20. senoquant/tabs/quantification/features/marker/dialog.py +437 -0
  21. senoquant/tabs/quantification/features/marker/export.py +879 -0
  22. senoquant/tabs/quantification/features/marker/feature.py +119 -0
  23. senoquant/tabs/quantification/features/marker/morphology.py +285 -0
  24. senoquant/tabs/quantification/features/marker/rows.py +654 -0
  25. senoquant/tabs/quantification/features/marker/thresholding.py +46 -0
  26. senoquant/tabs/quantification/features/roi.py +346 -0
  27. senoquant/tabs/quantification/features/spots/__init__.py +5 -0
  28. senoquant/tabs/quantification/features/spots/config.py +62 -0
  29. senoquant/tabs/quantification/features/spots/dialog.py +477 -0
  30. senoquant/tabs/quantification/features/spots/export.py +1292 -0
  31. senoquant/tabs/quantification/features/spots/feature.py +112 -0
  32. senoquant/tabs/quantification/features/spots/morphology.py +279 -0
  33. senoquant/tabs/quantification/features/spots/rows.py +241 -0
  34. senoquant/tabs/quantification/frontend.py +815 -0
  35. senoquant/tabs/segmentation/__init__.py +1 -0
  36. senoquant/tabs/segmentation/backend.py +131 -0
  37. senoquant/tabs/segmentation/frontend.py +1009 -0
  38. senoquant/tabs/segmentation/models/__init__.py +5 -0
  39. senoquant/tabs/segmentation/models/base.py +146 -0
  40. senoquant/tabs/segmentation/models/cpsam/details.json +65 -0
  41. senoquant/tabs/segmentation/models/cpsam/model.py +150 -0
  42. senoquant/tabs/segmentation/models/default_2d/details.json +69 -0
  43. senoquant/tabs/segmentation/models/default_2d/model.py +664 -0
  44. senoquant/tabs/segmentation/models/default_3d/details.json +69 -0
  45. senoquant/tabs/segmentation/models/default_3d/model.py +682 -0
  46. senoquant/tabs/segmentation/models/hf.py +71 -0
  47. senoquant/tabs/segmentation/models/nuclear_dilation/__init__.py +1 -0
  48. senoquant/tabs/segmentation/models/nuclear_dilation/details.json +26 -0
  49. senoquant/tabs/segmentation/models/nuclear_dilation/model.py +96 -0
  50. senoquant/tabs/segmentation/models/perinuclear_rings/__init__.py +1 -0
  51. senoquant/tabs/segmentation/models/perinuclear_rings/details.json +34 -0
  52. senoquant/tabs/segmentation/models/perinuclear_rings/model.py +132 -0
  53. senoquant/tabs/segmentation/stardist_onnx_utils/__init__.py +2 -0
  54. senoquant/tabs/segmentation/stardist_onnx_utils/_csbdeep/csbdeep/__init__.py +3 -0
  55. senoquant/tabs/segmentation/stardist_onnx_utils/_csbdeep/csbdeep/data/__init__.py +6 -0
  56. senoquant/tabs/segmentation/stardist_onnx_utils/_csbdeep/csbdeep/data/generate.py +470 -0
  57. senoquant/tabs/segmentation/stardist_onnx_utils/_csbdeep/csbdeep/data/prepare.py +273 -0
  58. senoquant/tabs/segmentation/stardist_onnx_utils/_csbdeep/csbdeep/data/rawdata.py +112 -0
  59. senoquant/tabs/segmentation/stardist_onnx_utils/_csbdeep/csbdeep/data/transform.py +384 -0
  60. senoquant/tabs/segmentation/stardist_onnx_utils/_csbdeep/csbdeep/internals/__init__.py +0 -0
  61. senoquant/tabs/segmentation/stardist_onnx_utils/_csbdeep/csbdeep/internals/blocks.py +184 -0
  62. senoquant/tabs/segmentation/stardist_onnx_utils/_csbdeep/csbdeep/internals/losses.py +79 -0
  63. senoquant/tabs/segmentation/stardist_onnx_utils/_csbdeep/csbdeep/internals/nets.py +165 -0
  64. senoquant/tabs/segmentation/stardist_onnx_utils/_csbdeep/csbdeep/internals/predict.py +467 -0
  65. senoquant/tabs/segmentation/stardist_onnx_utils/_csbdeep/csbdeep/internals/probability.py +67 -0
  66. senoquant/tabs/segmentation/stardist_onnx_utils/_csbdeep/csbdeep/internals/train.py +148 -0
  67. senoquant/tabs/segmentation/stardist_onnx_utils/_csbdeep/csbdeep/io/__init__.py +163 -0
  68. senoquant/tabs/segmentation/stardist_onnx_utils/_csbdeep/csbdeep/models/__init__.py +52 -0
  69. senoquant/tabs/segmentation/stardist_onnx_utils/_csbdeep/csbdeep/models/base_model.py +329 -0
  70. senoquant/tabs/segmentation/stardist_onnx_utils/_csbdeep/csbdeep/models/care_isotropic.py +160 -0
  71. senoquant/tabs/segmentation/stardist_onnx_utils/_csbdeep/csbdeep/models/care_projection.py +178 -0
  72. senoquant/tabs/segmentation/stardist_onnx_utils/_csbdeep/csbdeep/models/care_standard.py +446 -0
  73. senoquant/tabs/segmentation/stardist_onnx_utils/_csbdeep/csbdeep/models/care_upsampling.py +54 -0
  74. senoquant/tabs/segmentation/stardist_onnx_utils/_csbdeep/csbdeep/models/config.py +254 -0
  75. senoquant/tabs/segmentation/stardist_onnx_utils/_csbdeep/csbdeep/models/pretrained.py +119 -0
  76. senoquant/tabs/segmentation/stardist_onnx_utils/_csbdeep/csbdeep/scripts/__init__.py +0 -0
  77. senoquant/tabs/segmentation/stardist_onnx_utils/_csbdeep/csbdeep/scripts/care_predict.py +180 -0
  78. senoquant/tabs/segmentation/stardist_onnx_utils/_csbdeep/csbdeep/utils/__init__.py +5 -0
  79. senoquant/tabs/segmentation/stardist_onnx_utils/_csbdeep/csbdeep/utils/plot_utils.py +159 -0
  80. senoquant/tabs/segmentation/stardist_onnx_utils/_csbdeep/csbdeep/utils/six.py +18 -0
  81. senoquant/tabs/segmentation/stardist_onnx_utils/_csbdeep/csbdeep/utils/tf.py +644 -0
  82. senoquant/tabs/segmentation/stardist_onnx_utils/_csbdeep/csbdeep/utils/utils.py +272 -0
  83. senoquant/tabs/segmentation/stardist_onnx_utils/_csbdeep/csbdeep/version.py +1 -0
  84. senoquant/tabs/segmentation/stardist_onnx_utils/_csbdeep/docs/source/conf.py +368 -0
  85. senoquant/tabs/segmentation/stardist_onnx_utils/_csbdeep/setup.py +68 -0
  86. senoquant/tabs/segmentation/stardist_onnx_utils/_csbdeep/tests/test_datagen.py +169 -0
  87. senoquant/tabs/segmentation/stardist_onnx_utils/_csbdeep/tests/test_models.py +462 -0
  88. senoquant/tabs/segmentation/stardist_onnx_utils/_csbdeep/tests/test_utils.py +166 -0
  89. senoquant/tabs/segmentation/stardist_onnx_utils/_csbdeep/tools/create_zip_contents.py +34 -0
  90. senoquant/tabs/segmentation/stardist_onnx_utils/_stardist/__init__.py +30 -0
  91. senoquant/tabs/segmentation/stardist_onnx_utils/_stardist/big.py +624 -0
  92. senoquant/tabs/segmentation/stardist_onnx_utils/_stardist/bioimageio_utils.py +494 -0
  93. senoquant/tabs/segmentation/stardist_onnx_utils/_stardist/data/__init__.py +39 -0
  94. senoquant/tabs/segmentation/stardist_onnx_utils/_stardist/geometry/__init__.py +10 -0
  95. senoquant/tabs/segmentation/stardist_onnx_utils/_stardist/geometry/geom2d.py +215 -0
  96. senoquant/tabs/segmentation/stardist_onnx_utils/_stardist/geometry/geom3d.py +349 -0
  97. senoquant/tabs/segmentation/stardist_onnx_utils/_stardist/matching.py +483 -0
  98. senoquant/tabs/segmentation/stardist_onnx_utils/_stardist/models/__init__.py +28 -0
  99. senoquant/tabs/segmentation/stardist_onnx_utils/_stardist/models/base.py +1217 -0
  100. senoquant/tabs/segmentation/stardist_onnx_utils/_stardist/models/model2d.py +594 -0
  101. senoquant/tabs/segmentation/stardist_onnx_utils/_stardist/models/model3d.py +696 -0
  102. senoquant/tabs/segmentation/stardist_onnx_utils/_stardist/nms.py +384 -0
  103. senoquant/tabs/segmentation/stardist_onnx_utils/_stardist/plot/__init__.py +2 -0
  104. senoquant/tabs/segmentation/stardist_onnx_utils/_stardist/plot/plot.py +74 -0
  105. senoquant/tabs/segmentation/stardist_onnx_utils/_stardist/plot/render.py +298 -0
  106. senoquant/tabs/segmentation/stardist_onnx_utils/_stardist/rays3d.py +373 -0
  107. senoquant/tabs/segmentation/stardist_onnx_utils/_stardist/sample_patches.py +65 -0
  108. senoquant/tabs/segmentation/stardist_onnx_utils/_stardist/scripts/__init__.py +0 -0
  109. senoquant/tabs/segmentation/stardist_onnx_utils/_stardist/scripts/predict2d.py +90 -0
  110. senoquant/tabs/segmentation/stardist_onnx_utils/_stardist/scripts/predict3d.py +93 -0
  111. senoquant/tabs/segmentation/stardist_onnx_utils/_stardist/utils.py +408 -0
  112. senoquant/tabs/segmentation/stardist_onnx_utils/_stardist/version.py +1 -0
  113. senoquant/tabs/segmentation/stardist_onnx_utils/onnx_framework/__init__.py +45 -0
  114. senoquant/tabs/segmentation/stardist_onnx_utils/onnx_framework/convert/__init__.py +17 -0
  115. senoquant/tabs/segmentation/stardist_onnx_utils/onnx_framework/convert/cli.py +55 -0
  116. senoquant/tabs/segmentation/stardist_onnx_utils/onnx_framework/convert/core.py +285 -0
  117. senoquant/tabs/segmentation/stardist_onnx_utils/onnx_framework/inspect/__init__.py +15 -0
  118. senoquant/tabs/segmentation/stardist_onnx_utils/onnx_framework/inspect/cli.py +36 -0
  119. senoquant/tabs/segmentation/stardist_onnx_utils/onnx_framework/inspect/divisibility.py +193 -0
  120. senoquant/tabs/segmentation/stardist_onnx_utils/onnx_framework/inspect/probe.py +100 -0
  121. senoquant/tabs/segmentation/stardist_onnx_utils/onnx_framework/inspect/receptive_field.py +182 -0
  122. senoquant/tabs/segmentation/stardist_onnx_utils/onnx_framework/inspect/rf_cli.py +48 -0
  123. senoquant/tabs/segmentation/stardist_onnx_utils/onnx_framework/inspect/valid_sizes.py +278 -0
  124. senoquant/tabs/segmentation/stardist_onnx_utils/onnx_framework/post/__init__.py +8 -0
  125. senoquant/tabs/segmentation/stardist_onnx_utils/onnx_framework/post/core.py +157 -0
  126. senoquant/tabs/segmentation/stardist_onnx_utils/onnx_framework/pre/__init__.py +17 -0
  127. senoquant/tabs/segmentation/stardist_onnx_utils/onnx_framework/pre/core.py +226 -0
  128. senoquant/tabs/segmentation/stardist_onnx_utils/onnx_framework/predict/__init__.py +5 -0
  129. senoquant/tabs/segmentation/stardist_onnx_utils/onnx_framework/predict/core.py +401 -0
  130. senoquant/tabs/settings/__init__.py +1 -0
  131. senoquant/tabs/settings/backend.py +29 -0
  132. senoquant/tabs/settings/frontend.py +19 -0
  133. senoquant/tabs/spots/__init__.py +1 -0
  134. senoquant/tabs/spots/backend.py +139 -0
  135. senoquant/tabs/spots/frontend.py +800 -0
  136. senoquant/tabs/spots/models/__init__.py +5 -0
  137. senoquant/tabs/spots/models/base.py +94 -0
  138. senoquant/tabs/spots/models/rmp/details.json +61 -0
  139. senoquant/tabs/spots/models/rmp/model.py +499 -0
  140. senoquant/tabs/spots/models/udwt/details.json +103 -0
  141. senoquant/tabs/spots/models/udwt/model.py +482 -0
  142. senoquant/utils.py +25 -0
  143. senoquant-1.0.0b1.dist-info/METADATA +193 -0
  144. senoquant-1.0.0b1.dist-info/RECORD +148 -0
  145. senoquant-1.0.0b1.dist-info/WHEEL +5 -0
  146. senoquant-1.0.0b1.dist-info/entry_points.txt +2 -0
  147. senoquant-1.0.0b1.dist-info/licenses/LICENSE +28 -0
  148. senoquant-1.0.0b1.dist-info/top_level.txt +1 -0
@@ -0,0 +1,1292 @@
1
+ """Spots feature export logic.
2
+
3
+ This module produces two export tables for every configured nuclear or
4
+ cytoplasmic segmentation:
5
+
6
+ 1. A **cells** table with morphology, ROI membership, and per-channel
7
+ spot summaries (counts and mean spot intensity per cell).
8
+ 2. A **spots** table with per-spot geometry, ROI membership, and the
9
+ channel the spot belongs to.
10
+
11
+ The export follows the markers feature's conventions for morphology and
12
+ physical-unit reporting. If physical pixel sizes are available in the metadata for
13
+ the first configured channel image, both pixel and physical units are
14
+ saved for centroids and areas/volumes.
15
+ """
16
+
17
+ from __future__ import annotations
18
+
19
+ import csv
20
+ import warnings
21
+ from pathlib import Path
22
+ from typing import Iterable, Sequence, TYPE_CHECKING
23
+
24
+ import numpy as np
25
+ from skimage.measure import regionprops_table
26
+
27
+ from senoquant.utils import layer_data_asarray
28
+ from .config import SpotsFeatureData
29
+ from ..base import FeatureConfig
30
+ from .morphology import add_morphology_columns
31
+
32
+ if TYPE_CHECKING:
33
+ from ..roi import ROIConfig
34
+
35
+
36
+ def export_spots(
37
+ feature: FeatureConfig,
38
+ temp_dir: Path,
39
+ viewer=None,
40
+ export_format: str = "csv",
41
+ ) -> Iterable[Path]:
42
+ """Export spots feature outputs into a temporary directory.
43
+
44
+ Parameters
45
+ ----------
46
+ feature : FeatureConfig
47
+ Spots feature configuration to export. Must contain a
48
+ :class:`SpotsFeatureData` payload with at least one segmentation
49
+ and one channel.
50
+ temp_dir : Path
51
+ Temporary directory where outputs should be written.
52
+ viewer : object, optional
53
+ Napari viewer instance used to resolve layers by name and read
54
+ layer data. When ``None``, export is skipped.
55
+ export_format : str, optional
56
+ File format for exports (``"csv"`` or ``"xlsx"``). Values are
57
+ normalized to lower case.
58
+
59
+ Returns
60
+ -------
61
+ iterable of Path
62
+ Paths to files produced by the export routine. Each segmentation
63
+ produces two tables: ``*_cells`` and ``*_spots``. If no outputs
64
+ are produced, an empty list is returned.
65
+
66
+ Notes
67
+ -----
68
+ - Cell morphology comes from the segmentation labels.
69
+ - Spot-to-cell assignment is based on the spot centroid location.
70
+ - Spot intensities are computed from the channel image referenced by
71
+ each channel config. Missing or mismatched images result in ``NaN``
72
+ mean intensities for those spots.
73
+ - When ``export_colocalization`` is enabled on the feature, additional
74
+ colocalization columns are appended to both tables.
75
+ - Physical units are derived from ``layer.metadata["physical_pixel_sizes"]``
76
+ when available (same convention as the markers export).
77
+
78
+ Workflow summary
79
+ ----------------
80
+ 1. Resolve the requested cell segmentation and compute cell morphology.
81
+ 2. Build per-channel spot exports (counts, mean intensity, spot rows).
82
+ 3. Optionally compute colocalization adjacency and append columns.
83
+ 4. Write ``*_cells`` and ``*_spots`` outputs for each segmentation.
84
+ """
85
+ data = feature.data
86
+ if not isinstance(data, SpotsFeatureData) or viewer is None:
87
+ return []
88
+
89
+ # --- Normalize inputs and pre-filter channel configs ---
90
+ export_format = (export_format or "csv").lower()
91
+ outputs: list[Path] = []
92
+ channels = [
93
+ channel
94
+ for channel in data.channels
95
+ if channel.channel and channel.spots_segmentation
96
+ ]
97
+ # Require both segmentations and channels to export anything.
98
+ if not data.segmentations or not channels:
99
+ return []
100
+
101
+ # --- Resolve a reference channel for physical pixel sizes ---
102
+ first_channel_layer = None
103
+ for channel in channels:
104
+ first_channel_layer = _find_layer(viewer, channel.channel, "Image")
105
+ if first_channel_layer is not None:
106
+ break
107
+
108
+ for index, segmentation in enumerate(data.segmentations, start=0):
109
+ # --- Resolve the cell segmentation labels layer ---
110
+ label_name = segmentation.label.strip()
111
+ if not label_name:
112
+ continue
113
+ labels_layer = _find_layer(viewer, label_name, "Labels")
114
+ if labels_layer is None:
115
+ continue
116
+ cell_labels = layer_data_asarray(labels_layer)
117
+ if cell_labels.size == 0:
118
+ continue
119
+
120
+ # --- Compute per-cell morphology from the segmentation ---
121
+ cell_ids, cell_centroids = _compute_centroids(cell_labels)
122
+ if cell_ids.size == 0:
123
+ continue
124
+
125
+ # --- Derive physical pixel sizes from metadata if available ---
126
+ cell_pixel_sizes = _pixel_sizes(labels_layer, cell_labels.ndim)
127
+ if cell_pixel_sizes is None and first_channel_layer is not None:
128
+ cell_pixel_sizes = _pixel_sizes(
129
+ first_channel_layer, cell_labels.ndim
130
+ )
131
+
132
+ # --- Seed the cell table with morphology and ROI membership columns ---
133
+ cell_rows = _initialize_rows(
134
+ cell_ids, cell_centroids, cell_pixel_sizes
135
+ )
136
+
137
+ # --- Add morphological descriptors to the cell table ---
138
+ add_morphology_columns(cell_rows, cell_labels, cell_ids, cell_pixel_sizes)
139
+
140
+ _add_roi_columns(
141
+ cell_rows,
142
+ cell_labels,
143
+ cell_ids,
144
+ viewer,
145
+ data.rois,
146
+ label_name,
147
+ )
148
+ cell_header = list(cell_rows[0].keys()) if cell_rows else []
149
+
150
+ # --- Prepare containers and ROI masks for the spots table ---
151
+ spot_rows: list[dict[str, object]] = []
152
+ spot_header: list[str] = []
153
+ spot_table_pixel_sizes = None
154
+ if first_channel_layer is not None:
155
+ spot_table_pixel_sizes = _pixel_sizes(
156
+ first_channel_layer, cell_labels.ndim
157
+ )
158
+ spot_roi_columns = _spot_roi_columns(
159
+ viewer, data.rois, label_name, cell_labels.shape
160
+ )
161
+
162
+ # --- Resolve per-channel label layers before heavy computation ---
163
+ channel_entries = _build_channel_entries(
164
+ viewer, channels, cell_labels.shape, label_name
165
+ )
166
+ adjacency: dict[tuple[int, int], set[tuple[int, int]]] = {}
167
+ if data.export_colocalization and len(channel_entries) >= 2:
168
+ adjacency = _build_colocalization_adjacency(channel_entries)
169
+
170
+ # --- Compute per-channel cell metrics + per-spot rows ---
171
+ spot_lookup: dict[tuple[int, int], dict[str, object]] = {}
172
+ for channel_index, entry in enumerate(channel_entries):
173
+ _append_channel_exports(
174
+ channel_index,
175
+ entry,
176
+ cell_labels,
177
+ cell_ids,
178
+ cell_header,
179
+ cell_rows,
180
+ spot_rows,
181
+ spot_header,
182
+ spot_lookup,
183
+ spot_table_pixel_sizes,
184
+ spot_roi_columns,
185
+ )
186
+
187
+ # --- Apply colocalization columns (if requested) ---
188
+ if data.export_colocalization:
189
+ _apply_colocalization_columns(
190
+ cell_rows,
191
+ cell_ids,
192
+ cell_header,
193
+ spot_rows,
194
+ spot_lookup,
195
+ adjacency,
196
+ channel_entries,
197
+ int(cell_labels.max()),
198
+ )
199
+
200
+ # --- Emit cells and spots tables for the segmentation ---
201
+ file_stem = _sanitize_name(label_name or f"segmentation_{index}")
202
+ if cell_rows:
203
+ cell_path = temp_dir / f"{file_stem}_cells.{export_format}"
204
+ _write_table(cell_path, cell_header, cell_rows, export_format)
205
+ outputs.append(cell_path)
206
+ if not spot_header:
207
+ spot_header = _spot_header(
208
+ cell_labels.ndim, spot_table_pixel_sizes, spot_roi_columns
209
+ )
210
+ if data.export_colocalization:
211
+ if "colocalizes_with" not in spot_header:
212
+ spot_header.append("colocalizes_with")
213
+ for row in spot_rows:
214
+ row.setdefault("colocalizes_with", "")
215
+ spot_path = temp_dir / f"{file_stem}_spots.{export_format}"
216
+ _write_table(spot_path, spot_header, spot_rows, export_format)
217
+ outputs.append(spot_path)
218
+
219
+ return outputs
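
A minimal usage sketch of the entry point above (illustrative only; `feature` and `viewer` are assumed to come from the plugin configuration and the running napari session, and the layer names are hypothetical):

    import tempfile
    from pathlib import Path

    # feature: FeatureConfig wrapping SpotsFeatureData (segmentations, channels, ROIs)
    # viewer: the napari viewer holding the referenced Image/Labels/Shapes layers
    with tempfile.TemporaryDirectory() as tmp:
        written = export_spots(feature, Path(tmp), viewer=viewer, export_format="csv")
        # Two tables per segmentation, e.g. "nuclei_cells.csv" and "nuclei_spots.csv"
        for path in written:
            print(path.name)
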
220
+
221
+
222
+ def _build_channel_entries(
223
+ viewer: object,
224
+ channels: list,
225
+ cell_shape: tuple[int, ...],
226
+ label_name: str,
227
+ ) -> list[dict[str, object]]:
228
+ """Resolve channel layers into export-ready entries.
229
+
230
+ Parameters
231
+ ----------
232
+ viewer : object
233
+ Napari viewer instance used to resolve layers.
234
+ channels : list
235
+ Spots channel configurations (image + labels names).
236
+ cell_shape : tuple of int
237
+ Shape of the cell segmentation labels for validation.
238
+ label_name : str
239
+ Cell labels layer name (for warning context).
240
+
241
+ Returns
242
+ -------
243
+ list of dict
244
+ Each entry includes:
245
+ - ``channel_label`` : str
246
+ Display label for the channel.
247
+ - ``channel_layer`` : object or None
248
+ Image layer for intensity calculation.
249
+ - ``spots_labels`` : numpy.ndarray
250
+ Spots segmentation labels aligned to ``cell_shape``.
251
+
252
+ Notes
253
+ -----
254
+ Channels are filtered out when their segmentation layer is missing or
255
+ the segmentation does not match the cell labels shape.
256
+ """
257
+ entries: list[dict[str, object]] = []
258
+ for channel in channels:
259
+ # Resolve channel display label and layer references.
260
+ channel_label = _channel_label(channel)
261
+ channel_layer = _find_layer(viewer, channel.channel, "Image")
262
+ spots_layer = _find_layer(viewer, channel.spots_segmentation, "Labels")
263
+ if spots_layer is None:
264
+ warnings.warn(
265
+ "Spots export: spots segmentation layer "
266
+ f"'{channel.spots_segmentation}' not found.",
267
+ RuntimeWarning,
268
+ )
269
+ continue
270
+ spots_labels = layer_data_asarray(spots_layer)
271
+ if spots_labels.shape != cell_shape:
272
+ warnings.warn(
273
+ "Spots export: segmentation shape mismatch for "
274
+ f"'{label_name}' vs '{channel.spots_segmentation}'. "
275
+ "Skipping this channel for the segmentation.",
276
+ RuntimeWarning,
277
+ )
278
+ continue
279
+ entries.append(
280
+ {
281
+ "channel_label": channel_label,
282
+ "channel_layer": channel_layer,
283
+ "spots_labels": spots_labels,
284
+ }
285
+ )
286
+ return entries
287
+
288
+
289
+ def _append_channel_exports(
290
+ channel_index: int,
291
+ entry: dict[str, object],
292
+ cell_labels: np.ndarray,
293
+ cell_ids: np.ndarray,
294
+ cell_header: list[str],
295
+ cell_rows: list[dict[str, object]],
296
+ spot_rows: list[dict[str, object]],
297
+ spot_header: list[str],
298
+ spot_lookup: dict[tuple[int, int], dict[str, object]],
299
+ spot_table_pixel_sizes: np.ndarray | None,
300
+ spot_roi_columns: list[tuple[str, np.ndarray]],
301
+ ) -> None:
302
+ """Compute and append per-channel cell/spot metrics.
303
+
304
+ Parameters
305
+ ----------
306
+ channel_index : int
307
+ Index of the channel in the resolved channel list.
308
+ entry : dict
309
+ Channel entry from :func:`_build_channel_entries`.
310
+ cell_labels : numpy.ndarray
311
+ Cell segmentation labels array.
312
+ cell_ids : numpy.ndarray
313
+ Cell ids derived from the segmentation.
314
+ cell_header : list of str
315
+ Header list for the cells table, updated in-place.
316
+ cell_rows : list of dict
317
+ Cell rows updated in-place.
318
+ spot_rows : list of dict
319
+ Spot rows appended to in-place.
320
+ spot_header : list of str
321
+ Spot header list updated in-place.
322
+ spot_lookup : dict
323
+ Mapping from ``(channel_index, spot_id)`` to row metadata.
324
+ spot_table_pixel_sizes : numpy.ndarray or None
325
+ Pixel sizes to use for spot physical units.
326
+ spot_roi_columns : list of tuple
327
+ ROI masks for spot ROI membership columns.
328
+ """
329
+ channel_label = entry["channel_label"]
330
+ channel_layer = entry["channel_layer"]
331
+ spots_labels = entry["spots_labels"]
332
+
333
+ # Compute spot centroids in the channel segmentation.
334
+ spot_ids, spot_centroids = _compute_centroids(spots_labels)
335
+ if spot_ids.size == 0:
336
+ # No spots -> still emit per-cell count/mean columns with zeros/nans.
337
+ _append_cell_metrics(
338
+ cell_rows,
339
+ np.zeros_like(cell_ids, dtype=int),
340
+ np.full_like(cell_ids, np.nan, dtype=float),
341
+ channel_label,
342
+ cell_header,
343
+ )
344
+ return
345
+
346
+ # Spot areas (pixels) and mean intensity (per spot).
347
+ spot_area_px = _pixel_counts(spots_labels, spot_ids)
348
+ spot_mean_intensity = None
349
+ if channel_layer is not None:
350
+ image = layer_data_asarray(channel_layer)
351
+ if image.shape != spots_labels.shape:
352
+ warnings.warn(
353
+ "Spots export: image/spot shape mismatch for "
354
+ f"'{channel_label}'. Spot intensity values will be empty.",
355
+ RuntimeWarning,
356
+ )
357
+ else:
358
+ raw_sum = _intensity_sum(spots_labels, image, spot_ids)
359
+ spot_mean_intensity = _safe_divide(raw_sum, spot_area_px)
360
+ if spot_mean_intensity is None:
361
+ spot_mean_intensity = np.full(spot_area_px.shape, np.nan, dtype=float)
362
+
363
+ # Assign spots to cells using the centroid location.
364
+ cell_ids_for_spots = _spot_cell_ids_from_centroids(
365
+ cell_labels, spot_centroids
366
+ )
367
+ valid_mask = cell_ids_for_spots > 0
368
+ valid_cell_ids = cell_ids_for_spots[valid_mask]
369
+ valid_spot_ids = spot_ids[valid_mask]
370
+ valid_centroids = spot_centroids[valid_mask]
371
+ valid_areas = spot_area_px[valid_mask]
372
+ valid_means = spot_mean_intensity[valid_mask]
373
+
374
+ # Aggregate per-cell metrics and append columns to the cell table.
375
+ cell_counts, cell_means = _cell_spot_metrics(
376
+ valid_cell_ids, valid_means, int(cell_labels.max())
377
+ )
378
+ _append_cell_metrics(
379
+ cell_rows,
380
+ cell_counts[cell_ids],
381
+ cell_means[cell_ids],
382
+ channel_label,
383
+ cell_header,
384
+ )
385
+
386
+ # Append per-spot rows for this channel, preserving ROI membership.
387
+ spot_rows_for_channel = _spot_rows(
388
+ valid_spot_ids,
389
+ valid_cell_ids,
390
+ valid_centroids,
391
+ valid_areas,
392
+ valid_means,
393
+ channel_label,
394
+ spot_table_pixel_sizes,
395
+ spot_roi_columns,
396
+ )
397
+ if spot_rows_for_channel:
398
+ if not spot_header:
399
+ spot_header.extend(list(spot_rows_for_channel[0].keys()))
400
+ for row, spot_id, cell_id in zip(
401
+ spot_rows_for_channel, valid_spot_ids, valid_cell_ids
402
+ ):
403
+ spot_lookup[(channel_index, int(spot_id))] = {
404
+ "row": row,
405
+ "cell_id": int(cell_id),
406
+ }
407
+ spot_rows.extend(spot_rows_for_channel)
408
+
409
+
410
+ def _build_colocalization_adjacency(
411
+ channel_entries: list[dict[str, object]]
412
+ ) -> dict[tuple[int, int], set[tuple[int, int]]]:
413
+ """Build adjacency between overlapping spots across channels.
414
+
415
+ Parameters
416
+ ----------
417
+ channel_entries : list of dict
418
+ Channel entries with ``spots_labels`` arrays.
419
+
420
+ Returns
421
+ -------
422
+ dict
423
+ Mapping of ``(channel_index, spot_id)`` to a set of overlapping
424
+ ``(channel_index, spot_id)`` pairs.
425
+
426
+ Notes
427
+ -----
428
+ Two spots are considered colocalized when their label masks overlap.
429
+ """
430
+ adjacency: dict[tuple[int, int], set[tuple[int, int]]] = {}
431
+ for idx_a, entry_a in enumerate(channel_entries):
432
+ labels_a = entry_a["spots_labels"]
433
+ for idx_b in range(idx_a + 1, len(channel_entries)):
434
+ labels_b = channel_entries[idx_b]["spots_labels"]
435
+ mask = (labels_a > 0) & (labels_b > 0)
436
+ if not np.any(mask):
437
+ continue
438
+ pairs = np.column_stack((labels_a[mask], labels_b[mask]))
439
+ unique_pairs = np.unique(pairs, axis=0)
440
+ for spot_a, spot_b in unique_pairs:
441
+ key_a = (idx_a, int(spot_a))
442
+ key_b = (idx_b, int(spot_b))
443
+ adjacency.setdefault(key_a, set()).add(key_b)
444
+ adjacency.setdefault(key_b, set()).add(key_a)
445
+ return adjacency
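
A self-contained toy example of the adjacency this helper builds, assuming the module is importable; the 2x4 label arrays are made up for illustration:

    import numpy as np

    entries = [
        {"channel_label": "ch_a", "channel_layer": None,
         "spots_labels": np.array([[1, 1, 0, 0],
                                   [0, 0, 2, 2]])},
        {"channel_label": "ch_b", "channel_layer": None,
         "spots_labels": np.array([[0, 5, 5, 0],
                                   [0, 0, 0, 7]])},
    ]
    adjacency = _build_colocalization_adjacency(entries)
    # Spot 1 (channel 0) overlaps spot 5 (channel 1); spot 2 overlaps spot 7:
    # {(0, 1): {(1, 5)}, (1, 5): {(0, 1)}, (0, 2): {(1, 7)}, (1, 7): {(0, 2)}}
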
446
+
447
+
448
+ def _apply_colocalization_columns(
449
+ cell_rows: list[dict[str, object]],
450
+ cell_ids: np.ndarray,
451
+ cell_header: list[str],
452
+ spot_rows: list[dict[str, object]],
453
+ spot_lookup: dict[tuple[int, int], dict[str, object]],
454
+ adjacency: dict[tuple[int, int], set[tuple[int, int]]],
455
+ channel_entries: list[dict[str, object]],
456
+ max_cell_id: int,
457
+ ) -> None:
458
+ """Append colocalization columns to cell and spot rows.
459
+
460
+ Parameters
461
+ ----------
462
+ cell_rows : list of dict
463
+ Cell rows updated in-place.
464
+ cell_ids : numpy.ndarray
465
+ Cell id array aligned to ``cell_rows``.
466
+ cell_header : list of str
467
+ Cell header updated in-place.
468
+ spot_rows : list of dict
469
+ Spot rows updated in-place.
470
+ spot_lookup : dict
471
+ Mapping from ``(channel_index, spot_id)`` to spot row and cell id.
472
+ adjacency : dict
473
+ Colocalization adjacency built by
474
+ :func:`_build_colocalization_adjacency`.
475
+ channel_entries : list of dict
476
+ Channel entries used to map channel indices to labels.
477
+ max_cell_id : int
478
+ Maximum cell id in the segmentation, used to size count arrays.
479
+
480
+ Notes
481
+ -----
482
+ ``colocalizes_with`` is a semicolon-delimited list of
483
+ ``"<channel_label>:<spot_id>"`` entries.
484
+ ``colocalization_event_count`` counts unique overlapping spot pairs
485
+ within the same cell.
486
+ """
487
+ channel_labels = [entry["channel_label"] for entry in channel_entries]
488
+ for key, info in spot_lookup.items():
489
+ others = adjacency.get(key, set())
490
+ names: list[str] = []
491
+ for other in others:
492
+ if other not in spot_lookup:
493
+ continue
494
+ other_label = channel_labels[other[0]]
495
+ names.append(f"{other_label}:{other[1]}")
496
+ info["row"]["colocalizes_with"] = (
497
+ ";".join(sorted(set(names))) if names else ""
498
+ )
499
+ for row in spot_rows:
500
+ row.setdefault("colocalizes_with", "")
501
+
502
+ colocalization_key = "colocalization_event_count"
503
+ event_counts = np.zeros(max_cell_id + 1, dtype=int)
504
+ seen_pairs: set[tuple[tuple[int, int], tuple[int, int]]] = set()
505
+ for key, others in adjacency.items():
506
+ if key not in spot_lookup:
507
+ continue
508
+ for other in others:
509
+ if other not in spot_lookup:
510
+ continue
511
+ pair = (key, other) if key < other else (other, key)
512
+ if pair in seen_pairs:
513
+ continue
514
+ seen_pairs.add(pair)
515
+ cell_id_a = spot_lookup[key]["cell_id"]
516
+ cell_id_b = spot_lookup[other]["cell_id"]
517
+ if cell_id_a > 0 and cell_id_a == cell_id_b:
518
+ event_counts[cell_id_a] += 1
519
+ for row, cell_id in zip(cell_rows, cell_ids):
520
+ row[colocalization_key] = int(event_counts[cell_id])
521
+ if colocalization_key not in cell_header:
522
+ cell_header.append(colocalization_key)
523
+
524
+
525
+ def _find_layer(viewer, name: str, layer_type: str):
526
+ """Return a viewer layer by name and class name.
527
+
528
+ Parameters
529
+ ----------
530
+ viewer : object
531
+ Napari viewer instance containing layers.
532
+ name : str
533
+ Layer name to locate.
534
+ layer_type : str
535
+ Layer class name to match (e.g., ``"Image"`` or ``"Labels"``).
536
+
537
+ Returns
538
+ -------
539
+ object or None
540
+ Matching layer instance, or ``None`` if not found.
541
+ """
542
+ for layer in viewer.layers:
543
+ if layer.__class__.__name__ == layer_type and layer.name == name:
544
+ return layer
545
+ return None
546
+
547
+
548
+ def _compute_centroids(labels: np.ndarray) -> tuple[np.ndarray, np.ndarray]:
549
+ """Compute centroid coordinates for each non-zero label.
550
+
551
+ Parameters
552
+ ----------
553
+ labels : numpy.ndarray
554
+ Integer label image. ``0`` is treated as background.
555
+
556
+ Returns
557
+ -------
558
+ tuple of numpy.ndarray
559
+ ``(label_ids, centroids)`` where ``label_ids`` is a 1D array of
560
+ label ids and ``centroids`` is an ``(N, D)`` array of centroid
561
+ coordinates in pixel units.
562
+ """
563
+ props = regionprops_table(labels, properties=("label", "centroid"))
564
+ label_ids = np.asarray(props.get("label", []), dtype=int)
565
+ centroid_cols = [key for key in props if key.startswith("centroid-")]
566
+ if not centroid_cols:
567
+ return label_ids, np.empty((0, labels.ndim), dtype=float)
568
+ centroids = np.column_stack([props[key] for key in centroid_cols]).astype(
569
+ float
570
+ )
571
+ return label_ids, centroids
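
A small worked example on made-up labels, assuming scikit-image is installed (it is imported at the top of this module):

    import numpy as np

    labels = np.array([[0, 1],
                       [2, 2]])
    ids, centroids = _compute_centroids(labels)
    # ids -> array([1, 2])
    # centroids -> array([[0. , 1. ],
    #                     [1. , 0.5]])  # (row, col) pixel coordinates
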
572
+
573
+
574
+ def _pixel_counts(labels: np.ndarray, label_ids: np.ndarray) -> np.ndarray:
575
+ """Return pixel counts for each label id.
576
+
577
+ Parameters
578
+ ----------
579
+ labels : numpy.ndarray
580
+ Integer label image.
581
+ label_ids : numpy.ndarray
582
+ Label ids to extract counts for.
583
+
584
+ Returns
585
+ -------
586
+ numpy.ndarray
587
+ Pixel counts for each provided label id.
588
+ """
589
+ labels_flat = labels.ravel()
590
+ max_label = int(labels_flat.max()) if labels_flat.size else 0
591
+ counts = np.bincount(labels_flat, minlength=max_label + 1)
592
+ return counts[label_ids]
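
A quick check of the helper above on the same toy labels:

    import numpy as np

    labels = np.array([[0, 1],
                       [2, 2]])
    _pixel_counts(labels, np.array([1, 2]))
    # -> array([1, 2])  (label 1 covers one pixel, label 2 covers two)
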
593
+
594
+
595
+ def _intensity_sum(
596
+ labels: np.ndarray, image: np.ndarray, label_ids: np.ndarray
597
+ ) -> np.ndarray:
598
+ """Return raw intensity sums for each label id.
599
+
600
+ Parameters
601
+ ----------
602
+ labels : numpy.ndarray
603
+ Integer label image.
604
+ image : numpy.ndarray
605
+ Image data aligned to ``labels``.
606
+ label_ids : numpy.ndarray
607
+ Label ids to extract sums for.
608
+
609
+ Returns
610
+ -------
611
+ numpy.ndarray
612
+ Raw integrated intensities for each provided label id.
613
+ """
614
+ labels_flat = labels.ravel()
615
+ image_flat = np.nan_to_num(image.ravel(), nan=0.0)
616
+ max_label = int(labels_flat.max()) if labels_flat.size else 0
617
+ sums = np.bincount(labels_flat, weights=image_flat, minlength=max_label + 1)
618
+ return sums[label_ids]
619
+
620
+
621
+ def _safe_float(value) -> float | None:
622
+ """Convert a metadata value to float when possible.
623
+
624
+ Parameters
625
+ ----------
626
+ value : object
627
+ Metadata value to convert.
628
+
629
+ Returns
630
+ -------
631
+ float or None
632
+ Converted value, or ``None`` when conversion fails.
633
+ """
634
+ if value is None:
635
+ return None
636
+ try:
637
+ return float(value)
638
+ except (TypeError, ValueError):
639
+ return None
640
+
641
+
642
+ def _pixel_sizes(layer, ndim: int) -> np.ndarray | None:
643
+ """Return per-axis pixel sizes from layer metadata.
644
+
645
+ Parameters
646
+ ----------
647
+ layer : object
648
+ Napari layer providing ``metadata``.
649
+ ndim : int
650
+ Dimensionality of the labels or image array.
651
+
652
+ Returns
653
+ -------
654
+ numpy.ndarray or None
655
+ Per-axis pixel sizes in micrometers, ordered to match array axes.
656
+ Returns ``None`` when metadata is missing or incomplete.
657
+
658
+ Notes
659
+ -----
660
+ The SenoQuant reader stores sizes under
661
+ ``layer.metadata["physical_pixel_sizes"]`` using ``"Z"``, ``"Y"``,
662
+ and ``"X"`` keys (micrometers).
663
+ """
664
+ metadata = getattr(layer, "metadata", None)
665
+ if not isinstance(metadata, dict):
666
+ return None
667
+ physical_sizes = metadata.get("physical_pixel_sizes")
668
+ if not isinstance(physical_sizes, dict):
669
+ return None
670
+ size_x = physical_sizes.get("X")
671
+ size_y = physical_sizes.get("Y")
672
+ size_z = physical_sizes.get("Z")
673
+ return _pixel_sizes_from_metadata(size_x, size_y, size_z, ndim)
674
+
675
+
676
+ def _pixel_sizes_from_metadata(
677
+ size_x, size_y, size_z, ndim: int
678
+ ) -> np.ndarray | None:
679
+ """Normalize metadata sizes into axis-ordered pixel sizes.
680
+
681
+ Parameters
682
+ ----------
683
+ size_x, size_y, size_z : object
684
+ Physical sizes from metadata (may be ``None`` or non-numeric).
685
+ ndim : int
686
+ Dimensionality of the labels or image array.
687
+
688
+ Returns
689
+ -------
690
+ numpy.ndarray or None
691
+ Axis-ordered pixel sizes in micrometers, or ``None`` if sizes are
692
+ incomplete or ``ndim`` is unsupported.
693
+ """
694
+ axis_sizes = {
695
+ "x": _safe_float(size_x),
696
+ "y": _safe_float(size_y),
697
+ "z": _safe_float(size_z),
698
+ }
699
+ if ndim == 2:
700
+ sizes = [axis_sizes["y"], axis_sizes["x"]]
701
+ elif ndim == 3:
702
+ sizes = [axis_sizes["z"], axis_sizes["y"], axis_sizes["x"]]
703
+ else:
704
+ return None
705
+ if any(value is None for value in sizes):
706
+ return None
707
+ return np.asarray(sizes, dtype=float)
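
Quick examples of the axis ordering this helper produces (the micrometer values are made up):

    _pixel_sizes_from_metadata(0.325, 0.325, 1.0, ndim=3)
    # -> array([1.   , 0.325, 0.325])   # (z, y, x) order
    _pixel_sizes_from_metadata(0.325, 0.325, None, ndim=2)
    # -> array([0.325, 0.325])          # (y, x); Z is ignored for 2D
    _pixel_sizes_from_metadata(0.325, 0.325, None, ndim=3)
    # -> None                           # incomplete metadata for a 3D array
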
708
+
709
+
710
+ def _axis_names(ndim: int) -> list[str]:
711
+ """Return axis suffixes for centroid columns.
712
+
713
+ Parameters
714
+ ----------
715
+ ndim : int
716
+ Number of spatial dimensions.
717
+
718
+ Returns
719
+ -------
720
+ list of str
721
+ Axis suffixes in display order.
722
+ """
723
+ if ndim == 2:
724
+ return ["y", "x"]
725
+ if ndim == 3:
726
+ return ["z", "y", "x"]
727
+ return [f"axis_{idx}" for idx in range(ndim)]
728
+
729
+
730
+ def _initialize_rows(
731
+ label_ids: np.ndarray,
732
+ centroids: np.ndarray,
733
+ pixel_sizes: np.ndarray | None,
734
+ ) -> list[dict[str, float]]:
735
+ """Initialize output rows with label ids and centroid coordinates.
736
+
737
+ Parameters
738
+ ----------
739
+ label_ids : numpy.ndarray
740
+ Label identifiers for each row.
741
+ centroids : numpy.ndarray
742
+ Centroid coordinates in pixel units.
743
+ pixel_sizes : numpy.ndarray or None
744
+ Per-axis pixel sizes in micrometers. When provided, physical
745
+ centroid columns are added.
746
+
747
+ Returns
748
+ -------
749
+ list of dict
750
+ Row dictionaries with ``label_id`` and centroid columns.
751
+ """
752
+ axes = _axis_names(centroids.shape[1] if centroids.size else 0)
753
+ rows: list[dict[str, float]] = []
754
+ for label_id, centroid in zip(label_ids, centroids):
755
+ row: dict[str, float] = {"label_id": int(label_id)}
756
+ for axis, value in zip(axes, centroid):
757
+ row[f"centroid_{axis}_pixels"] = float(value)
758
+ if pixel_sizes is not None and pixel_sizes.size == len(axes):
759
+ for axis, value, scale in zip(axes, centroid, pixel_sizes):
760
+ row[f"centroid_{axis}_um"] = float(value * scale)
761
+ rows.append(row)
762
+ return rows
763
+
764
+
765
+ def _add_roi_columns(
766
+ rows: list[dict[str, float]],
767
+ labels: np.ndarray,
768
+ label_ids: np.ndarray,
769
+ viewer: object | None,
770
+ rois: Sequence["ROIConfig"],
771
+ label_name: str,
772
+ ) -> None:
773
+ """Add per-ROI inclusion columns to the output rows.
774
+
775
+ Parameters
776
+ ----------
777
+ rows : list of dict
778
+ Output rows to update in-place.
779
+ labels : numpy.ndarray
780
+ Label image used to compute ROI intersections.
781
+ label_ids : numpy.ndarray
782
+ Label ids corresponding to the output rows.
783
+ viewer : object or None
784
+ Napari viewer used to resolve shapes layers.
785
+ rois : sequence of ROIConfig
786
+ ROI configuration entries to evaluate.
787
+ label_name : str
788
+ Name of the labels layer, used in warning messages.
789
+ """
790
+ if viewer is None or not rois or not rows:
791
+ return
792
+ labels_flat = labels.ravel()
793
+ max_label = int(labels_flat.max()) if labels_flat.size else 0
794
+ for index, roi in enumerate(rois, start=0):
795
+ layer_name = getattr(roi, "layer", "")
796
+ if not layer_name:
797
+ continue
798
+ shapes_layer = _find_layer(viewer, layer_name, "Shapes")
799
+ if shapes_layer is None:
800
+ warnings.warn(
801
+ f"ROI layer '{layer_name}' not found for labels '{label_name}'.",
802
+ RuntimeWarning,
803
+ )
804
+ continue
805
+ mask = _shapes_layer_mask(shapes_layer, labels.shape)
806
+ if mask is None:
807
+ warnings.warn(
808
+ f"ROI layer '{layer_name}' could not be rasterized.",
809
+ RuntimeWarning,
810
+ )
811
+ continue
812
+ intersect_counts = np.bincount(
813
+ labels_flat[mask.ravel()], minlength=max_label + 1
814
+ )
815
+ included = intersect_counts[label_ids] > 0
816
+ roi_name = getattr(roi, "name", "") or f"roi_{index}"
817
+ roi_type = getattr(roi, "roi_type", "Include") or "Include"
818
+ if roi_type.lower() == "exclude":
819
+ prefix = "excluded_from_roi"
820
+ else:
821
+ prefix = "included_in_roi"
822
+ column = f"{prefix}_{_sanitize_name(roi_name)}"
823
+ for row, value in zip(rows, included):
824
+ row[column] = int(value)
825
+
826
+
827
+ def _shapes_layer_mask(
828
+ layer: object, shape: tuple[int, ...]
829
+ ) -> np.ndarray | None:
830
+ """Render a shapes layer into a boolean mask.
831
+
832
+ Parameters
833
+ ----------
834
+ layer : object
835
+ Napari shapes layer instance.
836
+ shape : tuple of int
837
+ Target mask shape matching the labels array.
838
+
839
+ Returns
840
+ -------
841
+ numpy.ndarray or None
842
+ Boolean mask array when rendering succeeds.
843
+ """
844
+ masks_array = _shape_masks_array(layer, shape)
845
+ if masks_array is None:
846
+ return None
847
+ if masks_array.ndim == len(shape):
848
+ combined = masks_array
849
+ else:
850
+ combined = np.any(masks_array, axis=0)
851
+ combined = np.asarray(combined)
852
+ combined = np.squeeze(combined)
853
+ if combined.shape != shape:
854
+ return None
855
+ return combined.astype(bool)
856
+
857
+
858
+ def _shape_masks_array(
859
+ layer: object, shape: tuple[int, ...]
860
+ ) -> np.ndarray | None:
861
+ """Return the raw masks array from a shapes layer.
862
+
863
+ Parameters
864
+ ----------
865
+ layer : object
866
+ Napari shapes layer instance.
867
+ shape : tuple of int
868
+ Target mask shape.
869
+
870
+ Returns
871
+ -------
872
+ numpy.ndarray or None
873
+ Raw masks array, or ``None`` if rendering fails.
874
+ """
875
+ to_masks = getattr(layer, "to_masks", None)
876
+ if callable(to_masks):
877
+ try:
878
+ return np.asarray(to_masks(mask_shape=shape))
879
+ except Exception:
880
+ return None
881
+ return None
882
+
883
+
884
+ def _spot_cell_ids_from_centroids(
885
+ cell_labels: np.ndarray, centroids: np.ndarray
886
+ ) -> np.ndarray:
887
+ """Assign each spot to a cell id using its centroid position.
888
+
889
+ Parameters
890
+ ----------
891
+ cell_labels : numpy.ndarray
892
+ Cell segmentation labels array.
893
+ centroids : numpy.ndarray
894
+ Spot centroid coordinates in pixel units.
895
+
896
+ Returns
897
+ -------
898
+ numpy.ndarray
899
+ Cell id for each spot, with ``0`` indicating background.
900
+ """
901
+ if centroids.size == 0:
902
+ return np.empty((0,), dtype=int)
903
+ coords = np.round(centroids).astype(int)
904
+ max_indices = np.asarray(cell_labels.shape) - 1
905
+ coords = np.clip(coords, 0, max_indices)
906
+ indices = tuple(coords[:, axis] for axis in range(coords.shape[1]))
907
+ return cell_labels[indices].astype(int)
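
A toy illustration of the centroid-based assignment (arrays made up; centroids are rounded and clipped to the label image bounds):

    import numpy as np

    cells = np.array([[1, 1, 0],
                      [0, 2, 2]])
    spots = np.array([[0.2, 0.4],   # rounds to (0, 0) -> cell 1
                      [1.0, 2.3],   # rounds to (1, 2) -> cell 2
                      [0.0, 2.0]])  # lands on background -> 0
    _spot_cell_ids_from_centroids(cells, spots)
    # -> array([1, 2, 0])
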
908
+
909
+
910
+ def _cell_spot_metrics(
911
+ cell_ids: np.ndarray, spot_means: np.ndarray, max_cell: int
912
+ ) -> tuple[np.ndarray, np.ndarray]:
913
+ """Compute per-cell spot counts and mean intensities.
914
+
915
+ Parameters
916
+ ----------
917
+ cell_ids : numpy.ndarray
918
+ Cell ids for valid spots.
919
+ spot_means : numpy.ndarray
920
+ Mean intensity for each valid spot.
921
+ max_cell : int
922
+ Maximum label id in the cell segmentation.
923
+
924
+ Returns
925
+ -------
926
+ tuple of numpy.ndarray
927
+ ``(counts, means)`` arrays indexed by cell id.
928
+ """
929
+ counts = np.bincount(cell_ids, minlength=max_cell + 1)
930
+ mean_sum = np.bincount(
931
+ cell_ids, weights=spot_means, minlength=max_cell + 1
932
+ )
933
+ mean_values = _safe_divide(mean_sum, counts)
934
+ return counts, mean_values
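
A small worked example of the per-cell aggregation (three valid spots falling into two cells; numbers made up):

    import numpy as np

    cell_ids = np.array([1, 1, 2])            # cell each spot fell into
    spot_means = np.array([10.0, 20.0, 5.0])  # per-spot mean intensities
    counts, means = _cell_spot_metrics(cell_ids, spot_means, max_cell=3)
    # counts -> array([0, 2, 1, 0])   (indexed by cell id)
    # means  -> array([ 0., 15.,  5.,  0.])
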
935
+
936
+
937
+ def _append_cell_metrics(
938
+ rows: list[dict[str, object]],
939
+ counts: np.ndarray,
940
+ means: np.ndarray,
941
+ channel_label: str,
942
+ header: list[str],
943
+ ) -> None:
944
+ """Append channel spot metrics to cell rows.
945
+
946
+ Parameters
947
+ ----------
948
+ rows : list of dict
949
+ Cell rows to update in-place.
950
+ counts : numpy.ndarray
951
+ Spot counts per row.
952
+ means : numpy.ndarray
953
+ Mean spot intensity per row.
954
+ channel_label : str
955
+ Display label for the channel.
956
+ header : list of str
957
+ Header list to extend with the new column names.
958
+ """
959
+ prefix = _sanitize_name(channel_label)
960
+ count_key = f"{prefix}_spot_count"
961
+ mean_key = f"{prefix}_spot_mean_intensity"
962
+ for row, count, mean in zip(rows, counts, means):
963
+ row[count_key] = int(count)
964
+ row[mean_key] = float(mean) if np.isfinite(mean) else np.nan
965
+ header.extend([count_key, mean_key])
966
+
967
+
968
+ def _spot_rows(
969
+ spot_ids: np.ndarray,
970
+ cell_ids: np.ndarray,
971
+ centroids: np.ndarray,
972
+ areas_px: np.ndarray,
973
+ mean_intensity: np.ndarray,
974
+ channel_label: str,
975
+ pixel_sizes: np.ndarray | None,
976
+ roi_columns: list[tuple[str, np.ndarray]],
977
+ ) -> list[dict[str, object]]:
978
+ """Build per-spot rows for export.
979
+
980
+ Parameters
981
+ ----------
982
+ spot_ids : numpy.ndarray
983
+ Spot label identifiers.
984
+ cell_ids : numpy.ndarray
985
+ Cell ids associated with each spot.
986
+ centroids : numpy.ndarray
987
+ Spot centroid coordinates in pixel units.
988
+ areas_px : numpy.ndarray
989
+ Spot area (2D) or volume (3D) in pixel units.
990
+ mean_intensity : numpy.ndarray
991
+ Mean intensity of each spot for the channel image.
992
+ channel_label : str
993
+ Display label for the channel to store in the row.
994
+ pixel_sizes : numpy.ndarray or None
995
+ Per-axis pixel sizes in micrometers. When provided, physical
996
+ centroid coordinates and area/volume are included.
997
+ roi_columns : list of tuple
998
+ Precomputed ROI column names and boolean masks.
999
+
1000
+ Returns
1001
+ -------
1002
+ list of dict
1003
+ Rows ready for serialization in the spots table.
1004
+ """
1005
+ rows: list[dict[str, object]] = []
1006
+ axes = _axis_names(centroids.shape[1] if centroids.size else 0)
1007
+ size_key_px, size_key_um, size_scale = _spot_size_keys(
1008
+ centroids.shape[1] if centroids.size else 0, pixel_sizes
1009
+ )
1010
+ roi_values = _spot_roi_values(centroids, roi_columns)
1011
+ for idx, (spot_id, cell_id, centroid, area_px, mean_val) in enumerate(
1012
+ zip(spot_ids, cell_ids, centroids, areas_px, mean_intensity)
1013
+ ):
1014
+ row: dict[str, object] = {
1015
+ "spot_id": int(spot_id),
1016
+ "cell_id": int(cell_id),
1017
+ "channel": channel_label,
1018
+ }
1019
+ for axis, value in zip(axes, centroid):
1020
+ row[f"centroid_{axis}_pixels"] = float(value)
1021
+ if pixel_sizes is not None and pixel_sizes.size == len(axes):
1022
+ for axis, value, scale in zip(axes, centroid, pixel_sizes):
1023
+ row[f"centroid_{axis}_um"] = float(value * scale)
1024
+ row[size_key_px] = float(area_px)
1025
+ if size_scale is not None and size_key_um:
1026
+ row[size_key_um] = float(area_px * size_scale)
1027
+ row["spot_mean_intensity"] = (
1028
+ float(mean_val) if np.isfinite(mean_val) else np.nan
1029
+ )
1030
+ for column, values in roi_values:
1031
+ row[column] = int(values[idx])
1032
+ rows.append(row)
1033
+ return rows
1034
+
1035
+
1036
+ def _spot_size_keys(
1037
+ ndim: int, pixel_sizes: np.ndarray | None
1038
+ ) -> tuple[str, str | None, float | None]:
1039
+ """Return size column names and physical scale for spot sizes.
1040
+
1041
+ Parameters
1042
+ ----------
1043
+ ndim : int
1044
+ Number of spatial dimensions.
1045
+ pixel_sizes : numpy.ndarray or None
1046
+ Per-axis pixel sizes in micrometers.
1047
+
1048
+ Returns
1049
+ -------
1050
+ tuple
1051
+ ``(pixel_key, physical_key, scale)`` where ``scale`` is the
1052
+ multiplicative factor to convert pixel area/volume to physical
1053
+ units, or ``None`` if physical sizes are unavailable.
1054
+ """
1055
+ if ndim == 3:
1056
+ size_key_px = "spot_volume_pixels"
1057
+ size_key_um = "spot_volume_um3"
1058
+ else:
1059
+ size_key_px = "spot_area_pixels"
1060
+ size_key_um = "spot_area_um2"
1061
+ if pixel_sizes is None:
1062
+ return size_key_px, None, None
1063
+ scale = float(np.prod(pixel_sizes))
1064
+ return size_key_px, size_key_um, scale
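
Examples of the size keys and physical scale factor (the pixel sizes are made-up micrometer values):

    import numpy as np

    _spot_size_keys(2, None)
    # -> ("spot_area_pixels", None, None)
    _spot_size_keys(3, np.array([1.0, 0.325, 0.325]))
    # -> ("spot_volume_pixels", "spot_volume_um3", 0.105625)
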
1065
+
1066
+
1067
+ def _spot_roi_columns(
1068
+ viewer: object | None,
1069
+ rois: Sequence["ROIConfig"],
1070
+ label_name: str,
1071
+ shape: tuple[int, ...],
1072
+ ) -> list[tuple[str, np.ndarray]]:
1073
+ """Prepare ROI mask columns for spots export.
1074
+
1075
+ Parameters
1076
+ ----------
1077
+ viewer : object or None
1078
+ Napari viewer instance used to resolve shapes layers.
1079
+ rois : sequence of ROIConfig
1080
+ ROI configuration entries to evaluate.
1081
+ label_name : str
1082
+ Name of the labels layer, used in warning messages.
1083
+ shape : tuple of int
1084
+ Target mask shape matching the labels array.
1085
+
1086
+ Returns
1087
+ -------
1088
+ list of tuple
1089
+ List of ``(column_name, mask)`` entries for ROI membership.
1090
+ """
1091
+ if viewer is None or not rois:
1092
+ return []
1093
+ columns: list[tuple[str, np.ndarray]] = []
1094
+ for index, roi in enumerate(rois, start=0):
1095
+ layer_name = getattr(roi, "layer", "")
1096
+ if not layer_name:
1097
+ continue
1098
+ shapes_layer = _find_layer(viewer, layer_name, "Shapes")
1099
+ if shapes_layer is None:
1100
+ warnings.warn(
1101
+ f"ROI layer '{layer_name}' not found for labels '{label_name}'.",
1102
+ RuntimeWarning,
1103
+ )
1104
+ continue
1105
+ mask = _shapes_layer_mask(shapes_layer, shape)
1106
+ if mask is None:
1107
+ warnings.warn(
1108
+ f"ROI layer '{layer_name}' could not be rasterized.",
1109
+ RuntimeWarning,
1110
+ )
1111
+ continue
1112
+ roi_name = getattr(roi, "name", "") or f"roi_{index}"
1113
+ roi_type = getattr(roi, "roi_type", "Include") or "Include"
1114
+ if roi_type.lower() == "exclude":
1115
+ prefix = "excluded_from_roi"
1116
+ else:
1117
+ prefix = "included_in_roi"
1118
+ column = f"{prefix}_{_sanitize_name(roi_name)}"
1119
+ columns.append((column, mask))
1120
+ return columns
1121
+
1122
+
1123
+ def _spot_roi_values(
1124
+ centroids: np.ndarray, roi_columns: list[tuple[str, np.ndarray]]
1125
+ ) -> list[tuple[str, np.ndarray]]:
1126
+ """Return ROI membership values for each spot centroid.
1127
+
1128
+ Parameters
1129
+ ----------
1130
+ centroids : numpy.ndarray
1131
+ Spot centroid coordinates in pixel units.
1132
+ roi_columns : list of tuple
1133
+ ROI columns from :func:`_spot_roi_columns`.
1134
+
1135
+ Returns
1136
+ -------
1137
+ list of tuple
1138
+ List of ``(column_name, values)`` pairs aligned to the spot order.
1139
+ """
1140
+ if not roi_columns or centroids.size == 0:
1141
+ return []
1142
+ coords = np.round(centroids).astype(int)
1143
+ roi_values: list[tuple[str, np.ndarray]] = []
1144
+ for column, mask in roi_columns:
1145
+ max_indices = np.asarray(mask.shape) - 1
1146
+ clipped = np.clip(coords, 0, max_indices)
1147
+ indices = tuple(
1148
+ clipped[:, axis] for axis in range(clipped.shape[1])
1149
+ )
1150
+ values = mask[indices].astype(int)
1151
+ roi_values.append((column, values))
1152
+ return roi_values
1153
+
1154
+
1155
+ def _spot_header(
1156
+ ndim: int,
1157
+ pixel_sizes: np.ndarray | None,
1158
+ roi_columns: list[tuple[str, np.ndarray]],
1159
+ ) -> list[str]:
1160
+ """Build the header for the spots table.
1161
+
1162
+ Parameters
1163
+ ----------
1164
+ ndim : int
1165
+ Number of spatial dimensions.
1166
+ pixel_sizes : numpy.ndarray or None
1167
+ Per-axis pixel sizes in micrometers.
1168
+ roi_columns : list of tuple
1169
+ ROI columns to append to the header.
1170
+
1171
+ Returns
1172
+ -------
1173
+ list of str
1174
+ Column names for the spots export table.
1175
+ """
1176
+ axes = _axis_names(ndim)
1177
+ size_key_px, size_key_um, _scale = _spot_size_keys(ndim, pixel_sizes)
1178
+ header = ["spot_id", "cell_id", "channel"]
1179
+ header.extend([f"centroid_{axis}_pixels" for axis in axes])
1180
+ if pixel_sizes is not None and pixel_sizes.size == len(axes):
1181
+ header.extend([f"centroid_{axis}_um" for axis in axes])
1182
+ header.append(size_key_px)
1183
+ if size_key_um:
1184
+ header.append(size_key_um)
1185
+ header.append("spot_mean_intensity")
1186
+ if roi_columns:
1187
+ header.extend([column for column, _mask in roi_columns])
1188
+ return header
1189
+
1190
+
1191
+ def _channel_label(channel) -> str:
1192
+ """Return a display label for a channel.
1193
+
1194
+ Parameters
1195
+ ----------
1196
+ channel : object
1197
+ Channel configuration object.
1198
+
1199
+ Returns
1200
+ -------
1201
+ str
1202
+ Human-readable label for the channel.
1203
+ """
1204
+ label = channel.name.strip() if channel.name else ""
1205
+ return label or channel.channel
1206
+
1207
+
1208
+ def _sanitize_name(value: str) -> str:
1209
+ """Normalize names for filenames and column prefixes.
1210
+
1211
+ Parameters
1212
+ ----------
1213
+ value : str
1214
+ Raw name to sanitize.
1215
+
1216
+ Returns
1217
+ -------
1218
+ str
1219
+ Lowercase name with spaces converted to underscores and unsafe characters replaced by underscores.
1220
+ """
1221
+ cleaned = "".join(
1222
+ char if char.isalnum() or char in "-_ " else "_" for char in value
1223
+ )
1224
+ return cleaned.strip().replace(" ", "_").lower()
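
A few example inputs and the names this normalization yields (channel names are made up):

    _sanitize_name("p21 FISH")     # -> "p21_fish"
    _sanitize_name("Ki-67/561")    # -> "ki-67_561"
    _sanitize_name("DAPI (Ch 1)")  # -> "dapi__ch_1_"
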
1225
+
1226
+
1227
+ def _safe_divide(numerator: np.ndarray, denominator: np.ndarray) -> np.ndarray:
1228
+ """Compute numerator/denominator with zero-safe handling.
1229
+
1230
+ Parameters
1231
+ ----------
1232
+ numerator : numpy.ndarray
1233
+ Numerator values.
1234
+ denominator : numpy.ndarray
1235
+ Denominator values.
1236
+
1237
+ Returns
1238
+ -------
1239
+ numpy.ndarray
1240
+ Division results with zeros where denominator is zero.
1241
+ """
1242
+ result = np.zeros_like(numerator, dtype=float)
1243
+ np.divide(numerator, denominator, out=result, where=denominator != 0)
1244
+ return result
1245
+
1246
+
1247
+ def _write_table(
1248
+ path: Path, header: list[str], rows: list[dict[str, object]], fmt: str
1249
+ ) -> None:
1250
+ """Write rows to disk as CSV or XLSX.
1251
+
1252
+ Parameters
1253
+ ----------
1254
+ path : pathlib.Path
1255
+ Destination file path.
1256
+ header : list of str
1257
+ Column names for the output table.
1258
+ rows : list of dict
1259
+ Table rows keyed by column name.
1260
+ fmt : str
1261
+ Output format (``"csv"`` or ``"xlsx"``).
1262
+
1263
+ Raises
1264
+ ------
1265
+ RuntimeError
1266
+ If ``fmt`` is ``"xlsx"`` and ``openpyxl`` is unavailable.
1267
+ ValueError
1268
+ If ``fmt`` is not a supported format.
1269
+ """
1270
+ if fmt == "csv":
1271
+ with path.open("w", newline="", encoding="utf-8") as handle:
1272
+ writer = csv.DictWriter(handle, fieldnames=header)
1273
+ writer.writeheader()
1274
+ writer.writerows(rows)
1275
+ return
1276
+
1277
+ if fmt == "xlsx":
1278
+ try:
1279
+ import openpyxl
1280
+ except ImportError as exc: # pragma: no cover
1281
+ raise RuntimeError(
1282
+ "openpyxl is required for xlsx export"
1283
+ ) from exc
1284
+ workbook = openpyxl.Workbook()
1285
+ sheet = workbook.active
1286
+ sheet.append(header)
1287
+ for row in rows:
1288
+ sheet.append([row.get(column) for column in header])
1289
+ workbook.save(path)
1290
+ return
1291
+
1292
+ raise ValueError(f"Unsupported export format: {fmt}")
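
A minimal check of the writer's CSV branch, runnable once this module is importable; the file name and rows are made up:

    from pathlib import Path

    header = ["spot_id", "cell_id", "channel", "spot_mean_intensity"]
    rows = [
        {"spot_id": 1, "cell_id": 3, "channel": "ch_a", "spot_mean_intensity": 41.5},
        {"spot_id": 2, "cell_id": 3, "channel": "ch_a", "spot_mean_intensity": 39.0},
    ]
    _write_table(Path("example_spots.csv"), header, rows, "csv")
    # example_spots.csv now contains a header row plus the two spot rows.
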