senoquant 1.0.0b1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- senoquant/__init__.py +6 -0
- senoquant/_reader.py +7 -0
- senoquant/_widget.py +33 -0
- senoquant/napari.yaml +83 -0
- senoquant/reader/__init__.py +5 -0
- senoquant/reader/core.py +369 -0
- senoquant/tabs/__init__.py +15 -0
- senoquant/tabs/batch/__init__.py +10 -0
- senoquant/tabs/batch/backend.py +641 -0
- senoquant/tabs/batch/config.py +270 -0
- senoquant/tabs/batch/frontend.py +1283 -0
- senoquant/tabs/batch/io.py +326 -0
- senoquant/tabs/batch/layers.py +86 -0
- senoquant/tabs/quantification/__init__.py +1 -0
- senoquant/tabs/quantification/backend.py +228 -0
- senoquant/tabs/quantification/features/__init__.py +80 -0
- senoquant/tabs/quantification/features/base.py +142 -0
- senoquant/tabs/quantification/features/marker/__init__.py +5 -0
- senoquant/tabs/quantification/features/marker/config.py +69 -0
- senoquant/tabs/quantification/features/marker/dialog.py +437 -0
- senoquant/tabs/quantification/features/marker/export.py +879 -0
- senoquant/tabs/quantification/features/marker/feature.py +119 -0
- senoquant/tabs/quantification/features/marker/morphology.py +285 -0
- senoquant/tabs/quantification/features/marker/rows.py +654 -0
- senoquant/tabs/quantification/features/marker/thresholding.py +46 -0
- senoquant/tabs/quantification/features/roi.py +346 -0
- senoquant/tabs/quantification/features/spots/__init__.py +5 -0
- senoquant/tabs/quantification/features/spots/config.py +62 -0
- senoquant/tabs/quantification/features/spots/dialog.py +477 -0
- senoquant/tabs/quantification/features/spots/export.py +1292 -0
- senoquant/tabs/quantification/features/spots/feature.py +112 -0
- senoquant/tabs/quantification/features/spots/morphology.py +279 -0
- senoquant/tabs/quantification/features/spots/rows.py +241 -0
- senoquant/tabs/quantification/frontend.py +815 -0
- senoquant/tabs/segmentation/__init__.py +1 -0
- senoquant/tabs/segmentation/backend.py +131 -0
- senoquant/tabs/segmentation/frontend.py +1009 -0
- senoquant/tabs/segmentation/models/__init__.py +5 -0
- senoquant/tabs/segmentation/models/base.py +146 -0
- senoquant/tabs/segmentation/models/cpsam/details.json +65 -0
- senoquant/tabs/segmentation/models/cpsam/model.py +150 -0
- senoquant/tabs/segmentation/models/default_2d/details.json +69 -0
- senoquant/tabs/segmentation/models/default_2d/model.py +664 -0
- senoquant/tabs/segmentation/models/default_3d/details.json +69 -0
- senoquant/tabs/segmentation/models/default_3d/model.py +682 -0
- senoquant/tabs/segmentation/models/hf.py +71 -0
- senoquant/tabs/segmentation/models/nuclear_dilation/__init__.py +1 -0
- senoquant/tabs/segmentation/models/nuclear_dilation/details.json +26 -0
- senoquant/tabs/segmentation/models/nuclear_dilation/model.py +96 -0
- senoquant/tabs/segmentation/models/perinuclear_rings/__init__.py +1 -0
- senoquant/tabs/segmentation/models/perinuclear_rings/details.json +34 -0
- senoquant/tabs/segmentation/models/perinuclear_rings/model.py +132 -0
- senoquant/tabs/segmentation/stardist_onnx_utils/__init__.py +2 -0
- senoquant/tabs/segmentation/stardist_onnx_utils/_csbdeep/csbdeep/__init__.py +3 -0
- senoquant/tabs/segmentation/stardist_onnx_utils/_csbdeep/csbdeep/data/__init__.py +6 -0
- senoquant/tabs/segmentation/stardist_onnx_utils/_csbdeep/csbdeep/data/generate.py +470 -0
- senoquant/tabs/segmentation/stardist_onnx_utils/_csbdeep/csbdeep/data/prepare.py +273 -0
- senoquant/tabs/segmentation/stardist_onnx_utils/_csbdeep/csbdeep/data/rawdata.py +112 -0
- senoquant/tabs/segmentation/stardist_onnx_utils/_csbdeep/csbdeep/data/transform.py +384 -0
- senoquant/tabs/segmentation/stardist_onnx_utils/_csbdeep/csbdeep/internals/__init__.py +0 -0
- senoquant/tabs/segmentation/stardist_onnx_utils/_csbdeep/csbdeep/internals/blocks.py +184 -0
- senoquant/tabs/segmentation/stardist_onnx_utils/_csbdeep/csbdeep/internals/losses.py +79 -0
- senoquant/tabs/segmentation/stardist_onnx_utils/_csbdeep/csbdeep/internals/nets.py +165 -0
- senoquant/tabs/segmentation/stardist_onnx_utils/_csbdeep/csbdeep/internals/predict.py +467 -0
- senoquant/tabs/segmentation/stardist_onnx_utils/_csbdeep/csbdeep/internals/probability.py +67 -0
- senoquant/tabs/segmentation/stardist_onnx_utils/_csbdeep/csbdeep/internals/train.py +148 -0
- senoquant/tabs/segmentation/stardist_onnx_utils/_csbdeep/csbdeep/io/__init__.py +163 -0
- senoquant/tabs/segmentation/stardist_onnx_utils/_csbdeep/csbdeep/models/__init__.py +52 -0
- senoquant/tabs/segmentation/stardist_onnx_utils/_csbdeep/csbdeep/models/base_model.py +329 -0
- senoquant/tabs/segmentation/stardist_onnx_utils/_csbdeep/csbdeep/models/care_isotropic.py +160 -0
- senoquant/tabs/segmentation/stardist_onnx_utils/_csbdeep/csbdeep/models/care_projection.py +178 -0
- senoquant/tabs/segmentation/stardist_onnx_utils/_csbdeep/csbdeep/models/care_standard.py +446 -0
- senoquant/tabs/segmentation/stardist_onnx_utils/_csbdeep/csbdeep/models/care_upsampling.py +54 -0
- senoquant/tabs/segmentation/stardist_onnx_utils/_csbdeep/csbdeep/models/config.py +254 -0
- senoquant/tabs/segmentation/stardist_onnx_utils/_csbdeep/csbdeep/models/pretrained.py +119 -0
- senoquant/tabs/segmentation/stardist_onnx_utils/_csbdeep/csbdeep/scripts/__init__.py +0 -0
- senoquant/tabs/segmentation/stardist_onnx_utils/_csbdeep/csbdeep/scripts/care_predict.py +180 -0
- senoquant/tabs/segmentation/stardist_onnx_utils/_csbdeep/csbdeep/utils/__init__.py +5 -0
- senoquant/tabs/segmentation/stardist_onnx_utils/_csbdeep/csbdeep/utils/plot_utils.py +159 -0
- senoquant/tabs/segmentation/stardist_onnx_utils/_csbdeep/csbdeep/utils/six.py +18 -0
- senoquant/tabs/segmentation/stardist_onnx_utils/_csbdeep/csbdeep/utils/tf.py +644 -0
- senoquant/tabs/segmentation/stardist_onnx_utils/_csbdeep/csbdeep/utils/utils.py +272 -0
- senoquant/tabs/segmentation/stardist_onnx_utils/_csbdeep/csbdeep/version.py +1 -0
- senoquant/tabs/segmentation/stardist_onnx_utils/_csbdeep/docs/source/conf.py +368 -0
- senoquant/tabs/segmentation/stardist_onnx_utils/_csbdeep/setup.py +68 -0
- senoquant/tabs/segmentation/stardist_onnx_utils/_csbdeep/tests/test_datagen.py +169 -0
- senoquant/tabs/segmentation/stardist_onnx_utils/_csbdeep/tests/test_models.py +462 -0
- senoquant/tabs/segmentation/stardist_onnx_utils/_csbdeep/tests/test_utils.py +166 -0
- senoquant/tabs/segmentation/stardist_onnx_utils/_csbdeep/tools/create_zip_contents.py +34 -0
- senoquant/tabs/segmentation/stardist_onnx_utils/_stardist/__init__.py +30 -0
- senoquant/tabs/segmentation/stardist_onnx_utils/_stardist/big.py +624 -0
- senoquant/tabs/segmentation/stardist_onnx_utils/_stardist/bioimageio_utils.py +494 -0
- senoquant/tabs/segmentation/stardist_onnx_utils/_stardist/data/__init__.py +39 -0
- senoquant/tabs/segmentation/stardist_onnx_utils/_stardist/geometry/__init__.py +10 -0
- senoquant/tabs/segmentation/stardist_onnx_utils/_stardist/geometry/geom2d.py +215 -0
- senoquant/tabs/segmentation/stardist_onnx_utils/_stardist/geometry/geom3d.py +349 -0
- senoquant/tabs/segmentation/stardist_onnx_utils/_stardist/matching.py +483 -0
- senoquant/tabs/segmentation/stardist_onnx_utils/_stardist/models/__init__.py +28 -0
- senoquant/tabs/segmentation/stardist_onnx_utils/_stardist/models/base.py +1217 -0
- senoquant/tabs/segmentation/stardist_onnx_utils/_stardist/models/model2d.py +594 -0
- senoquant/tabs/segmentation/stardist_onnx_utils/_stardist/models/model3d.py +696 -0
- senoquant/tabs/segmentation/stardist_onnx_utils/_stardist/nms.py +384 -0
- senoquant/tabs/segmentation/stardist_onnx_utils/_stardist/plot/__init__.py +2 -0
- senoquant/tabs/segmentation/stardist_onnx_utils/_stardist/plot/plot.py +74 -0
- senoquant/tabs/segmentation/stardist_onnx_utils/_stardist/plot/render.py +298 -0
- senoquant/tabs/segmentation/stardist_onnx_utils/_stardist/rays3d.py +373 -0
- senoquant/tabs/segmentation/stardist_onnx_utils/_stardist/sample_patches.py +65 -0
- senoquant/tabs/segmentation/stardist_onnx_utils/_stardist/scripts/__init__.py +0 -0
- senoquant/tabs/segmentation/stardist_onnx_utils/_stardist/scripts/predict2d.py +90 -0
- senoquant/tabs/segmentation/stardist_onnx_utils/_stardist/scripts/predict3d.py +93 -0
- senoquant/tabs/segmentation/stardist_onnx_utils/_stardist/utils.py +408 -0
- senoquant/tabs/segmentation/stardist_onnx_utils/_stardist/version.py +1 -0
- senoquant/tabs/segmentation/stardist_onnx_utils/onnx_framework/__init__.py +45 -0
- senoquant/tabs/segmentation/stardist_onnx_utils/onnx_framework/convert/__init__.py +17 -0
- senoquant/tabs/segmentation/stardist_onnx_utils/onnx_framework/convert/cli.py +55 -0
- senoquant/tabs/segmentation/stardist_onnx_utils/onnx_framework/convert/core.py +285 -0
- senoquant/tabs/segmentation/stardist_onnx_utils/onnx_framework/inspect/__init__.py +15 -0
- senoquant/tabs/segmentation/stardist_onnx_utils/onnx_framework/inspect/cli.py +36 -0
- senoquant/tabs/segmentation/stardist_onnx_utils/onnx_framework/inspect/divisibility.py +193 -0
- senoquant/tabs/segmentation/stardist_onnx_utils/onnx_framework/inspect/probe.py +100 -0
- senoquant/tabs/segmentation/stardist_onnx_utils/onnx_framework/inspect/receptive_field.py +182 -0
- senoquant/tabs/segmentation/stardist_onnx_utils/onnx_framework/inspect/rf_cli.py +48 -0
- senoquant/tabs/segmentation/stardist_onnx_utils/onnx_framework/inspect/valid_sizes.py +278 -0
- senoquant/tabs/segmentation/stardist_onnx_utils/onnx_framework/post/__init__.py +8 -0
- senoquant/tabs/segmentation/stardist_onnx_utils/onnx_framework/post/core.py +157 -0
- senoquant/tabs/segmentation/stardist_onnx_utils/onnx_framework/pre/__init__.py +17 -0
- senoquant/tabs/segmentation/stardist_onnx_utils/onnx_framework/pre/core.py +226 -0
- senoquant/tabs/segmentation/stardist_onnx_utils/onnx_framework/predict/__init__.py +5 -0
- senoquant/tabs/segmentation/stardist_onnx_utils/onnx_framework/predict/core.py +401 -0
- senoquant/tabs/settings/__init__.py +1 -0
- senoquant/tabs/settings/backend.py +29 -0
- senoquant/tabs/settings/frontend.py +19 -0
- senoquant/tabs/spots/__init__.py +1 -0
- senoquant/tabs/spots/backend.py +139 -0
- senoquant/tabs/spots/frontend.py +800 -0
- senoquant/tabs/spots/models/__init__.py +5 -0
- senoquant/tabs/spots/models/base.py +94 -0
- senoquant/tabs/spots/models/rmp/details.json +61 -0
- senoquant/tabs/spots/models/rmp/model.py +499 -0
- senoquant/tabs/spots/models/udwt/details.json +103 -0
- senoquant/tabs/spots/models/udwt/model.py +482 -0
- senoquant/utils.py +25 -0
- senoquant-1.0.0b1.dist-info/METADATA +193 -0
- senoquant-1.0.0b1.dist-info/RECORD +148 -0
- senoquant-1.0.0b1.dist-info/WHEEL +5 -0
- senoquant-1.0.0b1.dist-info/entry_points.txt +2 -0
- senoquant-1.0.0b1.dist-info/licenses/LICENSE +28 -0
- senoquant-1.0.0b1.dist-info/top_level.txt +1 -0
|
@@ -0,0 +1,879 @@
|
|
|
1
|
+
"""Marker feature export logic.
|
|
2
|
+
|
|
3
|
+
This module serializes per-label morphology and per-channel intensity
|
|
4
|
+
summaries for the marker feature. When thresholds are enabled for a
|
|
5
|
+
channel, both raw and thresholded intensity columns are exported along
|
|
6
|
+
with a JSON metadata file recording the threshold settings.
|
|
7
|
+
"""
|
|
8
|
+
|
|
9
|
+
from __future__ import annotations
|
|
10
|
+
|
|
11
|
+
import csv
|
|
12
|
+
import json
|
|
13
|
+
import warnings
|
|
14
|
+
from pathlib import Path
|
|
15
|
+
from typing import Iterable, Optional, Sequence, TYPE_CHECKING
|
|
16
|
+
|
|
17
|
+
import numpy as np
|
|
18
|
+
from skimage.measure import regionprops_table
|
|
19
|
+
|
|
20
|
+
from senoquant.utils import layer_data_asarray
|
|
21
|
+
from .config import MarkerFeatureData
|
|
22
|
+
from .morphology import add_morphology_columns
|
|
23
|
+
from ..base import FeatureConfig
|
|
24
|
+
|
|
25
|
+
if TYPE_CHECKING:
|
|
26
|
+
from ..roi import ROIConfig
|
|
27
|
+
|
|
28
|
+
def export_marker(
    feature: FeatureConfig,
    temp_dir: Path,
    viewer=None,
    export_format: str = "csv",
    enable_thresholds: bool = True,
) -> Iterable[Path]:
    """Export marker feature outputs into a temporary directory.

    Parameters
    ----------
    feature : FeatureConfig
        Marker feature configuration to export.
    temp_dir : Path
        Temporary directory where outputs should be written.
    viewer : object, optional
        Napari viewer instance used to resolve layers by name.
    export_format : str, optional
        File format for exports (``"csv"`` or ``"xlsx"``).
    enable_thresholds : bool, optional
        Whether thresholded outputs should be computed.

    Returns
    -------
    iterable of Path
        Paths to files produced by the export routine. Each segmentation
        produces one table, and a shared ``marker_thresholds.json`` file
        is emitted when channels are configured.

    Notes
    -----
    If an image layer does not match a labels layer in shape, that channel
    is skipped and only morphological properties (centroids) are saved.
    When a channel has thresholds enabled, thresholded columns are emitted
    with a ``_thresholded`` suffix while the unthresholded values are kept.
    """
    data = feature.data
    # Both a valid marker payload and a viewer are required to resolve layers.
    if not isinstance(data, MarkerFeatureData) or viewer is None:
        return []

    export_format = (export_format or "csv").lower()
    outputs: list[Path] = []
    # Only channels with a non-empty layer name participate in the export.
    channels = [channel for channel in data.channels if channel.channel]
    if not data.segmentations or not channels:
        return []

    if enable_thresholds:
        # Shared JSON metadata describing threshold settings for all channels.
        metadata_path = _write_threshold_metadata(temp_dir, channels)
        if metadata_path is not None:
            outputs.append(metadata_path)

    for index, segmentation in enumerate(data.segmentations, start=0):
        label_name = segmentation.label.strip()
        if not label_name:
            continue
        labels_layer = _find_layer(viewer, label_name, "Labels")
        if labels_layer is None:
            continue
        labels = layer_data_asarray(labels_layer)
        if labels.size == 0:
            continue

        label_ids, centroids = _compute_centroids(labels)
        if label_ids.size == 0:
            continue
        area_px = _pixel_counts(labels, label_ids)

        # Prefer pixel sizes from the labels layer; fall back to the first
        # channel image layer that carries physical size metadata.
        pixel_sizes = _pixel_sizes(labels_layer, labels.ndim)
        if pixel_sizes is None:
            for channel in channels:
                channel_layer = _find_layer(viewer, channel.channel, "Image")
                if channel_layer is None:
                    continue
                pixel_sizes = _pixel_sizes(channel_layer, labels.ndim)
                if pixel_sizes is not None:
                    break
        rows = _initialize_rows(label_ids, centroids, pixel_sizes)
        _add_roi_columns(rows, labels, label_ids, viewer, data.rois, label_name)
        # Mutates ``rows`` in place; the returned column list is unused here.
        morph_columns = add_morphology_columns(
            rows, labels, label_ids, pixel_sizes
        )

        # Extract file path from metadata if available
        file_path = None
        if channels:
            first_channel_layer = _find_layer(viewer, channels[0].channel, "Image")
            if first_channel_layer is not None:
                metadata = getattr(first_channel_layer, "metadata", {})
                file_path = metadata.get("path")

        # Determine segmentation type from label name or config
        seg_type = getattr(segmentation, "task", "nuclear")
        # Mutates ``rows`` in place; the returned column list is unused here.
        ref_columns = _add_reference_columns(
            rows, labels, label_ids, file_path, seg_type
        )

        header = list(rows[0].keys()) if rows else []

        for channel in channels:
            channel_layer = _find_layer(viewer, channel.channel, "Image")
            if channel_layer is None:
                continue
            image = layer_data_asarray(channel_layer)
            if image.shape != labels.shape:
                warnings.warn(
                    "Marker export: image/label shape mismatch for "
                    f"'{channel.channel}' vs '{label_name}'. "
                    "Skipping intensity metrics for this channel; "
                    "only morphological properties will be saved.",
                    RuntimeWarning,
                )
                continue
            raw_sum = _intensity_sum(labels, image, label_ids)
            mean_intensity = _safe_divide(raw_sum, area_px)
            pixel_volume = _pixel_volume(channel_layer, labels.ndim)
            # Integrated intensity scales the mean by physical label volume.
            integrated = mean_intensity * (area_px * pixel_volume)
            if enable_thresholds:
                thresh_mean, thresh_raw, thresh_integrated = _apply_threshold(
                    mean_intensity,
                    raw_sum,
                    integrated,
                    channel,
                )
            else:
                thresh_mean, thresh_raw, thresh_integrated = (
                    mean_intensity,
                    raw_sum,
                    integrated,
                )
            prefix = _channel_prefix(channel)
            for row, mean_val, raw_val, int_val in zip(
                rows, mean_intensity, raw_sum, integrated
            ):
                row[f"{prefix}_mean_intensity"] = float(mean_val)
                row[f"{prefix}_integrated_intensity"] = float(int_val)
                row[f"{prefix}_raw_integrated_intensity"] = float(raw_val)
            if enable_thresholds and getattr(channel, "threshold_enabled", False):
                for row, mean_val, raw_val, int_val in zip(
                    rows, thresh_mean, thresh_raw, thresh_integrated
                ):
                    row[f"{prefix}_mean_intensity_thresholded"] = float(mean_val)
                    row[f"{prefix}_integrated_intensity_thresholded"] = float(
                        int_val
                    )
                    row[f"{prefix}_raw_integrated_intensity_thresholded"] = float(
                        raw_val
                    )
            if not header:
                # Rows already carry the new columns; take keys wholesale.
                header = list(rows[0].keys())
            else:
                header.extend(
                    [
                        f"{prefix}_mean_intensity",
                        f"{prefix}_integrated_intensity",
                        f"{prefix}_raw_integrated_intensity",
                    ]
                )
                if enable_thresholds and getattr(channel, "threshold_enabled", False):
                    header.extend(
                        [
                            f"{prefix}_mean_intensity_thresholded",
                            f"{prefix}_integrated_intensity_thresholded",
                            f"{prefix}_raw_integrated_intensity_thresholded",
                        ]
                    )

        if not rows:
            continue
        file_stem = _sanitize_name(label_name or f"segmentation_{index}")
        output_path = temp_dir / f"{file_stem}.{export_format}"
        _write_table(output_path, header, rows, export_format)
        outputs.append(output_path)

    return outputs
|
|
202
|
+
|
|
203
|
+
|
|
204
|
+
def _find_layer(viewer, name: str, layer_type: str):
|
|
205
|
+
"""Return a layer by name and class name.
|
|
206
|
+
|
|
207
|
+
Parameters
|
|
208
|
+
----------
|
|
209
|
+
viewer : object
|
|
210
|
+
Napari viewer instance containing layers.
|
|
211
|
+
name : str
|
|
212
|
+
Layer name to locate.
|
|
213
|
+
layer_type : str
|
|
214
|
+
Layer class name to match (e.g., ``"Image"`` or ``"Labels"``).
|
|
215
|
+
|
|
216
|
+
Returns
|
|
217
|
+
-------
|
|
218
|
+
object or None
|
|
219
|
+
Matching layer instance, or ``None`` if not found.
|
|
220
|
+
"""
|
|
221
|
+
for layer in viewer.layers:
|
|
222
|
+
if layer.__class__.__name__ == layer_type and layer.name == name:
|
|
223
|
+
return layer
|
|
224
|
+
return None
|
|
225
|
+
|
|
226
|
+
|
|
227
|
+
def _compute_centroids(labels: np.ndarray) -> tuple[np.ndarray, np.ndarray]:
    """Compute centroid coordinates for each non-zero label.

    Parameters
    ----------
    labels : numpy.ndarray
        Label image with integer ids.

    Returns
    -------
    tuple of numpy.ndarray
        Label ids and centroid coordinates in pixel units.
    """
    table = regionprops_table(labels, properties=("label", "centroid"))
    ids = np.asarray(table.get("label", []), dtype=int)
    # regionprops_table splits centroids into one "centroid-<axis>" column
    # per spatial dimension; stack them back into an (n, ndim) array.
    coordinate_keys = [key for key in table if key.startswith("centroid-")]
    if not coordinate_keys:
        return ids, np.empty((0, labels.ndim), dtype=float)
    coordinates = np.column_stack(
        [table[key] for key in coordinate_keys]
    ).astype(float)
    return ids, coordinates
|
|
247
|
+
|
|
248
|
+
|
|
249
|
+
def _pixel_counts(labels: np.ndarray, label_ids: np.ndarray) -> np.ndarray:
|
|
250
|
+
"""Return pixel counts for each label id.
|
|
251
|
+
|
|
252
|
+
Parameters
|
|
253
|
+
----------
|
|
254
|
+
labels : numpy.ndarray
|
|
255
|
+
Label image with integer ids.
|
|
256
|
+
label_ids : numpy.ndarray
|
|
257
|
+
Label ids to extract counts for.
|
|
258
|
+
|
|
259
|
+
Returns
|
|
260
|
+
-------
|
|
261
|
+
numpy.ndarray
|
|
262
|
+
Pixel counts for each provided label id.
|
|
263
|
+
"""
|
|
264
|
+
labels_flat = labels.ravel()
|
|
265
|
+
max_label = int(labels_flat.max()) if labels_flat.size else 0
|
|
266
|
+
counts = np.bincount(labels_flat, minlength=max_label + 1)
|
|
267
|
+
return counts[label_ids]
|
|
268
|
+
|
|
269
|
+
|
|
270
|
+
def _intensity_sum(
|
|
271
|
+
labels: np.ndarray, image: np.ndarray, label_ids: np.ndarray
|
|
272
|
+
) -> np.ndarray:
|
|
273
|
+
"""Return raw intensity sums for each label id.
|
|
274
|
+
|
|
275
|
+
Parameters
|
|
276
|
+
----------
|
|
277
|
+
labels : numpy.ndarray
|
|
278
|
+
Label image with integer ids.
|
|
279
|
+
image : numpy.ndarray
|
|
280
|
+
Image data aligned to ``labels``.
|
|
281
|
+
label_ids : numpy.ndarray
|
|
282
|
+
Label ids to extract sums for.
|
|
283
|
+
|
|
284
|
+
Returns
|
|
285
|
+
-------
|
|
286
|
+
numpy.ndarray
|
|
287
|
+
Raw intensity sums for each label id.
|
|
288
|
+
"""
|
|
289
|
+
labels_flat = labels.ravel()
|
|
290
|
+
image_flat = np.nan_to_num(image.ravel(), nan=0.0)
|
|
291
|
+
max_label = int(labels_flat.max()) if labels_flat.size else 0
|
|
292
|
+
sums = np.bincount(labels_flat, weights=image_flat, minlength=max_label + 1)
|
|
293
|
+
return sums[label_ids]
|
|
294
|
+
|
|
295
|
+
|
|
296
|
+
def _pixel_volume(layer, ndim: int) -> float:
    """Compute per-pixel physical volume from layer metadata.

    Parameters
    ----------
    layer : object
        Napari image layer providing metadata.
    ndim : int
        Dimensionality of the image data.

    Returns
    -------
    float
        Physical volume of one pixel/voxel in cubic micrometers.

    Notes
    -----
    The SenoQuant reader stores physical sizes under
    ``layer.metadata["physical_pixel_sizes"]`` with keys ``"Z"``, ``"Y"``,
    and ``"X"`` in micrometers (um). Missing values default to 1.0 so the
    measurement stays in pixel units.
    """
    sizes = _pixel_sizes(layer, ndim)
    # Without metadata the volume defaults to one pixel unit.
    return 1.0 if sizes is None else float(np.prod(sizes))
|
|
322
|
+
|
|
323
|
+
|
|
324
|
+
def _safe_float(value) -> float | None:
|
|
325
|
+
"""Convert a metadata value to float when possible.
|
|
326
|
+
|
|
327
|
+
Parameters
|
|
328
|
+
----------
|
|
329
|
+
value : object
|
|
330
|
+
Metadata value to convert.
|
|
331
|
+
|
|
332
|
+
Returns
|
|
333
|
+
-------
|
|
334
|
+
float or None
|
|
335
|
+
Converted value when possible, otherwise ``None``.
|
|
336
|
+
"""
|
|
337
|
+
if value is None:
|
|
338
|
+
return None
|
|
339
|
+
try:
|
|
340
|
+
return float(value)
|
|
341
|
+
except (TypeError, ValueError):
|
|
342
|
+
return None
|
|
343
|
+
|
|
344
|
+
|
|
345
|
+
def _pixel_sizes(layer, ndim: int) -> np.ndarray | None:
    """Return per-axis pixel sizes from layer metadata.

    Parameters
    ----------
    layer : object
        Napari image layer providing metadata.
    ndim : int
        Dimensionality of the image data.

    Returns
    -------
    numpy.ndarray or None
        Per-axis pixel sizes in micrometers, ordered to match the data axes.

    Notes
    -----
    For 2D images the Z size may be ``None`` and is ignored.
    """
    metadata = getattr(layer, "metadata", None)
    if not isinstance(metadata, dict):
        return None
    physical = metadata.get("physical_pixel_sizes")
    if not isinstance(physical, dict):
        return None
    # Delegate ordering/validation so 2D and 3D cases share one code path.
    return _pixel_sizes_from_metadata(
        physical.get("X"), physical.get("Y"), physical.get("Z"), ndim
    )
|
|
374
|
+
|
|
375
|
+
|
|
376
|
+
def _pixel_sizes_from_metadata(
    size_x, size_y, size_z, ndim: int
) -> np.ndarray | None:
    """Normalize metadata sizes into axis-ordered pixel sizes.

    Parameters
    ----------
    size_x : object
        Physical size along X.
    size_y : object
        Physical size along Y.
    size_z : object
        Physical size along Z.
    ndim : int
        Dimensionality of the image data.

    Returns
    -------
    numpy.ndarray or None
        Axis-ordered pixel sizes in micrometers.
    """
    x = _safe_float(size_x)
    y = _safe_float(size_y)
    z = _safe_float(size_z)
    # Output ordering matches numpy axis order: (y, x) or (z, y, x).
    if ndim == 2:
        ordered = [y, x]
    elif ndim == 3:
        ordered = [z, y, x]
    else:
        return None
    if any(entry is None for entry in ordered):
        return None
    return np.asarray(ordered, dtype=float)
|
|
411
|
+
|
|
412
|
+
|
|
413
|
+
def _add_roi_columns(
    rows: list[dict[str, float]],
    labels: np.ndarray,
    label_ids: np.ndarray,
    viewer: object | None,
    rois: Sequence["ROIConfig"],
    label_name: str,
) -> None:
    """Add per-ROI inclusion columns to the output rows.

    Parameters
    ----------
    rows : list of dict
        Output row dictionaries to update in-place.
    labels : numpy.ndarray
        Label image used to compute ROI intersections.
    label_ids : numpy.ndarray
        Label ids corresponding to the output rows.
    viewer : object or None
        Napari viewer used to resolve shapes layers.
    rois : sequence of ROIConfig
        ROI configuration entries to evaluate.
    label_name : str
        Name of the labels layer (for warnings).
    """
    if viewer is None or not rois or not rows:
        return
    labels_flat = labels.ravel()
    max_label = int(labels_flat.max()) if labels_flat.size else 0
    for index, roi in enumerate(rois, start=0):
        layer_name = getattr(roi, "layer", "")
        if not layer_name:
            continue
        shapes_layer = _find_layer(viewer, layer_name, "Shapes")
        if shapes_layer is None:
            # Missing ROI layers are reported but do not abort the export.
            warnings.warn(
                f"ROI layer '{layer_name}' not found for labels '{label_name}'.",
                RuntimeWarning,
            )
            continue
        mask = _shapes_layer_mask(shapes_layer, labels.shape)
        if mask is None:
            warnings.warn(
                f"ROI layer '{layer_name}' could not be rasterized.",
                RuntimeWarning,
            )
            continue
        # Count, per label id, how many of its pixels fall inside the ROI
        # mask; any overlap (> 0) marks the label as intersecting the ROI.
        intersect_counts = np.bincount(
            labels_flat[mask.ravel()], minlength=max_label + 1
        )
        included = intersect_counts[label_ids] > 0
        roi_name = getattr(roi, "name", "") or f"roi_{index}"
        roi_type = getattr(roi, "roi_type", "Include") or "Include"
        # Column prefix records ROI polarity; the flag itself is always
        # "does this label overlap the ROI" regardless of polarity.
        if roi_type.lower() == "exclude":
            prefix = "excluded_from_roi"
        else:
            prefix = "included_in_roi"
        column = f"{prefix}_{_sanitize_name(roi_name)}"
        for row, value in zip(rows, included):
            row[column] = int(value)
|
|
473
|
+
|
|
474
|
+
|
|
475
|
+
def _shapes_layer_mask(
    layer: object, shape: tuple[int, ...]
) -> np.ndarray | None:
    """Render a shapes layer into a boolean mask.

    Parameters
    ----------
    layer : object
        Napari shapes layer instance.
    shape : tuple of int
        Target mask shape matching the labels array.

    Returns
    -------
    numpy.ndarray or None
        Boolean mask array when rendering succeeds.
    """
    raw_masks = _shape_masks_array(layer, shape)
    if raw_masks is None:
        return None
    # A per-shape stack (extra leading axis) is collapsed with logical OR;
    # an array already matching the target rank is used as-is.
    if raw_masks.ndim == len(shape):
        merged = raw_masks
    else:
        merged = np.any(raw_masks, axis=0)
    merged = np.squeeze(np.asarray(merged))
    if merged.shape != shape:
        return None
    return merged.astype(bool)
|
|
504
|
+
|
|
505
|
+
|
|
506
|
+
def _shape_masks_array(
|
|
507
|
+
layer: object, shape: tuple[int, ...]
|
|
508
|
+
) -> np.ndarray | None:
|
|
509
|
+
"""Return the raw masks array from a shapes layer."""
|
|
510
|
+
to_masks = getattr(layer, "to_masks", None)
|
|
511
|
+
if callable(to_masks):
|
|
512
|
+
try:
|
|
513
|
+
return np.asarray(to_masks(mask_shape=shape))
|
|
514
|
+
except Exception:
|
|
515
|
+
return None
|
|
516
|
+
return None
|
|
517
|
+
|
|
518
|
+
|
|
519
|
+
def _axis_names(ndim: int) -> list[str]:
|
|
520
|
+
"""Return axis suffixes for centroid columns.
|
|
521
|
+
|
|
522
|
+
Parameters
|
|
523
|
+
----------
|
|
524
|
+
ndim : int
|
|
525
|
+
Number of spatial dimensions.
|
|
526
|
+
|
|
527
|
+
Returns
|
|
528
|
+
-------
|
|
529
|
+
list of str
|
|
530
|
+
Axis suffixes in display order.
|
|
531
|
+
"""
|
|
532
|
+
if ndim == 2:
|
|
533
|
+
return ["y", "x"]
|
|
534
|
+
if ndim == 3:
|
|
535
|
+
return ["z", "y", "x"]
|
|
536
|
+
return [f"axis_{idx}" for idx in range(ndim)]
|
|
537
|
+
|
|
538
|
+
|
|
539
|
+
def _initialize_rows(
    label_ids: np.ndarray,
    centroids: np.ndarray,
    pixel_sizes: np.ndarray | None,
) -> list[dict[str, float]]:
    """Initialize output rows with label ids and centroid coordinates.

    Parameters
    ----------
    label_ids : numpy.ndarray
        Label identifiers for each row.
    centroids : numpy.ndarray
        Centroid coordinates in pixel units.
    pixel_sizes : numpy.ndarray or None
        Per-axis pixel sizes in micrometers.

    Returns
    -------
    list of dict
        Row dictionaries with centroid fields populated.
    """
    axes = _axis_names(centroids.shape[1] if centroids.size else 0)
    # Physical (um) columns are emitted only when a size exists per axis.
    has_scaling = pixel_sizes is not None and pixel_sizes.size == len(axes)
    rows: list[dict[str, float]] = []
    for label_id, centroid in zip(label_ids, centroids):
        entry: dict[str, float] = {"label_id": int(label_id)}
        for axis, coordinate in zip(axes, centroid):
            entry[f"centroid_{axis}_pixels"] = float(coordinate)
        if has_scaling:
            for axis, coordinate, size in zip(axes, centroid, pixel_sizes):
                entry[f"centroid_{axis}_um"] = float(coordinate * size)
        rows.append(entry)
    return rows
|
|
571
|
+
|
|
572
|
+
|
|
573
|
+
def _channel_prefix(channel) -> str:
    """Return a sanitized column prefix for a marker channel.

    Parameters
    ----------
    channel : object
        Marker channel configuration; ``channel.name`` is preferred,
        falling back to ``channel.channel`` when the name is empty.

    Returns
    -------
    str
        Sanitized prefix suitable for column names.
    """
    raw = channel.name.strip() if channel.name else ""
    # Fall back to the channel identifier when no display name is set.
    return _sanitize_name(raw or channel.channel)
|
|
590
|
+
|
|
591
|
+
|
|
592
|
+
def _sanitize_name(value: str) -> str:
|
|
593
|
+
"""Normalize names for filenames and column prefixes.
|
|
594
|
+
|
|
595
|
+
Parameters
|
|
596
|
+
----------
|
|
597
|
+
value : str
|
|
598
|
+
Raw name to sanitize.
|
|
599
|
+
|
|
600
|
+
Returns
|
|
601
|
+
-------
|
|
602
|
+
str
|
|
603
|
+
Lowercased name with unsafe characters removed.
|
|
604
|
+
"""
|
|
605
|
+
cleaned = "".join(
|
|
606
|
+
char if char.isalnum() or char in "-_ " else "_" for char in value
|
|
607
|
+
)
|
|
608
|
+
return cleaned.strip().replace(" ", "_").lower()
|
|
609
|
+
|
|
610
|
+
|
|
611
|
+
def _safe_divide(numerator: np.ndarray, denominator: np.ndarray) -> np.ndarray:
|
|
612
|
+
"""Compute numerator/denominator with zero-safe handling.
|
|
613
|
+
|
|
614
|
+
Parameters
|
|
615
|
+
----------
|
|
616
|
+
numerator : numpy.ndarray
|
|
617
|
+
Numerator values.
|
|
618
|
+
denominator : numpy.ndarray
|
|
619
|
+
Denominator values.
|
|
620
|
+
|
|
621
|
+
Returns
|
|
622
|
+
-------
|
|
623
|
+
numpy.ndarray
|
|
624
|
+
Division result with zero denominators handled safely.
|
|
625
|
+
"""
|
|
626
|
+
result = np.zeros_like(numerator, dtype=float)
|
|
627
|
+
np.divide(
|
|
628
|
+
numerator,
|
|
629
|
+
denominator,
|
|
630
|
+
out=result,
|
|
631
|
+
where=denominator != 0,
|
|
632
|
+
)
|
|
633
|
+
return result
|
|
634
|
+
|
|
635
|
+
|
|
636
|
+
def _apply_threshold(
|
|
637
|
+
mean_intensity: np.ndarray,
|
|
638
|
+
raw_sum: np.ndarray,
|
|
639
|
+
integrated: np.ndarray,
|
|
640
|
+
channel,
|
|
641
|
+
) -> tuple[np.ndarray, np.ndarray, np.ndarray]:
|
|
642
|
+
"""Zero intensity values outside the configured threshold range.
|
|
643
|
+
|
|
644
|
+
Parameters
|
|
645
|
+
----------
|
|
646
|
+
mean_intensity : np.ndarray
|
|
647
|
+
Mean intensity per label.
|
|
648
|
+
raw_sum : np.ndarray
|
|
649
|
+
Raw integrated intensity per label.
|
|
650
|
+
integrated : np.ndarray
|
|
651
|
+
Physical-unit integrated intensity per label.
|
|
652
|
+
channel : object
|
|
653
|
+
Channel configuration with threshold metadata.
|
|
654
|
+
|
|
655
|
+
Returns
|
|
656
|
+
-------
|
|
657
|
+
tuple of numpy.ndarray
|
|
658
|
+
Thresholded mean, raw, and integrated intensity arrays.
|
|
659
|
+
"""
|
|
660
|
+
if not getattr(channel, "threshold_enabled", False):
|
|
661
|
+
return mean_intensity, raw_sum, integrated
|
|
662
|
+
min_val = getattr(channel, "threshold_min", None)
|
|
663
|
+
max_val = getattr(channel, "threshold_max", None)
|
|
664
|
+
keep = np.ones_like(mean_intensity, dtype=bool)
|
|
665
|
+
if min_val is not None:
|
|
666
|
+
keep &= mean_intensity >= float(min_val)
|
|
667
|
+
if max_val is not None:
|
|
668
|
+
keep &= mean_intensity <= float(max_val)
|
|
669
|
+
if keep.all():
|
|
670
|
+
return mean_intensity, raw_sum, integrated
|
|
671
|
+
mean = mean_intensity.copy()
|
|
672
|
+
raw = raw_sum.copy()
|
|
673
|
+
integ = integrated.copy()
|
|
674
|
+
mean[~keep] = 0.0
|
|
675
|
+
raw[~keep] = 0.0
|
|
676
|
+
integ[~keep] = 0.0
|
|
677
|
+
return mean, raw, integ
|
|
678
|
+
|
|
679
|
+
|
|
680
|
+
def _write_threshold_metadata(
|
|
681
|
+
temp_dir: Path, channels: list
|
|
682
|
+
) -> Optional[Path]:
|
|
683
|
+
"""Persist channel threshold metadata for the export run.
|
|
684
|
+
|
|
685
|
+
Parameters
|
|
686
|
+
----------
|
|
687
|
+
temp_dir : Path
|
|
688
|
+
Temporary output directory.
|
|
689
|
+
channels : list
|
|
690
|
+
Channel configurations to serialize.
|
|
691
|
+
|
|
692
|
+
Returns
|
|
693
|
+
-------
|
|
694
|
+
pathlib.Path or None
|
|
695
|
+
Path to the metadata file written.
|
|
696
|
+
"""
|
|
697
|
+
payload = {
|
|
698
|
+
"channels": [
|
|
699
|
+
{
|
|
700
|
+
"name": channel.name,
|
|
701
|
+
"channel": channel.channel,
|
|
702
|
+
"threshold_enabled": bool(channel.threshold_enabled),
|
|
703
|
+
"threshold_method": channel.threshold_method,
|
|
704
|
+
"threshold_min": channel.threshold_min,
|
|
705
|
+
"threshold_max": channel.threshold_max,
|
|
706
|
+
}
|
|
707
|
+
for channel in channels
|
|
708
|
+
]
|
|
709
|
+
}
|
|
710
|
+
output_path = temp_dir / "marker_thresholds.json"
|
|
711
|
+
with output_path.open("w", encoding="utf-8") as handle:
|
|
712
|
+
json.dump(payload, handle, indent=2)
|
|
713
|
+
return output_path
|
|
714
|
+
|
|
715
|
+
|
|
716
|
+
def _add_reference_columns(
|
|
717
|
+
rows: list[dict],
|
|
718
|
+
labels: np.ndarray,
|
|
719
|
+
label_ids: np.ndarray,
|
|
720
|
+
file_path: str | None,
|
|
721
|
+
segmentation_type: str,
|
|
722
|
+
) -> list[str]:
|
|
723
|
+
"""Add reference columns to marker export rows.
|
|
724
|
+
|
|
725
|
+
Parameters
|
|
726
|
+
----------
|
|
727
|
+
rows : list of dict
|
|
728
|
+
Output row dictionaries to update in-place.
|
|
729
|
+
labels : numpy.ndarray
|
|
730
|
+
Label image with integer ids.
|
|
731
|
+
label_ids : numpy.ndarray
|
|
732
|
+
Label ids corresponding to the output rows.
|
|
733
|
+
file_path : str or None
|
|
734
|
+
Original file path from metadata.
|
|
735
|
+
segmentation_type : str
|
|
736
|
+
Type of segmentation ("nuclear" or "cytoplasmic").
|
|
737
|
+
|
|
738
|
+
Returns
|
|
739
|
+
-------
|
|
740
|
+
list of str
|
|
741
|
+
List of column names added.
|
|
742
|
+
"""
|
|
743
|
+
column_names: list[str] = []
|
|
744
|
+
|
|
745
|
+
# Add file path column
|
|
746
|
+
if file_path:
|
|
747
|
+
for row in rows:
|
|
748
|
+
row["file_path"] = str(file_path)
|
|
749
|
+
column_names.append("file_path")
|
|
750
|
+
|
|
751
|
+
# Add segmentation type column
|
|
752
|
+
for row in rows:
|
|
753
|
+
row["segmentation_type"] = segmentation_type
|
|
754
|
+
column_names.append("segmentation_type")
|
|
755
|
+
|
|
756
|
+
return column_names
|
|
757
|
+
|
|
758
|
+
|
|
759
|
+
def _build_cross_segmentation_map(
|
|
760
|
+
all_segmentations: dict[str, tuple[np.ndarray, np.ndarray]],
|
|
761
|
+
) -> dict[tuple[str, int], list[tuple[str, int]]]:
|
|
762
|
+
"""Build a mapping of label overlaps across segmentations.
|
|
763
|
+
|
|
764
|
+
Parameters
|
|
765
|
+
----------
|
|
766
|
+
all_segmentations : dict
|
|
767
|
+
Mapping from segmentation name to (labels, label_ids) tuple.
|
|
768
|
+
|
|
769
|
+
Returns
|
|
770
|
+
-------
|
|
771
|
+
dict
|
|
772
|
+
Mapping from (seg_name, label_id) to list of overlapping
|
|
773
|
+
(other_seg_name, overlapping_label_id) tuples.
|
|
774
|
+
|
|
775
|
+
Notes
|
|
776
|
+
-----
|
|
777
|
+
This function identifies which labels from different segmentations
|
|
778
|
+
overlap spatially, enabling cross-referencing between tables.
|
|
779
|
+
"""
|
|
780
|
+
cross_map: dict[tuple[str, int], list[tuple[str, int]]] = {}
|
|
781
|
+
|
|
782
|
+
seg_names = list(all_segmentations.keys())
|
|
783
|
+
for i, seg1_name in enumerate(seg_names):
|
|
784
|
+
labels1, label_ids1 = all_segmentations[seg1_name]
|
|
785
|
+
for label_id1 in label_ids1:
|
|
786
|
+
cross_map[(seg1_name, int(label_id1))] = []
|
|
787
|
+
# Check overlaps with all other segmentations
|
|
788
|
+
for seg2_name in seg_names[i + 1 :]:
|
|
789
|
+
labels2, _label_ids2 = all_segmentations[seg2_name]
|
|
790
|
+
# Find which labels in seg2 overlap with label_id1
|
|
791
|
+
mask1 = labels1 == label_id1
|
|
792
|
+
overlapping_labels2 = np.unique(labels2[mask1])
|
|
793
|
+
overlapping_labels2 = overlapping_labels2[overlapping_labels2 > 0]
|
|
794
|
+
for label_id2 in overlapping_labels2:
|
|
795
|
+
cross_map[(seg1_name, int(label_id1))].append(
|
|
796
|
+
(seg2_name, int(label_id2)),
|
|
797
|
+
)
|
|
798
|
+
|
|
799
|
+
return cross_map
|
|
800
|
+
|
|
801
|
+
|
|
802
|
+
def _add_cross_reference_column(
|
|
803
|
+
rows: list[dict],
|
|
804
|
+
segmentation_name: str,
|
|
805
|
+
label_ids: np.ndarray,
|
|
806
|
+
cross_map: dict,
|
|
807
|
+
) -> str:
|
|
808
|
+
"""Add a cross-reference column to rows for multi-segmentation overlaps.
|
|
809
|
+
|
|
810
|
+
Parameters
|
|
811
|
+
----------
|
|
812
|
+
rows : list of dict
|
|
813
|
+
Output row dictionaries to update in-place.
|
|
814
|
+
segmentation_name : str
|
|
815
|
+
Name of this segmentation.
|
|
816
|
+
label_ids : numpy.ndarray
|
|
817
|
+
Label ids corresponding to the output rows.
|
|
818
|
+
cross_map : dict
|
|
819
|
+
Cross-segmentation overlap mapping from _build_cross_segmentation_map.
|
|
820
|
+
|
|
821
|
+
Returns
|
|
822
|
+
-------
|
|
823
|
+
str
|
|
824
|
+
Column name added.
|
|
825
|
+
"""
|
|
826
|
+
for row, label_id in zip(rows, label_ids, strict=True):
|
|
827
|
+
overlaps = cross_map.get((segmentation_name, int(label_id)), [])
|
|
828
|
+
if overlaps:
|
|
829
|
+
overlap_str = ";".join(
|
|
830
|
+
[f"{seg}_{lid}" for seg, lid in overlaps],
|
|
831
|
+
)
|
|
832
|
+
row["overlaps_with"] = overlap_str
|
|
833
|
+
else:
|
|
834
|
+
row["overlaps_with"] = ""
|
|
835
|
+
|
|
836
|
+
return "overlaps_with"
|
|
837
|
+
|
|
838
|
+
|
|
839
|
+
|
|
840
|
+
|
|
841
|
+
def _write_table(
|
|
842
|
+
path: Path, header: list[str], rows: list[dict[str, float]], fmt: str
|
|
843
|
+
) -> None:
|
|
844
|
+
"""Write rows to disk as CSV or XLSX.
|
|
845
|
+
|
|
846
|
+
Parameters
|
|
847
|
+
----------
|
|
848
|
+
path : pathlib.Path
|
|
849
|
+
Destination file path.
|
|
850
|
+
header : list of str
|
|
851
|
+
Column names for the output table.
|
|
852
|
+
rows : list of dict
|
|
853
|
+
Table rows keyed by column name.
|
|
854
|
+
fmt : str
|
|
855
|
+
Output format (``"csv"`` or ``"xlsx"``).
|
|
856
|
+
"""
|
|
857
|
+
if fmt == "csv":
|
|
858
|
+
with path.open("w", newline="", encoding="utf-8") as handle:
|
|
859
|
+
writer = csv.DictWriter(handle, fieldnames=header)
|
|
860
|
+
writer.writeheader()
|
|
861
|
+
writer.writerows(rows)
|
|
862
|
+
return
|
|
863
|
+
|
|
864
|
+
if fmt == "xlsx":
|
|
865
|
+
try:
|
|
866
|
+
import openpyxl
|
|
867
|
+
except ImportError as exc: # pragma: no cover
|
|
868
|
+
raise RuntimeError(
|
|
869
|
+
"openpyxl is required for xlsx export"
|
|
870
|
+
) from exc
|
|
871
|
+
workbook = openpyxl.Workbook()
|
|
872
|
+
sheet = workbook.active
|
|
873
|
+
sheet.append(header)
|
|
874
|
+
for row in rows:
|
|
875
|
+
sheet.append([row.get(column) for column in header])
|
|
876
|
+
workbook.save(path)
|
|
877
|
+
return
|
|
878
|
+
|
|
879
|
+
raise ValueError(f"Unsupported export format: {fmt}")
|