ngio 0.3.5__py3-none-any.whl → 0.4.0a2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- ngio/__init__.py +6 -0
- ngio/common/__init__.py +50 -48
- ngio/common/_array_io_pipes.py +554 -0
- ngio/common/_array_io_utils.py +508 -0
- ngio/common/_dimensions.py +63 -27
- ngio/common/_masking_roi.py +38 -10
- ngio/common/_pyramid.py +9 -7
- ngio/common/_roi.py +583 -72
- ngio/common/_synt_images_utils.py +101 -0
- ngio/common/_zoom.py +17 -12
- ngio/common/transforms/__init__.py +5 -0
- ngio/common/transforms/_label.py +12 -0
- ngio/common/transforms/_zoom.py +109 -0
- ngio/experimental/__init__.py +5 -0
- ngio/experimental/iterators/__init__.py +17 -0
- ngio/experimental/iterators/_abstract_iterator.py +170 -0
- ngio/experimental/iterators/_feature.py +151 -0
- ngio/experimental/iterators/_image_processing.py +169 -0
- ngio/experimental/iterators/_rois_utils.py +127 -0
- ngio/experimental/iterators/_segmentation.py +282 -0
- ngio/hcs/_plate.py +41 -36
- ngio/images/__init__.py +22 -1
- ngio/images/_abstract_image.py +247 -117
- ngio/images/_create.py +15 -15
- ngio/images/_create_synt_container.py +128 -0
- ngio/images/_image.py +425 -62
- ngio/images/_label.py +33 -30
- ngio/images/_masked_image.py +396 -122
- ngio/images/_ome_zarr_container.py +203 -66
- ngio/{common → images}/_table_ops.py +41 -41
- ngio/ome_zarr_meta/ngio_specs/__init__.py +2 -8
- ngio/ome_zarr_meta/ngio_specs/_axes.py +151 -128
- ngio/ome_zarr_meta/ngio_specs/_channels.py +55 -18
- ngio/ome_zarr_meta/ngio_specs/_dataset.py +7 -7
- ngio/ome_zarr_meta/ngio_specs/_ngio_hcs.py +3 -3
- ngio/ome_zarr_meta/ngio_specs/_ngio_image.py +11 -68
- ngio/ome_zarr_meta/v04/_v04_spec_utils.py +1 -1
- ngio/resources/20200812-CardiomyocyteDifferentiation14-Cycle1_B03/mask.png +0 -0
- ngio/resources/20200812-CardiomyocyteDifferentiation14-Cycle1_B03/nuclei.png +0 -0
- ngio/resources/20200812-CardiomyocyteDifferentiation14-Cycle1_B03/raw.jpg +0 -0
- ngio/resources/__init__.py +54 -0
- ngio/resources/resource_model.py +35 -0
- ngio/tables/backends/_abstract_backend.py +5 -6
- ngio/tables/backends/_anndata.py +1 -1
- ngio/tables/backends/_anndata_utils.py +3 -3
- ngio/tables/backends/_non_zarr_backends.py +1 -1
- ngio/tables/backends/_table_backends.py +0 -1
- ngio/tables/backends/_utils.py +3 -3
- ngio/tables/v1/_roi_table.py +156 -69
- ngio/utils/__init__.py +2 -3
- ngio/utils/_logger.py +19 -0
- ngio/utils/_zarr_utils.py +1 -5
- {ngio-0.3.5.dist-info → ngio-0.4.0a2.dist-info}/METADATA +3 -1
- ngio-0.4.0a2.dist-info/RECORD +76 -0
- ngio/common/_array_pipe.py +0 -288
- ngio/common/_axes_transforms.py +0 -64
- ngio/common/_common_types.py +0 -5
- ngio/common/_slicer.py +0 -96
- ngio-0.3.5.dist-info/RECORD +0 -61
- {ngio-0.3.5.dist-info → ngio-0.4.0a2.dist-info}/WHEEL +0 -0
- {ngio-0.3.5.dist-info → ngio-0.4.0a2.dist-info}/licenses/LICENSE +0 -0
ngio/images/_image.py
CHANGED
@@ -1,12 +1,21 @@
 """Generic class to handle Image-like data in a OME-NGFF file."""

-from collections.abc import
+from collections.abc import Sequence
 from typing import Literal

 import dask.array as da
-
-from
-
+import numpy as np
+from pydantic import BaseModel, model_validator
+
+from ngio.common import (
+    ArrayLike,
+    Dimensions,
+    Roi,
+    RoiPixels,
+    SlicingInputType,
+    TransformProtocol,
+)
+from ngio.images._abstract_image import AbstractImage
 from ngio.images._create import create_empty_image_container
 from ngio.ome_zarr_meta import (
     ImageMetaHandler,
@@ -30,18 +39,55 @@ from ngio.utils import (
 )


+class ChannelSelectionModel(BaseModel):
+    """Model for channel selection.
+
+    This model is used to select a channel by label, wavelength ID, or index.
+
+    Args:
+        identifier (str): Unique identifier for the channel.
+            This can be a channel label, wavelength ID, or index.
+        mode (Literal["label", "wavelength_id", "index"]): Specifies how to
+            interpret the identifier. Can be "label", "wavelength_id", or
+            "index" (must be an integer).
+
+    """
+
+    mode: Literal["label", "wavelength_id", "index"] = "label"
+    identifier: str
+
+    @model_validator(mode="after")
+    def check_channel_selection(self):
+        if self.mode == "index":
+            try:
+                int(self.identifier)
+            except ValueError as e:
+                raise ValueError(
+                    "Identifier must be an integer when mode is 'index'"
+                ) from e
+        return self
+
+
+ChannelSlicingInputType = (
+    None
+    | int
+    | str
+    | ChannelSelectionModel
+    | Sequence[str | ChannelSelectionModel | int]
+)
+
+
 def _check_channel_meta(meta: NgioImageMeta, dimension: Dimensions) -> ChannelsMeta:
     """Check the channel metadata."""
-    c_dim = dimension.get("c",
-    c_dim = 1 if c_dim is None else c_dim
+    c_dim = dimension.get("c", default=1)

     if meta.channels_meta is None:
         return ChannelsMeta.default_init(labels=c_dim)

-    if len(meta.channels) != c_dim:
+    if len(meta.channels_meta.channels) != c_dim:
         raise NgioValidationError(
             "The number of channels does not match the image. "
-            f"Expected {len(meta.channels)} channels, got {c_dim}."
+            f"Expected {len(meta.channels_meta.channels)} channels, got {c_dim}."
         )

     return meta.channels_meta
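To make the new channel-selection plumbing concrete, here is a minimal usage sketch of `ChannelSelectionModel` as defined in the hunk above. It assumes pydantic v2 semantics (the `@model_validator(mode="after")` hook) and that the class is importable from its defining module; the channel label "DAPI" is purely illustrative.

```python
from ngio.images._image import ChannelSelectionModel

# Default mode is "label": select a channel by its label.
by_label = ChannelSelectionModel(identifier="DAPI")

# Select the third channel positionally; the validator only checks that the
# identifier parses as an integer when mode == "index".
by_index = ChannelSelectionModel(mode="index", identifier="2")

# A non-numeric identifier with mode="index" fails validation
# (pydantic's ValidationError is a ValueError subclass).
try:
    ChannelSelectionModel(mode="index", identifier="DAPI")
except ValueError as err:
    print(err)
```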
@@ -72,33 +118,268 @@ class Image(AbstractImage[ImageMetaHandler]):
         super().__init__(
             group_handler=group_handler, path=path, meta_handler=meta_handler
         )
-        self._channels_meta = _check_channel_meta(self.meta, self.dimensions)

     @property
     def meta(self) -> NgioImageMeta:
         """Return the metadata."""
         return self._meta_handler.meta

+    @property
+    def channels_meta(self) -> ChannelsMeta:
+        """Return the channels metadata."""
+        return _check_channel_meta(self.meta, self.dimensions)
+
     @property
     def channel_labels(self) -> list[str]:
         """Return the channels of the image."""
-        channel_labels
-        for c in self._channels_meta.channels:
-            channel_labels.append(c.label)
-        return channel_labels
+        return self.channels_meta.channel_labels

     @property
     def wavelength_ids(self) -> list[str | None]:
         """Return the list of wavelength of the image."""
-
-        for c in self._channels_meta.channels:
-            wavelength_ids.append(c.wavelength_id)
-        return wavelength_ids
+        return self.channels_meta.channel_wavelength_ids

     @property
     def num_channels(self) -> int:
         """Return the number of channels."""
-        return len(self.
+        return len(self.channel_labels)
+
+    def get_channel_idx(
+        self, channel_label: str | None = None, wavelength_id: str | None = None
+    ) -> int:
+        """Get the index of a channel by its label or wavelength ID."""
+        return self.channels_meta.get_channel_idx(
+            channel_label=channel_label, wavelength_id=wavelength_id
+        )
+
+    def get_as_numpy(
+        self,
+        channel_selection: ChannelSlicingInputType = None,
+        axes_order: Sequence[str] | None = None,
+        transforms: Sequence[TransformProtocol] | None = None,
+        **slicing_kwargs: slice | int | Sequence[int] | None,
+    ) -> np.ndarray:
+        """Get the image as a numpy array.
+
+        Args:
+            channel_selection: Select a specific channel by label.
+                If None, all channels are returned.
+                Alternatively, you can slice arbitrary channels
+                using the slice_kwargs (c=[0, 2]).
+            axes_order: The order of the axes to return the array.
+            transforms: The transforms to apply to the array.
+            **slicing_kwargs: The slices to get the array.
+
+        Returns:
+            The array of the region of interest.
+        """
+        _slicing_kwargs = add_channel_selection_to_slicing_dict(
+            image=self, channel_selection=channel_selection, slicing_dict=slicing_kwargs
+        )
+        return self._get_as_numpy(
+            axes_order=axes_order, transforms=transforms, **_slicing_kwargs
+        )
+
+    def get_roi_as_numpy(
+        self,
+        roi: Roi | RoiPixels,
+        channel_selection: ChannelSlicingInputType = None,
+        axes_order: Sequence[str] | None = None,
+        transforms: Sequence[TransformProtocol] | None = None,
+        **slicing_kwargs: SlicingInputType,
+    ) -> np.ndarray:
+        """Get the image as a numpy array for a region of interest.
+
+        Args:
+            roi: The region of interest to get the array.
+            channel_selection: Select a what subset of channels to return.
+                If None, all channels are returned.
+            axes_order: The order of the axes to return the array.
+            transforms: The transforms to apply to the array.
+            **slicing_kwargs: The slices to get the array.
+
+        Returns:
+            The array of the region of interest.
+        """
+        _slicing_kwargs = add_channel_selection_to_slicing_dict(
+            image=self, channel_selection=channel_selection, slicing_dict=slicing_kwargs
+        )
+        return self._get_roi_as_numpy(
+            roi=roi, axes_order=axes_order, transforms=transforms, **_slicing_kwargs
+        )
+
+    def get_as_dask(
+        self,
+        channel_selection: ChannelSlicingInputType = None,
+        axes_order: Sequence[str] | None = None,
+        transforms: Sequence[TransformProtocol] | None = None,
+        **slicing_kwargs: SlicingInputType,
+    ) -> da.Array:
+        """Get the image as a dask array.
+
+        Args:
+            channel_selection: Select a what subset of channels to return.
+                If None, all channels are returned.
+            axes_order: The order of the axes to return the array.
+            transforms: The transforms to apply to the array.
+            **slicing_kwargs: The slices to get the array.
+
+        Returns:
+            The dask array of the region of interest.
+        """
+        _slicing_kwargs = add_channel_selection_to_slicing_dict(
+            image=self, channel_selection=channel_selection, slicing_dict=slicing_kwargs
+        )
+        return self._get_as_dask(
+            axes_order=axes_order, transforms=transforms, **_slicing_kwargs
+        )
+
+    def get_roi_as_dask(
+        self,
+        roi: Roi | RoiPixels,
+        channel_selection: ChannelSlicingInputType = None,
+        axes_order: Sequence[str] | None = None,
+        transforms: Sequence[TransformProtocol] | None = None,
+        **slicing_kwargs: SlicingInputType,
+    ) -> da.Array:
+        """Get the image as a dask array for a region of interest.
+
+        Args:
+            roi: The region of interest to get the array.
+            channel_selection: Select a what subset of channels to return.
+                If None, all channels are returned.
+            axes_order: The order of the axes to return the array.
+            transforms: The transforms to apply to the array.
+            **slicing_kwargs: The slices to get the array.
+
+        Returns:
+            The dask array of the region of interest.
+        """
+        _slicing_kwargs = add_channel_selection_to_slicing_dict(
+            image=self, channel_selection=channel_selection, slicing_dict=slicing_kwargs
+        )
+        return self._get_roi_as_dask(
+            roi=roi, axes_order=axes_order, transforms=transforms, **_slicing_kwargs
+        )
+
+    def get_array(
+        self,
+        channel_selection: ChannelSlicingInputType = None,
+        axes_order: Sequence[str] | None = None,
+        transforms: Sequence[TransformProtocol] | None = None,
+        mode: Literal["numpy", "dask"] = "numpy",
+        **slicing_kwargs: SlicingInputType,
+    ) -> ArrayLike:
+        """Get the image as a zarr array.
+
+        Args:
+            channel_selection: Select a what subset of channels to return.
+                If None, all channels are returned.
+            axes_order: The order of the axes to return the array.
+            transforms: The transforms to apply to the array.
+            mode: The object type to return.
+                Can be "dask", "numpy".
+            **slicing_kwargs: The slices to get the array.
+
+        Returns:
+            The zarr array of the region of interest.
+        """
+        _slicing_kwargs = add_channel_selection_to_slicing_dict(
+            image=self, channel_selection=channel_selection, slicing_dict=slicing_kwargs
+        )
+        return self._get_array(
+            axes_order=axes_order, mode=mode, transforms=transforms, **_slicing_kwargs
+        )
+
+    def get_roi(
+        self,
+        roi: Roi | RoiPixels,
+        channel_selection: ChannelSlicingInputType = None,
+        axes_order: Sequence[str] | None = None,
+        transforms: Sequence[TransformProtocol] | None = None,
+        mode: Literal["numpy", "dask"] = "numpy",
+        **slicing_kwargs: SlicingInputType,
+    ) -> ArrayLike:
+        """Get the image as a zarr array for a region of interest.
+
+        Args:
+            roi: The region of interest to get the array.
+            channel_selection: Select a what subset of channels to return.
+                If None, all channels are returned.
+            axes_order: The order of the axes to return the array.
+            transforms: The transforms to apply to the array.
+            mode: The object type to return.
+                Can be "dask", "numpy".
+            **slicing_kwargs: The slices to get the array.
+
+        Returns:
+            The zarr array of the region of interest.
+        """
+        _slicing_kwargs = add_channel_selection_to_slicing_dict(
+            image=self, channel_selection=channel_selection, slicing_dict=slicing_kwargs
+        )
+        return self._get_roi(
+            roi=roi,
+            axes_order=axes_order,
+            mode=mode,
+            transforms=transforms,
+            **_slicing_kwargs,
+        )
+
+    def set_array(
+        self,
+        patch: ArrayLike,
+        channel_selection: ChannelSlicingInputType = None,
+        axes_order: Sequence[str] | None = None,
+        transforms: Sequence[TransformProtocol] | None = None,
+        **slicing_kwargs: SlicingInputType,
+    ) -> None:
+        """Set the image array.
+
+        Args:
+            patch: The array to set.
+            channel_selection: Select a what subset of channels to return.
+                If None, all channels are set.
+            axes_order: The order of the axes to set the array.
+            transforms: The transforms to apply to the array.
+            **slicing_kwargs: The slices to set the array.
+        """
+        _slicing_kwargs = add_channel_selection_to_slicing_dict(
+            image=self, channel_selection=channel_selection, slicing_dict=slicing_kwargs
+        )
+        self._set_array(
+            patch=patch, axes_order=axes_order, transforms=transforms, **_slicing_kwargs
+        )
+
+    def set_roi(
+        self,
+        roi: Roi | RoiPixels,
+        patch: ArrayLike,
+        channel_selection: ChannelSlicingInputType = None,
+        axes_order: Sequence[str] | None = None,
+        transforms: Sequence[TransformProtocol] | None = None,
+        **slicing_kwargs: SlicingInputType,
+    ) -> None:
+        """Set the image array for a region of interest.
+
+        Args:
+            roi: The region of interest to set the array.
+            patch: The array to set.
+            channel_selection: Select a what subset of channels to return.
+            axes_order: The order of the axes to set the array.
+            transforms: The transforms to apply to the array.
+            **slicing_kwargs: The slices to set the array.
+        """
+        _slicing_kwargs = add_channel_selection_to_slicing_dict(
+            image=self, channel_selection=channel_selection, slicing_dict=slicing_kwargs
+        )
+        self._set_roi(
+            roi=roi,
+            patch=patch,
+            axes_order=axes_order,
+            transforms=transforms,
+            **_slicing_kwargs,
+        )

     def consolidate(
         self,
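All of the new public `get_*`/`set_*` wrappers funnel `channel_selection` through `add_channel_selection_to_slicing_dict`, which is defined at the end of this file. A hedged sketch of how this reads from user code, assuming ngio's public `open_ome_zarr_container` entry point and an `OmeZarrContainer.get_image()` accessor; the store path and channel label are illustrative:

```python
import numpy as np
from ngio import open_ome_zarr_container

container = open_ome_zarr_container("plate.zarr/B/03/0")  # illustrative path
image = container.get_image()

# All channels, eagerly as numpy.
full: np.ndarray = image.get_as_numpy()

# One channel by label; equivalent to c=image.get_channel_idx(channel_label="DAPI").
dapi = image.get_as_numpy(channel_selection="DAPI")

# Several channels by index, lazily as dask.
subset = image.get_as_dask(channel_selection=[0, 2])

# Passing both channel_selection and an explicit c= slice is ambiguous and raises.
# image.get_as_numpy(channel_selection="DAPI", c=0)  # NgioValidationError
```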
@@ -106,7 +387,7 @@ class Image(AbstractImage[ImageMetaHandler]):
         mode: Literal["dask", "numpy", "coarsen"] = "dask",
     ) -> None:
         """Consolidate the label on disk."""
-
+        self._consolidate(order=order, mode=mode)


 class ImagesContainer:
@@ -150,35 +431,55 @@ class ImagesContainer:
         image = self.get()
         return image.wavelength_ids

+    def get_channel_idx(
+        self, channel_label: str | None = None, wavelength_id: str | None = None
+    ) -> int:
+        """Get the index of a channel by label or wavelength ID.
+
+        Args:
+            channel_label (str | None): The label of the channel.
+                If None a wavelength ID must be provided.
+            wavelength_id (str | None): The wavelength ID of the channel.
+                If None a channel label must be provided.
+
+        Returns:
+            int: The index of the channel.
+
+        """
+        image = self.get()
+        return image.get_channel_idx(
+            channel_label=channel_label, wavelength_id=wavelength_id
+        )
+
     def set_channel_meta(
         self,
-        labels:
-        wavelength_id:
-        start:
-        end:
+        labels: Sequence[str | None] | int | None = None,
+        wavelength_id: Sequence[str | None] | None = None,
+        start: Sequence[float | None] | None = None,
+        end: Sequence[float | None] | None = None,
         percentiles: tuple[float, float] | None = None,
-        colors:
-        active:
+        colors: Sequence[str | None] | None = None,
+        active: Sequence[bool | None] | None = None,
         **omero_kwargs: dict,
     ) -> None:
         """Create a ChannelsMeta object with the default unit.

         Args:
-            labels(
+            labels(Sequence[str | None] | int): The list of channels names
                 in the image. If an integer is provided, the channels will
                 be named "channel_i".
-            wavelength_id(
+            wavelength_id(Sequence[str | None]): The wavelength ID of the channel.
                 If None, the wavelength ID will be the same as the channel name.
-            start(
+            start(Sequence[float | None]): The start value for each channel.
                 If None, the start value will be computed from the image.
-            end(
+            end(Sequence[float | None]): The end value for each channel.
                 If None, the end value will be computed from the image.
             percentiles(tuple[float, float] | None): The start and end
                 percentiles for each channel. If None, the percentiles will
                 not be computed.
-            colors(
+            colors(Sequence[str | None]): The list of colors for the
                 channels. If None, the colors will be random.
-            active (
+            active (Sequence[bool | None]): Whether the channel should
                 be shown by default.
             omero_kwargs(dict): Extra fields to store in the omero attributes.
         """
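A hedged sketch of calling the widened `set_channel_meta` signature with explicit per-channel sequences; `images_container` stands in for an existing `ImagesContainer`, and the labels, wavelength IDs, and colors are illustrative:

```python
images_container.set_channel_meta(
    labels=["DAPI", "GFP"],
    wavelength_id=["A01_C01", "A02_C02"],
    colors=["00FFFF", "00FF00"],
    active=[True, False],
    percentiles=(0.1, 99.9),  # start/end computed from the image data
)
```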
@@ -293,12 +594,12 @@ class ImagesContainer:
         self,
         store: StoreOrGroup,
         ref_path: str | None = None,
-        shape:
-        labels:
+        shape: Sequence[int] | None = None,
+        labels: Sequence[str] | None = None,
         pixel_size: PixelSize | None = None,
-        axes_names:
+        axes_names: Sequence[str] | None = None,
         name: str | None = None,
-        chunks:
+        chunks: Sequence[int] | None = None,
         dtype: str | None = None,
         overwrite: bool = False,
     ) -> "ImagesContainer":
@@ -308,12 +609,12 @@
             store (StoreOrGroup): The Zarr store or group to create the image in.
             ref_path (str | None): The path to the reference image in
                 the image container.
-            shape (
-            labels (
+            shape (Sequence[int] | None): The shape of the new image.
+            labels (Sequence[str] | None): The labels of the new image.
             pixel_size (PixelSize | None): The pixel size of the new image.
-            axes_names (
+            axes_names (Sequence[str] | None): The axes names of the new image.
             name (str | None): The name of the new image.
-            chunks (
+            chunks (Sequence[int] | None): The chunk shape of the new image.
             dtype (str | None): The data type of the new image.
             overwrite (bool): Whether to overwrite an existing image.

@@ -378,11 +679,10 @@ def compute_image_percentile(
     starts, ends = [], []
     for c in range(image.num_channels):
         if image.num_channels == 1:
-            data = image.
+            data = image.get_as_dask()
         else:
-            data = image.
+            data = image.get_as_dask(c=c)

-        assert isinstance(data, da.Array), "Data must be a Dask array."
         data = da.ravel(data)
         # remove all the zeros
         mask = data > 1e-16
@@ -396,7 +696,7 @@ def compute_image_percentile(
         # compute the percentiles
         _s_perc, _e_perc = da.percentile(
             data, [start_percentile, end_percentile], method="nearest"
-        ).compute()  # type: ignore
+        ).compute()  # type: ignore (return type is a tuple of floats)

         starts.append(float(_s_perc))
         ends.append(float(_e_perc))
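With the isinstance assert removed and `get_as_dask` used for every channel, the percentile computation now always runs lazily on a dask array. A standalone sketch of the same mask-then-percentile pattern on a plain dask array (shapes and percentile values are illustrative; this is not ngio code):

```python
import dask.array as da
import numpy as np

# Synthetic single-channel stack with a Poisson-like intensity distribution.
data = da.from_array(
    np.random.poisson(5, size=(1, 4, 64, 64)), chunks=(1, 1, 64, 64)
)

flat = da.ravel(data)
flat = flat[flat > 1e-16]  # drop zero background, as in compute_image_percentile
start, end = da.percentile(flat, [0.1, 99.9], method="nearest").compute()
print(float(start), float(end))
```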
@@ -407,12 +707,12 @@ def derive_image_container(
     image_container: ImagesContainer,
     store: StoreOrGroup,
     ref_path: str | None = None,
-    shape:
-    labels:
+    shape: Sequence[int] | None = None,
+    labels: Sequence[str] | None = None,
     pixel_size: PixelSize | None = None,
-    axes_names:
+    axes_names: Sequence[str] | None = None,
     name: str | None = None,
-    chunks:
+    chunks: Sequence[int] | None = None,
     dtype: str | None = None,
     overwrite: bool = False,
 ) -> ImagesContainer:
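A hedged sketch of deriving a new container with the Sequence-typed arguments above; the target store, shape, and chunking are illustrative, and `images_container` stands in for an existing `ImagesContainer`:

```python
new_container = derive_image_container(
    image_container=images_container,
    store="derived.zarr",            # illustrative target store
    shape=(1, 10, 512, 512),
    axes_names=("c", "z", "y", "x"),
    chunks=(1, 1, 256, 256),
    dtype="uint16",
    overwrite=True,
)
```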
@@ -422,12 +722,12 @@ def derive_image_container(
         image_container (ImagesContainer): The image container to derive the new image.
         store (StoreOrGroup): The Zarr store or group to create the image in.
         ref_path (str | None): The path to the reference image in the image container.
-        shape (
-        labels (
+        shape (Sequence[int] | None): The shape of the new image.
+        labels (Sequence[str] | None): The labels of the new image.
         pixel_size (PixelSize | None): The pixel size of the new image.
-        axes_names (
+        axes_names (Sequence[str] | None): The axes names of the new image.
         name (str | None): The name of the new image.
-        chunks (
+        chunks (Sequence[int] | None): The chunk shape of the new image.
         dtype (str | None): The data type of the new image.
         overwrite (bool): Whether to overwrite an existing image.

@@ -449,7 +749,7 @@ def derive_image_container(
         pixel_size = ref_image.pixel_size

     if axes_names is None:
-        axes_names = ref_meta.axes_mapper.
+        axes_names = ref_meta.axes_mapper.axes_names

     if len(axes_names) != len(shape):
         raise NgioValidationError(
@@ -495,16 +795,11 @@ def derive_image_container(
         _labels = ref_image.channel_labels
         wavelength_id = ref_image.wavelength_ids

-
-
-        ]
-
-
-        ]
-        start = [
-            c.channel_visualisation.start for c in ref_image._channels_meta.channels
-        ]
-        end = [c.channel_visualisation.end for c in ref_image._channels_meta.channels]
+        channel_meta = ref_image.channels_meta
+        colors = [c.channel_visualisation.color for c in channel_meta.channels]
+        active = [c.channel_visualisation.active for c in channel_meta.channels]
+        start = [c.channel_visualisation.start for c in channel_meta.channels]
+        end = [c.channel_visualisation.end for c in channel_meta.channels]
     else:
         _labels = None
         wavelength_id = None
@@ -530,3 +825,71 @@ def derive_image_container(
         end=end,
     )
     return image_container
+
+
+def _parse_str_or_model(
+    image: Image, channel_selection: int | str | ChannelSelectionModel
+) -> int:
+    """Parse a string or ChannelSelectionModel to an integer channel index."""
+    if isinstance(channel_selection, int):
+        if channel_selection < 0:
+            raise NgioValidationError("Channel index must be a non-negative integer.")
+        if channel_selection >= image.num_channels:
+            raise NgioValidationError(
+                "Channel index must be less than the number "
+                f"of channels ({image.num_channels})."
+            )
+        return channel_selection
+    elif isinstance(channel_selection, str):
+        return image.get_channel_idx(channel_label=channel_selection)
+    elif isinstance(channel_selection, ChannelSelectionModel):
+        if channel_selection.mode == "label":
+            return image.get_channel_idx(
+                channel_label=str(channel_selection.identifier)
+            )
+        elif channel_selection.mode == "wavelength_id":
+            return image.get_channel_idx(
+                channel_label=str(channel_selection.identifier)
+            )
+        elif channel_selection.mode == "index":
+            return int(channel_selection.identifier)
+    raise NgioValidationError(
+        "Invalid channel selection type. "
+        f"{channel_selection} is of type {type(channel_selection)} ",
+        "supported types are str, ChannelSelectionModel, and int.",
+    )
+
+
+def _parse_channel_selection(
+    image: Image, channel_selection: ChannelSlicingInputType
+) -> dict[str, SlicingInputType]:
+    """Parse the channel selection input into a list of channel indices."""
+    if channel_selection is None:
+        return {}
+    if isinstance(channel_selection, int | str | ChannelSelectionModel):
+        channel_index = _parse_str_or_model(image, channel_selection)
+        return {"c": channel_index}
+    elif isinstance(channel_selection, Sequence):
+        _sequence = [_parse_str_or_model(image, cs) for cs in channel_selection]
+        return {"c": _sequence}
+    raise NgioValidationError(
+        f"Invalid channel selection type {type(channel_selection)}. "
+        "Supported types are int, str, ChannelSelectionModel, and Sequence."
+    )
+
+
+def add_channel_selection_to_slicing_dict(
+    image: Image,
+    channel_selection: ChannelSlicingInputType,
+    slicing_dict: dict[str, SlicingInputType],
+) -> dict[str, SlicingInputType]:
+    """Add channel selection information to the slicing dictionary."""
+    channel_info = _parse_channel_selection(image, channel_selection)
+    if "c" in slicing_dict and channel_info:
+        raise NgioValidationError(
+            "Both channel_selection and 'c' in slicing_kwargs are provided. "
+            "Which channel selection should be used is ambiguous. "
+            "Please provide only one."
+        )
+    slicing_dict = slicing_dict | channel_info
+    return slicing_dict
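To tie the new helpers together, here is a short sketch of how a channel selection is normalised into the slicing dictionary consumed by the private `_get_*`/`_set_*` methods. `img` stands in for an open `Image` and the label "DAPI" is illustrative; only names defined in the hunk above are used.

```python
from ngio.images._image import (
    ChannelSelectionModel,
    add_channel_selection_to_slicing_dict,
)

# A label, a list of indices, or a model all collapse into a "c" entry.
add_channel_selection_to_slicing_dict(img, "DAPI", {})        # {"c": <label index>}
add_channel_selection_to_slicing_dict(img, [0, 2], {"z": 0})  # {"z": 0, "c": [0, 2]}
add_channel_selection_to_slicing_dict(
    img, ChannelSelectionModel(mode="index", identifier="1"), {}
)  # {"c": 1}

# An explicit "c" slice alongside channel_selection is ambiguous and raises
# NgioValidationError, as implemented above.
# add_channel_selection_to_slicing_dict(img, "DAPI", {"c": 0})
```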