ngio 0.3.5__py3-none-any.whl → 0.4.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- ngio/__init__.py +7 -2
- ngio/common/__init__.py +5 -52
- ngio/common/_dimensions.py +270 -55
- ngio/common/_masking_roi.py +38 -10
- ngio/common/_pyramid.py +51 -30
- ngio/common/_roi.py +269 -82
- ngio/common/_synt_images_utils.py +101 -0
- ngio/common/_zoom.py +49 -19
- ngio/experimental/__init__.py +5 -0
- ngio/experimental/iterators/__init__.py +15 -0
- ngio/experimental/iterators/_abstract_iterator.py +390 -0
- ngio/experimental/iterators/_feature.py +189 -0
- ngio/experimental/iterators/_image_processing.py +130 -0
- ngio/experimental/iterators/_mappers.py +48 -0
- ngio/experimental/iterators/_rois_utils.py +127 -0
- ngio/experimental/iterators/_segmentation.py +235 -0
- ngio/hcs/_plate.py +41 -36
- ngio/images/__init__.py +22 -1
- ngio/images/_abstract_image.py +403 -176
- ngio/images/_create.py +31 -15
- ngio/images/_create_synt_container.py +138 -0
- ngio/images/_image.py +452 -63
- ngio/images/_label.py +56 -30
- ngio/images/_masked_image.py +387 -129
- ngio/images/_ome_zarr_container.py +237 -67
- ngio/{common → images}/_table_ops.py +41 -41
- ngio/io_pipes/__init__.py +75 -0
- ngio/io_pipes/_io_pipes.py +361 -0
- ngio/io_pipes/_io_pipes_masked.py +488 -0
- ngio/io_pipes/_io_pipes_roi.py +152 -0
- ngio/io_pipes/_io_pipes_types.py +56 -0
- ngio/io_pipes/_match_shape.py +376 -0
- ngio/io_pipes/_ops_axes.py +344 -0
- ngio/io_pipes/_ops_slices.py +446 -0
- ngio/io_pipes/_ops_slices_utils.py +196 -0
- ngio/io_pipes/_ops_transforms.py +104 -0
- ngio/io_pipes/_zoom_transform.py +175 -0
- ngio/ome_zarr_meta/__init__.py +4 -2
- ngio/ome_zarr_meta/ngio_specs/__init__.py +4 -10
- ngio/ome_zarr_meta/ngio_specs/_axes.py +186 -175
- ngio/ome_zarr_meta/ngio_specs/_channels.py +55 -18
- ngio/ome_zarr_meta/ngio_specs/_dataset.py +48 -122
- ngio/ome_zarr_meta/ngio_specs/_ngio_hcs.py +3 -3
- ngio/ome_zarr_meta/ngio_specs/_ngio_image.py +38 -87
- ngio/ome_zarr_meta/ngio_specs/_pixel_size.py +17 -1
- ngio/ome_zarr_meta/v04/_v04_spec_utils.py +34 -31
- ngio/resources/20200812-CardiomyocyteDifferentiation14-Cycle1_B03/mask.png +0 -0
- ngio/resources/20200812-CardiomyocyteDifferentiation14-Cycle1_B03/nuclei.png +0 -0
- ngio/resources/20200812-CardiomyocyteDifferentiation14-Cycle1_B03/raw.jpg +0 -0
- ngio/resources/__init__.py +55 -0
- ngio/resources/resource_model.py +36 -0
- ngio/tables/backends/_abstract_backend.py +5 -6
- ngio/tables/backends/_anndata.py +1 -1
- ngio/tables/backends/_anndata_utils.py +3 -3
- ngio/tables/backends/_non_zarr_backends.py +1 -1
- ngio/tables/backends/_table_backends.py +0 -1
- ngio/tables/backends/_utils.py +3 -3
- ngio/tables/v1/_roi_table.py +165 -70
- ngio/transforms/__init__.py +5 -0
- ngio/transforms/_zoom.py +19 -0
- ngio/utils/__init__.py +2 -3
- ngio/utils/_datasets.py +5 -0
- ngio/utils/_logger.py +19 -0
- ngio/utils/_zarr_utils.py +6 -6
- {ngio-0.3.5.dist-info → ngio-0.4.0.dist-info}/METADATA +16 -14
- ngio-0.4.0.dist-info/RECORD +85 -0
- ngio/common/_array_pipe.py +0 -288
- ngio/common/_axes_transforms.py +0 -64
- ngio/common/_common_types.py +0 -5
- ngio/common/_slicer.py +0 -96
- ngio-0.3.5.dist-info/RECORD +0 -61
- {ngio-0.3.5.dist-info → ngio-0.4.0.dist-info}/WHEEL +0 -0
- {ngio-0.3.5.dist-info → ngio-0.4.0.dist-info}/licenses/LICENSE +0 -0
ngio/images/_image.py
CHANGED
@@ -1,13 +1,25 @@
 """Generic class to handle Image-like data in a OME-NGFF file."""

-from collections.abc import
+from collections.abc import Sequence
 from typing import Literal

 import dask.array as da
-
-from
-from
+import numpy as np
+from pydantic import BaseModel, model_validator
+from zarr.types import DIMENSION_SEPARATOR
+
+from ngio.common import (
+    Dimensions,
+    InterpolationOrder,
+    Roi,
+    RoiPixels,
+)
+from ngio.images._abstract_image import AbstractImage
 from ngio.images._create import create_empty_image_container
+from ngio.io_pipes import (
+    SlicingInputType,
+    TransformProtocol,
+)
 from ngio.ome_zarr_meta import (
     ImageMetaHandler,
     NgioImageMeta,
@@ -30,18 +42,55 @@ from ngio.utils import (
 )


+class ChannelSelectionModel(BaseModel):
+    """Model for channel selection.
+
+    This model is used to select a channel by label, wavelength ID, or index.
+
+    Args:
+        identifier (str): Unique identifier for the channel.
+            This can be a channel label, wavelength ID, or index.
+        mode (Literal["label", "wavelength_id", "index"]): Specifies how to
+            interpret the identifier. Can be "label", "wavelength_id", or
+            "index" (must be an integer).
+
+    """
+
+    mode: Literal["label", "wavelength_id", "index"] = "label"
+    identifier: str
+
+    @model_validator(mode="after")
+    def check_channel_selection(self):
+        if self.mode == "index":
+            try:
+                int(self.identifier)
+            except ValueError as e:
+                raise ValueError(
+                    "Identifier must be an integer when mode is 'index'"
+                ) from e
+        return self
+
+
+ChannelSlicingInputType = (
+    None
+    | int
+    | str
+    | ChannelSelectionModel
+    | Sequence[str | ChannelSelectionModel | int]
+)
+
+
 def _check_channel_meta(meta: NgioImageMeta, dimension: Dimensions) -> ChannelsMeta:
     """Check the channel metadata."""
-    c_dim = dimension.get("c",
-    c_dim = 1 if c_dim is None else c_dim
+    c_dim = dimension.get("c", default=1)

     if meta.channels_meta is None:
         return ChannelsMeta.default_init(labels=c_dim)

-    if len(meta.channels) != c_dim:
+    if len(meta.channels_meta.channels) != c_dim:
         raise NgioValidationError(
             "The number of channels does not match the image. "
-            f"Expected {len(meta.channels)} channels, got {c_dim}."
+            f"Expected {len(meta.channels_meta.channels)} channels, got {c_dim}."
         )

     return meta.channels_meta
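The `ChannelSelectionModel` introduced above validates that an index-based selection carries an integer identifier. A minimal sketch of that behaviour (import path taken from this file; Pydantic wraps the validator's `ValueError` in a `ValidationError`):

```python
from pydantic import ValidationError

from ngio.images._image import ChannelSelectionModel

# Index-based selection: the identifier is still a string, but it must parse as an int.
ChannelSelectionModel(mode="index", identifier="0")

# A non-numeric identifier with mode="index" is rejected by the model validator.
try:
    ChannelSelectionModel(mode="index", identifier="DAPI")
except ValidationError as err:
    print(err)  # Identifier must be an integer when mode is 'index'
```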
@@ -72,41 +121,276 @@ class Image(AbstractImage[ImageMetaHandler]):
         super().__init__(
             group_handler=group_handler, path=path, meta_handler=meta_handler
         )
-        self._channels_meta = _check_channel_meta(self.meta, self.dimensions)

     @property
     def meta(self) -> NgioImageMeta:
         """Return the metadata."""
         return self._meta_handler.meta

+    @property
+    def channels_meta(self) -> ChannelsMeta:
+        """Return the channels metadata."""
+        return _check_channel_meta(self.meta, self.dimensions)
+
     @property
     def channel_labels(self) -> list[str]:
         """Return the channels of the image."""
-        channel_labels
-        for c in self._channels_meta.channels:
-            channel_labels.append(c.label)
-        return channel_labels
+        return self.channels_meta.channel_labels

     @property
     def wavelength_ids(self) -> list[str | None]:
         """Return the list of wavelength of the image."""
-
-        for c in self._channels_meta.channels:
-            wavelength_ids.append(c.wavelength_id)
-        return wavelength_ids
+        return self.channels_meta.channel_wavelength_ids

     @property
     def num_channels(self) -> int:
         """Return the number of channels."""
-        return len(self.
+        return len(self.channel_labels)
+
+    def get_channel_idx(
+        self, channel_label: str | None = None, wavelength_id: str | None = None
+    ) -> int:
+        """Get the index of a channel by its label or wavelength ID."""
+        return self.channels_meta.get_channel_idx(
+            channel_label=channel_label, wavelength_id=wavelength_id
+        )
+
+    def get_as_numpy(
+        self,
+        channel_selection: ChannelSlicingInputType = None,
+        axes_order: Sequence[str] | None = None,
+        transforms: Sequence[TransformProtocol] | None = None,
+        **slicing_kwargs: slice | int | Sequence[int] | None,
+    ) -> np.ndarray:
+        """Get the image as a numpy array.
+
+        Args:
+            channel_selection: Select a specific channel by label.
+                If None, all channels are returned.
+                Alternatively, you can slice arbitrary channels
+                using the slice_kwargs (c=[0, 2]).
+            axes_order: The order of the axes to return the array.
+            transforms: The transforms to apply to the array.
+            **slicing_kwargs: The slices to get the array.
+
+        Returns:
+            The array of the region of interest.
+        """
+        _slicing_kwargs = add_channel_selection_to_slicing_dict(
+            image=self, channel_selection=channel_selection, slicing_dict=slicing_kwargs
+        )
+        return self._get_as_numpy(
+            axes_order=axes_order, transforms=transforms, **_slicing_kwargs
+        )
+
+    def get_roi_as_numpy(
+        self,
+        roi: Roi | RoiPixels,
+        channel_selection: ChannelSlicingInputType = None,
+        axes_order: Sequence[str] | None = None,
+        transforms: Sequence[TransformProtocol] | None = None,
+        **slicing_kwargs: SlicingInputType,
+    ) -> np.ndarray:
+        """Get the image as a numpy array for a region of interest.
+
+        Args:
+            roi: The region of interest to get the array.
+            channel_selection: Select a what subset of channels to return.
+                If None, all channels are returned.
+            axes_order: The order of the axes to return the array.
+            transforms: The transforms to apply to the array.
+            **slicing_kwargs: The slices to get the array.
+
+        Returns:
+            The array of the region of interest.
+        """
+        _slicing_kwargs = add_channel_selection_to_slicing_dict(
+            image=self, channel_selection=channel_selection, slicing_dict=slicing_kwargs
+        )
+        return self._get_roi_as_numpy(
+            roi=roi, axes_order=axes_order, transforms=transforms, **_slicing_kwargs
+        )
+
+    def get_as_dask(
+        self,
+        channel_selection: ChannelSlicingInputType = None,
+        axes_order: Sequence[str] | None = None,
+        transforms: Sequence[TransformProtocol] | None = None,
+        **slicing_kwargs: SlicingInputType,
+    ) -> da.Array:
+        """Get the image as a dask array.
+
+        Args:
+            channel_selection: Select a what subset of channels to return.
+                If None, all channels are returned.
+            axes_order: The order of the axes to return the array.
+            transforms: The transforms to apply to the array.
+            **slicing_kwargs: The slices to get the array.
+
+        Returns:
+            The dask array of the region of interest.
+        """
+        _slicing_kwargs = add_channel_selection_to_slicing_dict(
+            image=self, channel_selection=channel_selection, slicing_dict=slicing_kwargs
+        )
+        return self._get_as_dask(
+            axes_order=axes_order, transforms=transforms, **_slicing_kwargs
+        )
+
+    def get_roi_as_dask(
+        self,
+        roi: Roi | RoiPixels,
+        channel_selection: ChannelSlicingInputType = None,
+        axes_order: Sequence[str] | None = None,
+        transforms: Sequence[TransformProtocol] | None = None,
+        **slicing_kwargs: SlicingInputType,
+    ) -> da.Array:
+        """Get the image as a dask array for a region of interest.
+
+        Args:
+            roi: The region of interest to get the array.
+            channel_selection: Select a what subset of channels to return.
+                If None, all channels are returned.
+            axes_order: The order of the axes to return the array.
+            transforms: The transforms to apply to the array.
+            **slicing_kwargs: The slices to get the array.
+
+        Returns:
+            The dask array of the region of interest.
+        """
+        _slicing_kwargs = add_channel_selection_to_slicing_dict(
+            image=self, channel_selection=channel_selection, slicing_dict=slicing_kwargs
+        )
+        return self._get_roi_as_dask(
+            roi=roi, axes_order=axes_order, transforms=transforms, **_slicing_kwargs
+        )
+
+    def get_array(
+        self,
+        channel_selection: ChannelSlicingInputType = None,
+        axes_order: Sequence[str] | None = None,
+        transforms: Sequence[TransformProtocol] | None = None,
+        mode: Literal["numpy", "dask"] = "numpy",
+        **slicing_kwargs: SlicingInputType,
+    ) -> np.ndarray | da.Array:
+        """Get the image as a zarr array.
+
+        Args:
+            channel_selection: Select a what subset of channels to return.
+                If None, all channels are returned.
+            axes_order: The order of the axes to return the array.
+            transforms: The transforms to apply to the array.
+            mode: The object type to return.
+                Can be "dask", "numpy".
+            **slicing_kwargs: The slices to get the array.
+
+        Returns:
+            The zarr array of the region of interest.
+        """
+        _slicing_kwargs = add_channel_selection_to_slicing_dict(
+            image=self, channel_selection=channel_selection, slicing_dict=slicing_kwargs
+        )
+        return self._get_array(
+            axes_order=axes_order, mode=mode, transforms=transforms, **_slicing_kwargs
+        )
+
+    def get_roi(
+        self,
+        roi: Roi | RoiPixels,
+        channel_selection: ChannelSlicingInputType = None,
+        axes_order: Sequence[str] | None = None,
+        transforms: Sequence[TransformProtocol] | None = None,
+        mode: Literal["numpy", "dask"] = "numpy",
+        **slicing_kwargs: SlicingInputType,
+    ) -> np.ndarray | da.Array:
+        """Get the image as a zarr array for a region of interest.
+
+        Args:
+            roi: The region of interest to get the array.
+            channel_selection: Select a what subset of channels to return.
+                If None, all channels are returned.
+            axes_order: The order of the axes to return the array.
+            transforms: The transforms to apply to the array.
+            mode: The object type to return.
+                Can be "dask", "numpy".
+            **slicing_kwargs: The slices to get the array.
+
+        Returns:
+            The zarr array of the region of interest.
+        """
+        _slicing_kwargs = add_channel_selection_to_slicing_dict(
+            image=self, channel_selection=channel_selection, slicing_dict=slicing_kwargs
+        )
+        return self._get_roi(
+            roi=roi,
+            axes_order=axes_order,
+            mode=mode,
+            transforms=transforms,
+            **_slicing_kwargs,
+        )
+
+    def set_array(
+        self,
+        patch: np.ndarray | da.Array,
+        channel_selection: ChannelSlicingInputType = None,
+        axes_order: Sequence[str] | None = None,
+        transforms: Sequence[TransformProtocol] | None = None,
+        **slicing_kwargs: SlicingInputType,
+    ) -> None:
+        """Set the image array.
+
+        Args:
+            patch: The array to set.
+            channel_selection: Select a what subset of channels to return.
+                If None, all channels are set.
+            axes_order: The order of the axes to set the array.
+            transforms: The transforms to apply to the array.
+            **slicing_kwargs: The slices to set the array.
+        """
+        _slicing_kwargs = add_channel_selection_to_slicing_dict(
+            image=self, channel_selection=channel_selection, slicing_dict=slicing_kwargs
+        )
+        self._set_array(
+            patch=patch, axes_order=axes_order, transforms=transforms, **_slicing_kwargs
+        )
+
+    def set_roi(
+        self,
+        roi: Roi | RoiPixels,
+        patch: np.ndarray | da.Array,
+        channel_selection: ChannelSlicingInputType = None,
+        axes_order: Sequence[str] | None = None,
+        transforms: Sequence[TransformProtocol] | None = None,
+        **slicing_kwargs: SlicingInputType,
+    ) -> None:
+        """Set the image array for a region of interest.
+
+        Args:
+            roi: The region of interest to set the array.
+            patch: The array to set.
+            channel_selection: Select a what subset of channels to return.
+            axes_order: The order of the axes to set the array.
+            transforms: The transforms to apply to the array.
+            **slicing_kwargs: The slices to set the array.
+        """
+        _slicing_kwargs = add_channel_selection_to_slicing_dict(
+            image=self, channel_selection=channel_selection, slicing_dict=slicing_kwargs
+        )
+        self._set_roi(
+            roi=roi,
+            patch=patch,
+            axes_order=axes_order,
+            transforms=transforms,
+            **_slicing_kwargs,
+        )

     def consolidate(
         self,
-        order:
+        order: InterpolationOrder = "linear",
         mode: Literal["dask", "numpy", "coarsen"] = "dask",
     ) -> None:
         """Consolidate the label on disk."""
-
+        self._consolidate(order=order, mode=mode)


 class ImagesContainer:
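The new `channel_selection` argument is accepted by all of the `get_*`/`set_*` methods shown above and is resolved into a `c` entry of the slicing kwargs. A hedged usage sketch (the channel label and wavelength ID are hypothetical; `image` is assumed to be an `Image` obtained from an opened OME-Zarr container):

```python
from ngio.common import Roi
from ngio.images._image import ChannelSelectionModel, Image


def load_channel_subsets(image: Image, roi: Roi):
    # Single channel by label (default mode of ChannelSelectionModel is "label").
    dapi = image.get_roi_as_numpy(roi, channel_selection="DAPI")
    # Several channels at once, mixing an index with an explicit selection model.
    subset = image.get_as_dask(
        channel_selection=[0, ChannelSelectionModel(mode="wavelength_id", identifier="A02")]
    )
    return dapi, subset
```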
@@ -150,35 +434,55 @@ class ImagesContainer:
         image = self.get()
         return image.wavelength_ids

+    def get_channel_idx(
+        self, channel_label: str | None = None, wavelength_id: str | None = None
+    ) -> int:
+        """Get the index of a channel by label or wavelength ID.
+
+        Args:
+            channel_label (str | None): The label of the channel.
+                If None a wavelength ID must be provided.
+            wavelength_id (str | None): The wavelength ID of the channel.
+                If None a channel label must be provided.
+
+        Returns:
+            int: The index of the channel.
+
+        """
+        image = self.get()
+        return image.get_channel_idx(
+            channel_label=channel_label, wavelength_id=wavelength_id
+        )
+
     def set_channel_meta(
         self,
-        labels:
-        wavelength_id:
-        start:
-        end:
+        labels: Sequence[str | None] | int | None = None,
+        wavelength_id: Sequence[str | None] | None = None,
+        start: Sequence[float | None] | None = None,
+        end: Sequence[float | None] | None = None,
         percentiles: tuple[float, float] | None = None,
-        colors:
-        active:
+        colors: Sequence[str | None] | None = None,
+        active: Sequence[bool | None] | None = None,
         **omero_kwargs: dict,
     ) -> None:
         """Create a ChannelsMeta object with the default unit.

         Args:
-            labels(
+            labels(Sequence[str | None] | int): The list of channels names
                 in the image. If an integer is provided, the channels will
                 be named "channel_i".
-            wavelength_id(
+            wavelength_id(Sequence[str | None]): The wavelength ID of the channel.
                 If None, the wavelength ID will be the same as the channel name.
-            start(
+            start(Sequence[float | None]): The start value for each channel.
                 If None, the start value will be computed from the image.
-            end(
+            end(Sequence[float | None]): The end value for each channel.
                 If None, the end value will be computed from the image.
             percentiles(tuple[float, float] | None): The start and end
                 percentiles for each channel. If None, the percentiles will
                 not be computed.
-            colors(
+            colors(Sequence[str | None]): The list of colors for the
                 channels. If None, the colors will be random.
-            active (
+            active (Sequence[bool | None]): Whether the channel should
                 be shown by default.
             omero_kwargs(dict): Extra fields to store in the omero attributes.
         """
@@ -293,13 +597,15 @@ class ImagesContainer:
         self,
         store: StoreOrGroup,
         ref_path: str | None = None,
-        shape:
-        labels:
+        shape: Sequence[int] | None = None,
+        labels: Sequence[str] | None = None,
         pixel_size: PixelSize | None = None,
-        axes_names:
+        axes_names: Sequence[str] | None = None,
         name: str | None = None,
-        chunks:
+        chunks: Sequence[int] | None = None,
         dtype: str | None = None,
+        dimension_separator: DIMENSION_SEPARATOR | None = None,
+        compressor: str | None = None,
         overwrite: bool = False,
     ) -> "ImagesContainer":
         """Create an empty OME-Zarr image from an existing image.
@@ -308,12 +614,16 @@
             store (StoreOrGroup): The Zarr store or group to create the image in.
             ref_path (str | None): The path to the reference image in
                 the image container.
-            shape (
-            labels (
+            shape (Sequence[int] | None): The shape of the new image.
+            labels (Sequence[str] | None): The labels of the new image.
             pixel_size (PixelSize | None): The pixel size of the new image.
-            axes_names (
+            axes_names (Sequence[str] | None): The axes names of the new image.
             name (str | None): The name of the new image.
-            chunks (
+            chunks (Sequence[int] | None): The chunk shape of the new image.
+            dimension_separator (DIMENSION_SEPARATOR | None): The separator to use for
+                dimensions. If None it will use the same as the reference image.
+            compressor (str | None): The compressor to use. If None it will use
+                the same as the reference image.
             dtype (str | None): The data type of the new image.
             overwrite (bool): Whether to overwrite an existing image.

@@ -331,6 +641,8 @@
             name=name,
             chunks=chunks,
             dtype=dtype,
+            dimension_separator=dimension_separator,
+            compressor=compressor,
             overwrite=overwrite,
         )

@@ -378,11 +690,10 @@ def compute_image_percentile(
     starts, ends = [], []
     for c in range(image.num_channels):
         if image.num_channels == 1:
-            data = image.
+            data = image.get_as_dask()
         else:
-            data = image.
+            data = image.get_as_dask(c=c)

-        assert isinstance(data, da.Array), "Data must be a Dask array."
         data = da.ravel(data)
         # remove all the zeros
         mask = data > 1e-16
@@ -396,7 +707,7 @@ def compute_image_percentile(
         # compute the percentiles
         _s_perc, _e_perc = da.percentile(
             data, [start_percentile, end_percentile], method="nearest"
-        ).compute()  # type: ignore
+        ).compute()  # type: ignore (return type is a tuple of floats)

         starts.append(float(_s_perc))
         ends.append(float(_e_perc))
@@ -407,13 +718,15 @@ def derive_image_container(
     image_container: ImagesContainer,
     store: StoreOrGroup,
     ref_path: str | None = None,
-    shape:
-    labels:
+    shape: Sequence[int] | None = None,
+    labels: Sequence[str] | None = None,
     pixel_size: PixelSize | None = None,
-    axes_names:
+    axes_names: Sequence[str] | None = None,
     name: str | None = None,
-    chunks:
+    chunks: Sequence[int] | None = None,
     dtype: str | None = None,
+    dimension_separator: DIMENSION_SEPARATOR | None = None,
+    compressor=None,
     overwrite: bool = False,
 ) -> ImagesContainer:
     """Create an empty OME-Zarr image from an existing image.
@@ -422,12 +735,16 @@ def derive_image_container(
         image_container (ImagesContainer): The image container to derive the new image.
         store (StoreOrGroup): The Zarr store or group to create the image in.
         ref_path (str | None): The path to the reference image in the image container.
-        shape (
-        labels (
+        shape (Sequence[int] | None): The shape of the new image.
+        labels (Sequence[str] | None): The labels of the new image.
         pixel_size (PixelSize | None): The pixel size of the new image.
-        axes_names (
+        axes_names (Sequence[str] | None): The axes names of the new image.
         name (str | None): The name of the new image.
-        chunks (
+        chunks (Sequence[int] | None): The chunk shape of the new image.
+        dimension_separator (DIMENSION_SEPARATOR | None): The separator to use for
+            dimensions. If None it will use the same as the reference image.
+        compressor: The compressor to use. If None it will use
+            the same as the reference image.
         dtype (str | None): The data type of the new image.
         overwrite (bool): Whether to overwrite an existing image.

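`derive_image_container` now also inherits `dimension_separator` and `compressor` from the reference image when they are left as `None`. A minimal sketch of a call that overrides only the separator (the destination store path is hypothetical):

```python
from ngio.images._image import ImagesContainer, derive_image_container


def derive_with_nested_chunks(source: ImagesContainer) -> ImagesContainer:
    # Leaving compressor unset (None) reuses the reference image's compressor;
    # dimension_separator="/" switches the derived image to nested chunk keys.
    return derive_image_container(
        image_container=source,
        store="derived_image.zarr",
        dimension_separator="/",
        overwrite=True,
    )
```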
@@ -449,7 +766,7 @@ def derive_image_container(
     pixel_size = ref_image.pixel_size

     if axes_names is None:
-        axes_names = ref_meta.
+        axes_names = ref_meta.axes_handler.axes_names

     if len(axes_names) != len(shape):
         raise NgioValidationError(
@@ -471,6 +788,13 @@ def derive_image_container(

     if dtype is None:
         dtype = ref_image.dtype
+
+    if dimension_separator is None:
+        dimension_separator = ref_image.zarr_array._dimension_separator  # type: ignore
+
+    if compressor is None:
+        compressor = ref_image.zarr_array.compressor  # type: ignore
+
     handler = create_empty_image_container(
         store=store,
         shape=shape,
@@ -486,6 +810,8 @@ def derive_image_container(
         name=name,
         chunks=chunks,
         dtype=dtype,
+        dimension_separator=dimension_separator,  # type: ignore
+        compressor=compressor,  # type: ignore
         overwrite=overwrite,
         version=ref_meta.version,
     )
@@ -495,16 +821,11 @@ def derive_image_container(
         _labels = ref_image.channel_labels
         wavelength_id = ref_image.wavelength_ids

-
-
-        ]
-
-
-        ]
-        start = [
-            c.channel_visualisation.start for c in ref_image._channels_meta.channels
-        ]
-        end = [c.channel_visualisation.end for c in ref_image._channels_meta.channels]
+        channel_meta = ref_image.channels_meta
+        colors = [c.channel_visualisation.color for c in channel_meta.channels]
+        active = [c.channel_visualisation.active for c in channel_meta.channels]
+        start = [c.channel_visualisation.start for c in channel_meta.channels]
+        end = [c.channel_visualisation.end for c in channel_meta.channels]
     else:
         _labels = None
         wavelength_id = None
@@ -530,3 +851,71 @@ def derive_image_container(
         end=end,
     )
     return image_container
+
+
+def _parse_str_or_model(
+    image: Image, channel_selection: int | str | ChannelSelectionModel
+) -> int:
+    """Parse a string or ChannelSelectionModel to an integer channel index."""
+    if isinstance(channel_selection, int):
+        if channel_selection < 0:
+            raise NgioValidationError("Channel index must be a non-negative integer.")
+        if channel_selection >= image.num_channels:
+            raise NgioValidationError(
+                "Channel index must be less than the number "
+                f"of channels ({image.num_channels})."
+            )
+        return channel_selection
+    elif isinstance(channel_selection, str):
+        return image.get_channel_idx(channel_label=channel_selection)
+    elif isinstance(channel_selection, ChannelSelectionModel):
+        if channel_selection.mode == "label":
+            return image.get_channel_idx(
+                channel_label=str(channel_selection.identifier)
+            )
+        elif channel_selection.mode == "wavelength_id":
+            return image.get_channel_idx(
+                channel_label=str(channel_selection.identifier)
+            )
+        elif channel_selection.mode == "index":
+            return int(channel_selection.identifier)
+    raise NgioValidationError(
+        "Invalid channel selection type. "
+        f"{channel_selection} is of type {type(channel_selection)} ",
+        "supported types are str, ChannelSelectionModel, and int.",
+    )
+
+
+def _parse_channel_selection(
+    image: Image, channel_selection: ChannelSlicingInputType
+) -> dict[str, SlicingInputType]:
+    """Parse the channel selection input into a list of channel indices."""
+    if channel_selection is None:
+        return {}
+    if isinstance(channel_selection, int | str | ChannelSelectionModel):
+        channel_index = _parse_str_or_model(image, channel_selection)
+        return {"c": channel_index}
+    elif isinstance(channel_selection, Sequence):
+        _sequence = [_parse_str_or_model(image, cs) for cs in channel_selection]
+        return {"c": _sequence}
+    raise NgioValidationError(
+        f"Invalid channel selection type {type(channel_selection)}. "
+        "Supported types are int, str, ChannelSelectionModel, and Sequence."
+    )
+
+
+def add_channel_selection_to_slicing_dict(
+    image: Image,
+    channel_selection: ChannelSlicingInputType,
+    slicing_dict: dict[str, SlicingInputType],
+) -> dict[str, SlicingInputType]:
+    """Add channel selection information to the slicing dictionary."""
+    channel_info = _parse_channel_selection(image, channel_selection)
+    if "c" in slicing_dict and channel_info:
+        raise NgioValidationError(
+            "Both channel_selection and 'c' in slicing_kwargs are provided. "
+            "Which channel selection should be used is ambiguous. "
+            "Please provide only one."
+        )
+    slicing_dict = slicing_dict | channel_info
+    return slicing_dict
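The module-level helpers at the end of the file turn any accepted `channel_selection` value into a `"c"` entry of the slicing kwargs and refuse ambiguous calls that pass both. A small sketch of the merge behaviour (the `image` argument is assumed to be an `Image` from an opened container):

```python
from ngio.images._image import Image, add_channel_selection_to_slicing_dict


def slicing_for_first_channel(image: Image, z: int):
    # Returns {"z": z, "c": 0}; also passing c=... in slicing_dict would raise
    # NgioValidationError because the channel selection would be ambiguous.
    return add_channel_selection_to_slicing_dict(
        image=image, channel_selection=0, slicing_dict={"z": z}
    )
```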