ngio-0.5.0b6-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- ngio/__init__.py +69 -0
- ngio/common/__init__.py +28 -0
- ngio/common/_dimensions.py +335 -0
- ngio/common/_masking_roi.py +153 -0
- ngio/common/_pyramid.py +408 -0
- ngio/common/_roi.py +315 -0
- ngio/common/_synt_images_utils.py +101 -0
- ngio/common/_zoom.py +188 -0
- ngio/experimental/__init__.py +5 -0
- ngio/experimental/iterators/__init__.py +15 -0
- ngio/experimental/iterators/_abstract_iterator.py +390 -0
- ngio/experimental/iterators/_feature.py +189 -0
- ngio/experimental/iterators/_image_processing.py +130 -0
- ngio/experimental/iterators/_mappers.py +48 -0
- ngio/experimental/iterators/_rois_utils.py +126 -0
- ngio/experimental/iterators/_segmentation.py +235 -0
- ngio/hcs/__init__.py +19 -0
- ngio/hcs/_plate.py +1354 -0
- ngio/images/__init__.py +44 -0
- ngio/images/_abstract_image.py +967 -0
- ngio/images/_create_synt_container.py +132 -0
- ngio/images/_create_utils.py +423 -0
- ngio/images/_image.py +926 -0
- ngio/images/_label.py +411 -0
- ngio/images/_masked_image.py +531 -0
- ngio/images/_ome_zarr_container.py +1237 -0
- ngio/images/_table_ops.py +471 -0
- ngio/io_pipes/__init__.py +75 -0
- ngio/io_pipes/_io_pipes.py +361 -0
- ngio/io_pipes/_io_pipes_masked.py +488 -0
- ngio/io_pipes/_io_pipes_roi.py +146 -0
- ngio/io_pipes/_io_pipes_types.py +56 -0
- ngio/io_pipes/_match_shape.py +377 -0
- ngio/io_pipes/_ops_axes.py +344 -0
- ngio/io_pipes/_ops_slices.py +411 -0
- ngio/io_pipes/_ops_slices_utils.py +199 -0
- ngio/io_pipes/_ops_transforms.py +104 -0
- ngio/io_pipes/_zoom_transform.py +180 -0
- ngio/ome_zarr_meta/__init__.py +65 -0
- ngio/ome_zarr_meta/_meta_handlers.py +536 -0
- ngio/ome_zarr_meta/ngio_specs/__init__.py +77 -0
- ngio/ome_zarr_meta/ngio_specs/_axes.py +515 -0
- ngio/ome_zarr_meta/ngio_specs/_channels.py +462 -0
- ngio/ome_zarr_meta/ngio_specs/_dataset.py +89 -0
- ngio/ome_zarr_meta/ngio_specs/_ngio_hcs.py +539 -0
- ngio/ome_zarr_meta/ngio_specs/_ngio_image.py +438 -0
- ngio/ome_zarr_meta/ngio_specs/_pixel_size.py +122 -0
- ngio/ome_zarr_meta/v04/__init__.py +27 -0
- ngio/ome_zarr_meta/v04/_custom_models.py +18 -0
- ngio/ome_zarr_meta/v04/_v04_spec.py +473 -0
- ngio/ome_zarr_meta/v05/__init__.py +27 -0
- ngio/ome_zarr_meta/v05/_custom_models.py +18 -0
- ngio/ome_zarr_meta/v05/_v05_spec.py +511 -0
- ngio/resources/20200812-CardiomyocyteDifferentiation14-Cycle1_B03/mask.png +0 -0
- ngio/resources/20200812-CardiomyocyteDifferentiation14-Cycle1_B03/nuclei.png +0 -0
- ngio/resources/20200812-CardiomyocyteDifferentiation14-Cycle1_B03/raw.jpg +0 -0
- ngio/resources/__init__.py +55 -0
- ngio/resources/resource_model.py +36 -0
- ngio/tables/__init__.py +43 -0
- ngio/tables/_abstract_table.py +270 -0
- ngio/tables/_tables_container.py +449 -0
- ngio/tables/backends/__init__.py +57 -0
- ngio/tables/backends/_abstract_backend.py +240 -0
- ngio/tables/backends/_anndata.py +139 -0
- ngio/tables/backends/_anndata_utils.py +90 -0
- ngio/tables/backends/_csv.py +19 -0
- ngio/tables/backends/_json.py +92 -0
- ngio/tables/backends/_parquet.py +19 -0
- ngio/tables/backends/_py_arrow_backends.py +222 -0
- ngio/tables/backends/_table_backends.py +226 -0
- ngio/tables/backends/_utils.py +608 -0
- ngio/tables/v1/__init__.py +23 -0
- ngio/tables/v1/_condition_table.py +71 -0
- ngio/tables/v1/_feature_table.py +125 -0
- ngio/tables/v1/_generic_table.py +49 -0
- ngio/tables/v1/_roi_table.py +575 -0
- ngio/transforms/__init__.py +5 -0
- ngio/transforms/_zoom.py +19 -0
- ngio/utils/__init__.py +45 -0
- ngio/utils/_cache.py +48 -0
- ngio/utils/_datasets.py +165 -0
- ngio/utils/_errors.py +37 -0
- ngio/utils/_fractal_fsspec_store.py +42 -0
- ngio/utils/_zarr_utils.py +534 -0
- ngio-0.5.0b6.dist-info/METADATA +148 -0
- ngio-0.5.0b6.dist-info/RECORD +88 -0
- ngio-0.5.0b6.dist-info/WHEEL +4 -0
- ngio-0.5.0b6.dist-info/licenses/LICENSE +28 -0
@@ -0,0 +1,438 @@ ngio/ome_zarr_meta/ngio_specs/_ngio_image.py

"""Image metadata models.

This module contains the models for the image metadata.
These metadata models do not adhere to the OME standard.
But they can be built from the OME standard metadata, and they
can be converted to the OME standard.
"""

from collections.abc import Sequence
from typing import Any, Literal, TypeVar

import numpy as np
from pydantic import BaseModel

from ngio.ome_zarr_meta.ngio_specs._axes import (
    AxesHandler,
    DefaultSpaceUnit,
    DefaultTimeUnit,
    SpaceUnits,
    TimeUnits,
)
from ngio.ome_zarr_meta.ngio_specs._channels import ChannelsMeta
from ngio.ome_zarr_meta.ngio_specs._dataset import Dataset
from ngio.ome_zarr_meta.ngio_specs._pixel_size import PixelSize
from ngio.utils import NgioValidationError, NgioValueError

T = TypeVar("T")
NgffVersions = Literal["0.4", "0.5"]
DefaultNgffVersion: Literal["0.4"] = "0.4"


class ImageLabelSource(BaseModel):
    """Image label source model."""

    version: NgffVersions
    source: dict[str, str | None]

    @classmethod
    def default_init(cls, version: NgffVersions) -> "ImageLabelSource":
        """Initialize the ImageLabelSource object."""
        return cls(version=version, source={"image": "../../"})


class NgioLabelsGroupMeta(BaseModel):
    """Metadata model for the /labels group in OME-NGFF."""

    version: NgffVersions
    labels: list[str]


class AbstractNgioImageMeta:
    """Base class for ImageMeta and LabelMeta."""

    def __init__(
        self, version: NgffVersions, name: str | None, datasets: list[Dataset]
    ) -> None:
        """Initialize the ImageMeta object."""
        self._version = version
        self._name = name

        if len(datasets) == 0:
            raise NgioValidationError("At least one dataset must be provided.")

        self._datasets = datasets
        self._axes_handler = datasets[0].axes_handler

    def __repr__(self):
        class_name = type(self).__name__
        paths = [dataset.path for dataset in self.datasets]
        axes = self.axes_handler.axes_names
        return f"{class_name}(name={self.name}, datasets={paths}, axes={axes})"

    @classmethod
    def default_init(
        cls,
        levels: Sequence[str],
        axes_handler: AxesHandler,
        scales: Sequence[tuple[float, ...]],
        translations: Sequence[tuple[float, ...] | None],
        name: str | None = None,
        version: NgffVersions = DefaultNgffVersion,
    ):
        """Initialize the ImageMeta object."""
        datasets = []
        for level, scale, translation in zip(levels, scales, translations, strict=True):
            dataset = Dataset(
                path=level,
                axes_handler=axes_handler,
                scale=scale,
                translation=translation,
            )
            datasets.append(dataset)

        return cls(
            version=version,
            name=name,
            datasets=datasets,
        )

    def to_units(
        self,
        *,
        space_unit: SpaceUnits = DefaultSpaceUnit,
        time_unit: TimeUnits = DefaultTimeUnit,
    ):
        """Convert the pixel size to the given units.

        Args:
            space_unit(str): The space unit to convert to.
            time_unit(str): The time unit to convert to.
        """
        new_axes_handler = self.axes_handler.to_units(
            space_unit=space_unit,
            time_unit=time_unit,
        )
        new_datasets = []
        for dataset in self.datasets:
            new_dataset = Dataset(
                path=dataset.path,
                axes_handler=new_axes_handler,
                scale=dataset.scale,
                translation=dataset.translation,
            )
            new_datasets.append(new_dataset)

        return type(self)(
            version=self.version,
            name=self.name,
            datasets=new_datasets,
        )

    @property
    def version(self) -> NgffVersions:
        """Version of the OME-NGFF metadata used to build the object."""
        return self._version  # type: ignore (version is a Literal type)

    @property
    def zarr_format(self) -> Literal[2, 3]:
        """Zarr version used to store the data."""
        match self.version:
            case "0.4":
                return 2
            case "0.5":
                return 3
            case _:
                raise NgioValueError(f"Unsupported NGFF version: {self.version}")

    @property
    def name(self) -> str | None:
        """Name of the image."""
        return self._name

    @property
    def datasets(self) -> list[Dataset]:
        """List of datasets in the multiscale."""
        return self._datasets

    @property
    def axes_handler(self):
        """Return the axes mapper."""
        return self._axes_handler

    @property
    def levels(self) -> int:
        """Number of levels in the multiscale."""
        return len(self.datasets)

    @property
    def paths(self) -> list[str]:
        """List of paths of the datasets."""
        return [dataset.path for dataset in self.datasets]

    @property
    def space_unit(self) -> str | None:
        """Get the space unit of the pixel size."""
        return self.axes_handler.space_unit

    @property
    def time_unit(self) -> str | None:
        """Get the time unit of the pixel size."""
        return self.axes_handler.time_unit

    def _get_dataset_by_path(self, path: str) -> Dataset:
        """Get a dataset by its path."""
        for dataset in self.datasets:
            if dataset.path == path:
                return dataset
        raise NgioValueError(f"Dataset with path {path} not found.")

    def _get_dataset_by_index(self, idx: int) -> Dataset:
        """Get a dataset by its index."""
        if idx < 0 or idx >= len(self.datasets):
            raise NgioValueError(f"Index {idx} out of range.")
        return self.datasets[idx]

    def _find_closest_dataset(
        self, pixel_size: PixelSize, mode: str = "any"
    ) -> Dataset | None:
        """Find the closest dataset to the given pixel size.

        Args:
            pixel_size(PixelSize): The pixel size to search for.
            mode(str): The mode to find the closest dataset.
                "any": Will find the closest dataset.
                "lr": Will find closest "lower" resolution dataset.
                "hr": Will find closest "higher" resolution dataset.
        """
        min_dist = np.inf
        closest_dataset = None

        if mode == "any":
            datasets = self.datasets
        elif mode == "lr":
            # Lower resolution means that the pixel size is larger.
            datasets = [d for d in self.datasets if d.pixel_size > pixel_size]
        elif mode == "hr":
            # Higher resolution means that the pixel size is smaller.
            datasets = [d for d in self.datasets if d.pixel_size < pixel_size]
        else:
            raise NgioValueError(f"Mode {mode} not recognized.")

        for d in datasets:
            dist = d.pixel_size.distance(pixel_size)
            if dist < min_dist:
                min_dist = dist
                closest_dataset = d

        return closest_dataset

    def _get_closest_dataset(
        self, pixel_size: PixelSize, strict: bool = False
    ) -> Dataset:
        """Get a dataset with the closest pixel size.

        Args:
            pixel_size(PixelSize): The pixel size to search for.
            strict(bool): If True, the pixel size must be exactly the same.
        """
        closest_dataset = self._find_closest_dataset(pixel_size, mode="any")

        if closest_dataset is None:
            raise NgioValueError("No dataset found.")

        if strict and closest_dataset.pixel_size != pixel_size:
            raise NgioValueError(
                "No dataset with a pixel size close enough. "
                "Best match is "
                f"{closest_dataset.path}:{closest_dataset.pixel_size}"
            )
        return closest_dataset

    def get_lowest_resolution_dataset(self) -> Dataset:
        """Get the dataset with the lowest resolution."""
        dataset = self.datasets[-1]
        while True:
            lower_res_dataset = self._find_closest_dataset(
                dataset.pixel_size, mode="lr"
            )
            if lower_res_dataset is None:
                break
            dataset = lower_res_dataset
        return dataset

    def get_highest_resolution_dataset(self) -> Dataset:
        """Get the dataset with the highest resolution."""
        dataset = self.datasets[0]
        while True:
            higher_res_dataset = self._find_closest_dataset(
                dataset.pixel_size, mode="hr"
            )
            if higher_res_dataset is None:
                break
            dataset = higher_res_dataset
        return dataset

    def get_dataset(
        self,
        *,
        path: str | None = None,
        idx: int | None = None,
        pixel_size: PixelSize | None = None,
        strict: bool = False,
    ) -> Dataset:
        """Get a dataset by its path, index or pixel size.

        If all arguments are None, the dataset with the highest resolution is returned.

        Args:
            path(str): The path of the dataset.
            idx(int): The index of the dataset.
            pixel_size(PixelSize): The pixel size to search for.
            strict(bool): If True, the pixel size must be exactly the same.
                If pixel_size is None, strict is ignored.
        """
        # Only one of the arguments must be provided
        if (
            sum(
                [
                    path is not None,
                    idx is not None,
                    pixel_size is not None,
                ]
            )
            > 1
        ):
            raise NgioValueError("get_dataset must receive only one argument or None.")

        if path is not None:
            return self._get_dataset_by_path(path)
        elif idx is not None:
            return self._get_dataset_by_index(idx)
        elif pixel_size is not None:
            return self._get_closest_dataset(pixel_size, strict=strict)
        else:
            return self.get_highest_resolution_dataset()

    def _get_closest_datasets(self, path: str | None = None) -> tuple[Dataset, Dataset]:
        """Get the closest datasets to a dataset."""
        dataset = self.get_dataset(path=path)
        lr_dataset = self._find_closest_dataset(dataset.pixel_size, mode="lr")
        if lr_dataset is None:
            raise NgioValueError(
                "No lower resolution dataset found. "
                "This is the lowest resolution dataset."
            )
        return dataset, lr_dataset

    def scaling_factor(self, path: str | None = None) -> tuple[float, ...]:
        """Get the scaling factors to downscale to the next lower resolution dataset."""
        if self.levels == 1:
            return (1.0,) * len(self.axes_handler.axes_names)
        dataset, lr_dataset = self._get_closest_datasets(path=path)
        scale = dataset.scale
        lr_scale = lr_dataset.scale
        scaling_factors = [s / s_lr for s_lr, s in zip(scale, lr_scale, strict=True)]
        return tuple(scaling_factors)


class NgioLabelMeta(AbstractNgioImageMeta):
    """Label metadata model."""

    def __init__(
        self,
        version: NgffVersions,
        name: str | None,
        datasets: list[Dataset],
        image_label: ImageLabelSource | None = None,
    ) -> None:
        """Initialize the ImageMeta object."""
        super().__init__(version, name, datasets)
        image_label = (
            ImageLabelSource.default_init(self.version)
            if image_label is None
            else image_label
        )
        assert image_label is not None
        if image_label.version != version:
            raise NgioValidationError(
                "Label image version must match the metadata version."
            )
        self._image_label = image_label

    @property
    def source_image(self) -> str | None:
        source = self._image_label.source
        if "image" not in source:
            return None

        image_path = source["image"]
        return image_path

    @property
    def image_label(self) -> ImageLabelSource:
        """Get the image label metadata."""
        return self._image_label


class NgioImageMeta(AbstractNgioImageMeta):
    """Image metadata model."""

    def __init__(
        self,
        version: NgffVersions,
        name: str | None,
        datasets: list[Dataset],
        channels: ChannelsMeta | None = None,
    ) -> None:
        """Initialize the ImageMeta object."""
        super().__init__(version=version, name=name, datasets=datasets)
        self._channels_meta = channels

    @property
    def channels_meta(self) -> ChannelsMeta | None:
        """Get the channels_meta metadata."""
        return self._channels_meta

    def set_channels_meta(self, channels_meta: ChannelsMeta) -> None:
        """Set channels_meta metadata."""
        self._channels_meta = channels_meta

    def init_channels(
        self,
        labels: list[str] | int,
        wavelength_ids: list[str] | None = None,
        colors: list[str] | None = None,
        active: list[bool] | None = None,
        start: list[int | float] | None = None,
        end: list[int | float] | None = None,
        data_type: Any = np.uint16,
    ) -> None:
        """Set the channels_meta metadata for the image.

        Args:
            labels (list[str]|int): The labels of the channels.
            wavelength_ids (list[str], optional): The wavelengths of the channels.
            colors (list[str], optional): The colors of the channels.
            active (list[bool], optional): Whether the channel is active.
            start (list[int | float], optional): The start value of the channel.
            end (list[int | float], optional): The end value of the channel.
            data_type (Any): The data type of the channel.
        """
        channels_meta = ChannelsMeta.default_init(
            labels=labels,
            wavelength_id=wavelength_ids,
            colors=colors,
            active=active,
            start=start,
            end=end,
            data_type=data_type,
        )
        self.set_channels_meta(channels_meta=channels_meta)


NgioImageLabelMeta = NgioImageMeta | NgioLabelMeta
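For orientation, the following is a minimal usage sketch of the multiscale metadata model defined above. It is not part of the package: the `axes_handler` value is assumed to come from `ngio.ome_zarr_meta.ngio_specs._axes` (defined in `_axes.py`, which is not part of this hunk), and the level paths, scales, and pixel sizes are made-up example values.

# Hypothetical usage sketch (not from the package). `axes_handler` must be an
# AxesHandler built with ngio/ome_zarr_meta/ngio_specs/_axes.py (not shown here).
from ngio.ome_zarr_meta.ngio_specs._ngio_image import NgioImageMeta
from ngio.ome_zarr_meta.ngio_specs._pixel_size import PixelSize

meta = NgioImageMeta.default_init(
    levels=["0", "1", "2"],                     # multiscale dataset paths
    axes_handler=axes_handler,                  # assumed AxesHandler instance
    scales=[(1.0, 0.5, 0.5), (1.0, 1.0, 1.0), (1.0, 2.0, 2.0)],
    translations=[None, None, None],
    name="example",
)

meta.get_dataset()                              # highest-resolution dataset by default
meta.get_dataset(path="1")                      # lookup by multiscale path
meta.get_dataset(pixel_size=PixelSize(x=0.5, y=0.5, z=1.0))  # closest pixel size wins
meta.scaling_factor(path="0")                   # per-axis factor to the next lower level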
@@ -0,0 +1,122 @@ ngio/ome_zarr_meta/ngio_specs/_pixel_size.py

"""Fractal internal module for dataset metadata handling."""

import math
from functools import total_ordering
from typing import overload

import numpy as np
from pydantic import BaseModel

from ngio.ome_zarr_meta.ngio_specs import (
    DefaultSpaceUnit,
    DefaultTimeUnit,
    SpaceUnits,
    TimeUnits,
)

################################################################################
#
# PixelSize model
# The PixelSize model is used to store the pixel size in 3D space.
# The model does not store scaling factors and units for other axes.
#
################################################################################


@total_ordering
class PixelSize(BaseModel):
    """PixelSize class to store the pixel size in 3D space."""

    x: float
    y: float
    z: float
    t: float = 1
    space_unit: SpaceUnits | str | None = DefaultSpaceUnit
    time_unit: TimeUnits | str | None = DefaultTimeUnit

    def __repr__(self) -> str:
        """Return a string representation of the pixel size."""
        return f"PixelSize(x={self.x}, y={self.y}, z={self.z}, t={self.t})"

    def __eq__(self, other) -> bool:
        """Check if two pixel sizes are equal."""
        if not isinstance(other, PixelSize):
            raise TypeError("Can only compare PixelSize with PixelSize.")

        if (
            self.time_unit is not None
            and other.time_unit is None
            and self.time_unit != other.time_unit
        ):
            return False

        if self.space_unit != other.space_unit:
            return False
        return math.isclose(self.distance(other), 0)

    def __lt__(self, other: "PixelSize") -> bool:
        """Check if one pixel size is less than the other."""
        if not isinstance(other, PixelSize):
            raise TypeError("Can only compare PixelSize with PixelSize.")
        ref = PixelSize(
            x=0,
            y=0,
            z=0,
            t=0,
            space_unit=self.space_unit,
            time_unit=self.time_unit,  # type: ignore
        )
        return self.distance(ref) < other.distance(ref)

    def as_dict(self) -> dict[str, float]:
        """Return the pixel size as a dictionary."""
        return {"t": self.t, "z": self.z, "y": self.y, "x": self.x}

    @overload
    def get(self, axis: str, default: float) -> float: ...

    @overload
    def get(self, axis: str, default: None = None) -> float | None: ...

    def get(self, axis: str, default: float | None = None) -> float | None:
        """Get the pixel size for a given axis (in canonical name)."""
        px_size = self.as_dict().get(axis, default)
        if px_size is None:
            raise ValueError(
                f"Invalid axis name: {axis}, must be one of 'x', 'y', 'z', 't'."
            )
        return px_size

    @property
    def tzyx(self) -> tuple[float, float, float, float]:
        """Return the voxel size in t, z, y, x order."""
        return self.t, self.z, self.y, self.x

    @property
    def zyx(self) -> tuple[float, float, float]:
        """Return the voxel size in z, y, x order."""
        return self.z, self.y, self.x

    @property
    def yx(self) -> tuple[float, float]:
        """Return the xy plane pixel size in y, x order."""
        return self.y, self.x

    @property
    def voxel_volume(self) -> float:
        """Return the volume of a voxel."""
        return self.y * self.x * self.z

    @property
    def xy_plane_area(self) -> float:
        """Return the area of the xy plane."""
        return self.y * self.x

    @property
    def time_spacing(self) -> float | None:
        """Return the time spacing."""
        return self.t

    def distance(self, other: "PixelSize") -> float:
        """Return the distance between two pixel sizes."""
        return float(np.linalg.norm(np.array(self.tzyx) - np.array(other.tzyx)))
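As a quick illustration of how the `PixelSize` ordering and distance are used by the dataset lookup above, here is a small self-contained sketch; the numeric voxel sizes are arbitrary example values, not taken from the package.

# Example sketch; the numbers are illustrative only.
from ngio.ome_zarr_meta.ngio_specs._pixel_size import PixelSize

full_res = PixelSize(x=0.1625, y=0.1625, z=1.0)  # example full-resolution voxel size
binned = PixelSize(x=0.325, y=0.325, z=1.0)      # example 2x-downsampled level

# A larger pixel size sorts as "greater", i.e. lower resolution.
assert full_res < binned
assert full_res != binned

# Euclidean distance over (t, z, y, x); this is the quantity that
# AbstractNgioImageMeta._find_closest_dataset minimizes.
print(full_res.distance(binned))  # ~0.23
print(full_res.zyx, full_res.voxel_volume, full_res.xy_plane_area)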
@@ -0,0 +1,27 @@ ngio/ome_zarr_meta/v04/__init__.py

"""Utility to read/write OME-Zarr metadata v0.4."""

from ngio.ome_zarr_meta.v04._v04_spec import (
    ngio_to_v04_image_meta,
    ngio_to_v04_label_meta,
    ngio_to_v04_labels_group_meta,
    ngio_to_v04_plate_meta,
    ngio_to_v04_well_meta,
    v04_to_ngio_image_meta,
    v04_to_ngio_label_meta,
    v04_to_ngio_labels_group_meta,
    v04_to_ngio_plate_meta,
    v04_to_ngio_well_meta,
)

__all__ = [
    "ngio_to_v04_image_meta",
    "ngio_to_v04_label_meta",
    "ngio_to_v04_labels_group_meta",
    "ngio_to_v04_plate_meta",
    "ngio_to_v04_well_meta",
    "v04_to_ngio_image_meta",
    "v04_to_ngio_label_meta",
    "v04_to_ngio_labels_group_meta",
    "v04_to_ngio_plate_meta",
    "v04_to_ngio_well_meta",
]
@@ -0,0 +1,18 @@ ngio/ome_zarr_meta/v04/_custom_models.py

from typing import Annotated

from ome_zarr_models.v04.well import WellAttrs as WellAttrs04
from ome_zarr_models.v04.well_types import WellImage as WellImage04
from ome_zarr_models.v04.well_types import WellMeta as WellMeta04
from pydantic import SkipValidation


class CustomWellImage(WellImage04):
    path: Annotated[str, SkipValidation]


class CustomWellMeta(WellMeta04):
    images: list[CustomWellImage]  # type: ignore[valid-type]


class CustomWellAttrs(WellAttrs04):
    well: CustomWellMeta  # type: ignore[valid-type]
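These subclasses relax the upstream pydantic models by marking the well image `path` with `SkipValidation`. A hypothetical sketch of the effect follows; the field names and defaults of the upstream `ome_zarr_models` v0.4 well models (`path`, optional `acquisition`, `images`) are assumed from the OME-NGFF 0.4 spec and are not shown in this diff.

# Hypothetical sketch; upstream field defaults are assumptions, not shown here.
from ngio.ome_zarr_meta.v04._custom_models import CustomWellImage, CustomWellMeta

# SkipValidation on `path` lets non-standard image paths through, which the
# stock WellImage04 path validator might otherwise reject.
image = CustomWellImage(path="0_registered")
well = CustomWellMeta(images=[image])
print(well.images[0].path)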