ngio 0.5.0b6__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- ngio/__init__.py +69 -0
- ngio/common/__init__.py +28 -0
- ngio/common/_dimensions.py +335 -0
- ngio/common/_masking_roi.py +153 -0
- ngio/common/_pyramid.py +408 -0
- ngio/common/_roi.py +315 -0
- ngio/common/_synt_images_utils.py +101 -0
- ngio/common/_zoom.py +188 -0
- ngio/experimental/__init__.py +5 -0
- ngio/experimental/iterators/__init__.py +15 -0
- ngio/experimental/iterators/_abstract_iterator.py +390 -0
- ngio/experimental/iterators/_feature.py +189 -0
- ngio/experimental/iterators/_image_processing.py +130 -0
- ngio/experimental/iterators/_mappers.py +48 -0
- ngio/experimental/iterators/_rois_utils.py +126 -0
- ngio/experimental/iterators/_segmentation.py +235 -0
- ngio/hcs/__init__.py +19 -0
- ngio/hcs/_plate.py +1354 -0
- ngio/images/__init__.py +44 -0
- ngio/images/_abstract_image.py +967 -0
- ngio/images/_create_synt_container.py +132 -0
- ngio/images/_create_utils.py +423 -0
- ngio/images/_image.py +926 -0
- ngio/images/_label.py +411 -0
- ngio/images/_masked_image.py +531 -0
- ngio/images/_ome_zarr_container.py +1237 -0
- ngio/images/_table_ops.py +471 -0
- ngio/io_pipes/__init__.py +75 -0
- ngio/io_pipes/_io_pipes.py +361 -0
- ngio/io_pipes/_io_pipes_masked.py +488 -0
- ngio/io_pipes/_io_pipes_roi.py +146 -0
- ngio/io_pipes/_io_pipes_types.py +56 -0
- ngio/io_pipes/_match_shape.py +377 -0
- ngio/io_pipes/_ops_axes.py +344 -0
- ngio/io_pipes/_ops_slices.py +411 -0
- ngio/io_pipes/_ops_slices_utils.py +199 -0
- ngio/io_pipes/_ops_transforms.py +104 -0
- ngio/io_pipes/_zoom_transform.py +180 -0
- ngio/ome_zarr_meta/__init__.py +65 -0
- ngio/ome_zarr_meta/_meta_handlers.py +536 -0
- ngio/ome_zarr_meta/ngio_specs/__init__.py +77 -0
- ngio/ome_zarr_meta/ngio_specs/_axes.py +515 -0
- ngio/ome_zarr_meta/ngio_specs/_channels.py +462 -0
- ngio/ome_zarr_meta/ngio_specs/_dataset.py +89 -0
- ngio/ome_zarr_meta/ngio_specs/_ngio_hcs.py +539 -0
- ngio/ome_zarr_meta/ngio_specs/_ngio_image.py +438 -0
- ngio/ome_zarr_meta/ngio_specs/_pixel_size.py +122 -0
- ngio/ome_zarr_meta/v04/__init__.py +27 -0
- ngio/ome_zarr_meta/v04/_custom_models.py +18 -0
- ngio/ome_zarr_meta/v04/_v04_spec.py +473 -0
- ngio/ome_zarr_meta/v05/__init__.py +27 -0
- ngio/ome_zarr_meta/v05/_custom_models.py +18 -0
- ngio/ome_zarr_meta/v05/_v05_spec.py +511 -0
- ngio/resources/20200812-CardiomyocyteDifferentiation14-Cycle1_B03/mask.png +0 -0
- ngio/resources/20200812-CardiomyocyteDifferentiation14-Cycle1_B03/nuclei.png +0 -0
- ngio/resources/20200812-CardiomyocyteDifferentiation14-Cycle1_B03/raw.jpg +0 -0
- ngio/resources/__init__.py +55 -0
- ngio/resources/resource_model.py +36 -0
- ngio/tables/__init__.py +43 -0
- ngio/tables/_abstract_table.py +270 -0
- ngio/tables/_tables_container.py +449 -0
- ngio/tables/backends/__init__.py +57 -0
- ngio/tables/backends/_abstract_backend.py +240 -0
- ngio/tables/backends/_anndata.py +139 -0
- ngio/tables/backends/_anndata_utils.py +90 -0
- ngio/tables/backends/_csv.py +19 -0
- ngio/tables/backends/_json.py +92 -0
- ngio/tables/backends/_parquet.py +19 -0
- ngio/tables/backends/_py_arrow_backends.py +222 -0
- ngio/tables/backends/_table_backends.py +226 -0
- ngio/tables/backends/_utils.py +608 -0
- ngio/tables/v1/__init__.py +23 -0
- ngio/tables/v1/_condition_table.py +71 -0
- ngio/tables/v1/_feature_table.py +125 -0
- ngio/tables/v1/_generic_table.py +49 -0
- ngio/tables/v1/_roi_table.py +575 -0
- ngio/transforms/__init__.py +5 -0
- ngio/transforms/_zoom.py +19 -0
- ngio/utils/__init__.py +45 -0
- ngio/utils/_cache.py +48 -0
- ngio/utils/_datasets.py +165 -0
- ngio/utils/_errors.py +37 -0
- ngio/utils/_fractal_fsspec_store.py +42 -0
- ngio/utils/_zarr_utils.py +534 -0
- ngio-0.5.0b6.dist-info/METADATA +148 -0
- ngio-0.5.0b6.dist-info/RECORD +88 -0
- ngio-0.5.0b6.dist-info/WHEEL +4 -0
- ngio-0.5.0b6.dist-info/licenses/LICENSE +28 -0
ngio/common/_roi.py
ADDED
@@ -0,0 +1,315 @@
"""Region of interest (ROI) metadata.

These are the interfaces between the ROI tables / masking ROI tables and
the ImageLikeHandler.
"""

from collections.abc import Callable, Mapping
from typing import Literal, Self

from pydantic import BaseModel, ConfigDict, Field, field_validator

from ngio.ome_zarr_meta import PixelSize
from ngio.utils import NgioValueError


def world_to_pixel(value: float, pixel_size: float, eps: float = 1e-6) -> float:
    raster_value = value / pixel_size

    # If the value is very close to an integer, round it
    # This ensures that we don't have floating point precision issues
    # When loading ROIs that were originally defined in pixel coordinates
    _rounded = round(raster_value)
    if abs(_rounded - raster_value) < eps:
        return _rounded
    return raster_value


def pixel_to_world(value: float, pixel_size: float) -> float:
    return value * pixel_size


def _join_roi_names(name1: str | None, name2: str | None) -> str | None:
    if name1 is not None and name2 is not None:
        if name1 == name2:
            return name1
        return f"{name1}:{name2}"
    return name1 or name2


def _join_roi_labels(label1: int | None, label2: int | None) -> int | None:
    if label1 is not None and label2 is not None:
        if label1 == label2:
            return label1
        raise NgioValueError("Cannot join ROIs with different labels")
    return label1 or label2


class RoiSlice(BaseModel):
    axis_name: str
    start: float | None = Field(default=None)
    length: float | None = Field(default=None, ge=0)

    model_config = ConfigDict(extra="forbid")

    @classmethod
    def _from_slice(
        cls,
        axis_name: str,
        selection: slice,
    ) -> "RoiSlice":
        start = selection.start
        length = (
            None
            if selection.stop is None or selection.start is None
            else selection.stop - selection.start
        )
        return cls(axis_name=axis_name, start=start, length=length)

    @classmethod
    def from_value(
        cls,
        axis_name: str,
        value: float | tuple[float | None, float | None] | slice,
    ) -> "RoiSlice":
        if isinstance(value, slice):
            return cls._from_slice(axis_name=axis_name, selection=value)
        elif isinstance(value, tuple):
            return cls(axis_name=axis_name, start=value[0], length=value[1])
        elif isinstance(value, int | float):
            return cls(axis_name=axis_name, start=value, length=1)
        else:
            raise TypeError(f"Unsupported type for slice value: {type(value)}")

    def __repr__(self) -> str:
        return f"{self.axis_name}: {self.start}->{self.end}"

    @property
    def end(self) -> float | None:
        if self.start is None or self.length is None:
            return None
        return self.start + self.length

    def to_slice(self) -> slice:
        return slice(self.start, self.end)

    def _is_compatible(self, other: "RoiSlice", msg: str) -> None:
        if self.axis_name != other.axis_name:
            raise NgioValueError(
                f"{msg}: Cannot operate on RoiSlices with different axis names"
            )

    def union(self, other: "RoiSlice") -> "RoiSlice":
        self._is_compatible(other, "RoiSlice union failed")
        start = min(self.start or 0, other.start or 0)
        end = max(self.end or float("inf"), other.end or float("inf"))
        length = end - start if end > start else 0
        if length == float("inf"):
            length = None
        return RoiSlice(axis_name=self.axis_name, start=start, length=length)

    def intersection(self, other: "RoiSlice") -> "RoiSlice | None":
        self._is_compatible(other, "RoiSlice intersection failed")
        start = max(self.start or 0, other.start or 0)
        end = min(self.end or float("inf"), other.end or float("inf"))
        if end <= start:
            # No intersection
            return None
        length = end - start
        if length == float("inf"):
            length = None
        return RoiSlice(axis_name=self.axis_name, start=start, length=length)

    def to_world(self, pixel_size: float) -> "RoiSlice":
        start = (
            pixel_to_world(self.start, pixel_size) if self.start is not None else None
        )
        length = (
            pixel_to_world(self.length, pixel_size) if self.length is not None else None
        )
        return RoiSlice(axis_name=self.axis_name, start=start, length=length)

    def to_pixel(self, pixel_size: float) -> "RoiSlice":
        start = (
            world_to_pixel(self.start, pixel_size) if self.start is not None else None
        )
        length = (
            world_to_pixel(self.length, pixel_size) if self.length is not None else None
        )
        return RoiSlice(axis_name=self.axis_name, start=start, length=length)

    def zoom(self, zoom_factor: float = 1.0) -> "RoiSlice":
        if zoom_factor <= 0:
            raise NgioValueError("Zoom factor must be greater than 0")
        zoom_factor -= 1.0
        if self.length is None:
            return self

        diff_length = self.length * zoom_factor
        length = self.length + diff_length
        start = max((self.start or 0) - (diff_length / 2), 0)
        return RoiSlice(axis_name=self.axis_name, start=start, length=length)


class Roi(BaseModel):
    name: str | None
    slices: list[RoiSlice] = Field(min_length=2)
    label: int | None = Field(default=None, ge=0)
    space: Literal["world", "pixel"] = "world"

    model_config = ConfigDict(extra="allow")

    @field_validator("slices")
    @classmethod
    def validate_no_duplicate_axes(cls, v: list[RoiSlice]) -> list[RoiSlice]:
        axis_names = [s.axis_name for s in v]
        if len(axis_names) != len(set(axis_names)):
            raise NgioValueError("Roi slices must have unique axis names")
        return v

    def _nice_repr__(self) -> str:
        slices_repr = ", ".join(repr(s) for s in self.slices)
        if self.label is None:
            label_str = ""
        else:
            label_str = f", label={self.label}"

        if self.name is None:
            name_str = ""
        else:
            name_str = f"name={self.name}, "
        return f"Roi({name_str}{slices_repr}{label_str}, space={self.space})"

    @classmethod
    def from_values(
        cls,
        slices: Mapping[str, float | tuple[float | None, float | None] | slice],
        name: str | None,
        label: int | None = None,
        space: Literal["world", "pixel"] = "world",
        **kwargs,
    ) -> Self:
        _slices = []
        for axis, _slice in slices.items():
            _slices.append(RoiSlice.from_value(axis_name=axis, value=_slice))
        return cls.model_construct(
            name=name, slices=_slices, label=label, space=space, **kwargs
        )

    def get(self, axis_name: str) -> RoiSlice | None:
        for roi_slice in self.slices:
            if roi_slice.axis_name == axis_name:
                return roi_slice
        return None

    def get_name(self) -> str:
        if self.name is not None:
            return self.name
        if self.label is not None:
            return str(self.label)
        return self._nice_repr__()

    @staticmethod
    def _apply_sym_ops(
        self_slices: list[RoiSlice],
        other_slices: list[RoiSlice],
        op: Callable[[RoiSlice, RoiSlice], RoiSlice | None],
    ) -> list[RoiSlice] | None:
        self_axis_dict = {s.axis_name: s for s in self_slices}
        other_axis_dict = {s.axis_name: s for s in other_slices}
        common_axis_names = self_axis_dict.keys() | other_axis_dict.keys()
        new_slices = []
        for axis_name in common_axis_names:
            slice_a = self_axis_dict.get(axis_name)
            slice_b = other_axis_dict.get(axis_name)
            if slice_a is not None and slice_b is not None:
                result = op(slice_a, slice_b)
                if result is None:
                    return None
                new_slices.append(result)
            elif slice_a is not None:
                new_slices.append(slice_a)
            elif slice_b is not None:
                new_slices.append(slice_b)
        return new_slices

    def intersection(self, other: Self) -> Self | None:
        if self.space != other.space:
            raise NgioValueError(
                "Roi intersection failed: One ROI is in pixel space and the "
                "other in world space"
            )

        out_slices = self._apply_sym_ops(
            self.slices, other.slices, op=lambda a, b: a.intersection(b)
        )
        if out_slices is None:
            return None

        name = _join_roi_names(self.name, other.name)
        label = _join_roi_labels(self.label, other.label)
        return self.model_copy(
            update={"name": name, "slices": out_slices, "label": label}
        )

    def union(self, other: Self) -> Self:
        if self.space != other.space:
            raise NgioValueError(
                "Roi union failed: One ROI is in pixel space and the "
                "other in world space"
            )

        out_slices = self._apply_sym_ops(
            self.slices, other.slices, op=lambda a, b: a.union(b)
        )
        if out_slices is None:
            raise NgioValueError("Roi union failed: could not compute union")

        name = _join_roi_names(self.name, other.name)
        label = _join_roi_labels(self.label, other.label)
        return self.model_copy(
            update={"name": name, "slices": out_slices, "label": label}
        )

    def zoom(
        self, zoom_factor: float = 1.0, axes: tuple[str, ...] = ("x", "y")
    ) -> Self:
        new_slices = []
        for roi_slice in self.slices:
            if roi_slice.axis_name in axes:
                new_slices.append(roi_slice.zoom(zoom_factor=zoom_factor))
            else:
                new_slices.append(roi_slice)
        return self.model_copy(update={"slices": new_slices})

    def to_world(self, pixel_size: PixelSize | None = None) -> Self:
        if self.space == "world":
            return self.model_copy()
        if pixel_size is None:
            raise NgioValueError(
                "Pixel sizes must be provided to convert ROI from pixel to world"
            )
        new_slices = []
        for roi_slice in self.slices:
            pixel_size_ = pixel_size.get(roi_slice.axis_name, default=1.0)
            new_slices.append(roi_slice.to_world(pixel_size=pixel_size_))
        return self.model_copy(update={"slices": new_slices, "space": "world"})

    def to_pixel(self, pixel_size: PixelSize | None = None) -> Self:
        if self.space == "pixel":
            return self.model_copy()

        if pixel_size is None:
            raise NgioValueError(
                "Pixel sizes must be provided to convert ROI from world to pixel"
            )

        new_slices = []
        for roi_slice in self.slices:
            pixel_size_ = pixel_size.get(roi_slice.axis_name, default=1.0)
            new_slices.append(roi_slice.to_pixel(pixel_size=pixel_size_))
        return self.model_copy(update={"slices": new_slices, "space": "pixel"})

    def to_slicing_dict(self, pixel_size: PixelSize | None = None) -> dict[str, slice]:
        roi = self.to_pixel(pixel_size=pixel_size)
        return {roi_slice.axis_name: roi_slice.to_slice() for roi_slice in roi.slices}
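To make the intended workflow concrete, here is a minimal, hypothetical usage sketch based only on the Roi / RoiSlice methods shown above. The axis names and extents are illustrative, both ROIs are kept in pixel space so that no PixelSize object is needed, and the private import path simply mirrors the module added in this diff (the public re-export may differ).

# Hypothetical usage sketch (not part of the package diff).
from ngio.common._roi import Roi  # import path as added in this diff

roi_a = Roi.from_values(
    slices={"y": (0, 64), "x": (0, 64)},  # (start, length) per axis
    name="roi_a",
    space="pixel",
)
roi_b = Roi.from_values(
    slices={"y": (32, 64), "x": slice(32, 96)},  # a slice(start, stop) also works
    name="roi_b",
    space="pixel",
)

overlap = roi_a.intersection(roi_b)  # None if the ROIs do not overlap
if overlap is not None:
    # Expand the overlap by 50% along x and y, then turn it into per-axis slices.
    expanded = overlap.zoom(zoom_factor=1.5, axes=("x", "y"))
    slicing = expanded.to_slicing_dict()
    print(overlap.get_name(), slicing)
    # e.g. roi_a:roi_b {"y": slice(24.0, 72.0), "x": slice(24.0, 72.0)}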
ngio/common/_synt_images_utils.py
ADDED
@@ -0,0 +1,101 @@
from math import ceil

import numpy as np


def _center_crop(arr: np.ndarray, target: int, axis: int) -> np.ndarray:
    # Center-crop the array `arr` along dimension `axis` to size `target`.
    # This assumes target < arr.shape[axis].
    n = arr.shape[axis]
    start = (n - target) // 2
    end = start + target
    slc = [slice(None)] * arr.ndim
    slc[axis] = slice(start, end)
    return arr[tuple(slc)]


def _tile_to(
    arr: np.ndarray, target: int, axis: int, label_mode: bool = False
) -> np.ndarray:
    # Tile the array `arr` along dimension `axis` to size `target`.
    # This assumes target > arr.shape[axis].
    n = arr.shape[axis]
    reps = ceil(target / n)

    tiles = []
    flip = False
    max_label = 0
    for _ in range(reps):
        if flip:
            t_arr = np.flip(arr, axis=axis)
        else:
            t_arr = 1 * arr
        if label_mode:
            # Remove duplicate labels
            t_arr = np.where(t_arr > 0, t_arr + max_label, 0)
            max_label = t_arr.max()
        tiles.append(t_arr)
        flip = not flip

    tiled = np.concatenate(tiles, axis=axis)

    slc = [slice(None)] * arr.ndim
    slc[axis] = slice(0, target)
    return tiled[tuple(slc)]


def _fit_to_shape_2d(
    src: np.ndarray, out_shape: tuple[int, int], label_mode: bool = False
) -> np.ndarray:
    """Fit a 2D array to a target shape by center-cropping or tiling as necessary."""
    out_r, out_c = out_shape
    arr = src
    if out_r < arr.shape[0]:
        arr = _center_crop(arr, out_r, axis=0)
    else:
        arr = _tile_to(arr, out_r, axis=0, label_mode=label_mode)

    if out_c < arr.shape[1]:
        arr = _center_crop(arr, out_c, axis=1)
    else:
        arr = _tile_to(arr, out_c, axis=1, label_mode=label_mode)
    return arr


def fit_to_shape(
    arr: np.ndarray, out_shape: tuple[int, ...], ensure_unique_info: bool = False
) -> np.ndarray:
    """Fit a 2D array to a target shape.

    The x,y dimensions of `arr` are fitted to the last two dimensions of
    `out_shape` by center-cropping or tiling as necessary.
    The other dimensions are broadcasted as necessary.

    WARNING: This does not zoom the image, it only crops or tiles it.

    Args:
        arr (np.ndarray): The input 2D array.
        out_shape (tuple[int, ...]): The target shape. Must have at least 2
            and at most 5 dimensions.
        ensure_unique_info (bool, optional): If True, assumes that `arr` is a label
            image and ensures that labels do not overlap when tiling. Defaults to False.

    Returns:
        np.ndarray: The fitted array with shape `out_shape`.
    """
    if len(out_shape) < 2:
        raise ValueError("`out_shape` must contain at least 2 dimensions.")

    if len(out_shape) > 5:
        raise ValueError("`out_shape` must contain at most 5 dimensions.")

    if any(d <= 0 for d in out_shape):
        raise ValueError("`out_shape` must contain positive integers.")

    if arr.ndim != 2:
        raise ValueError("`arr` must be a 2D array.")

    *_, sy, sx = out_shape
    arr = _fit_to_shape_2d(arr, out_shape=(sy, sx), label_mode=ensure_unique_info)
    arr = np.broadcast_to(arr, out_shape)
    return arr
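A minimal, hypothetical usage sketch for fit_to_shape follows (not part of the diff). It assumes the hunk above belongs to ngio/common/_synt_images_utils.py, as the +101 line count in the file listing suggests; the array sizes are illustrative.

# Hypothetical usage sketch (not part of the package diff).
import numpy as np

from ngio.common._synt_images_utils import fit_to_shape  # module path inferred from the listing

rng = np.random.default_rng(0)
nuclei = rng.integers(0, 5, size=(100, 120))  # small 2D label image

# Tile/crop the 2D source to (y=256, x=64) and broadcast it to a 2-channel stack,
# relabelling the tiled copies so labels stay unique.
fitted = fit_to_shape(nuclei, out_shape=(2, 256, 64), ensure_unique_info=True)
print(fitted.shape)  # (2, 256, 64)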
ngio/common/_zoom.py
ADDED
@@ -0,0 +1,188 @@
from functools import partial
from typing import Literal

import dask.array as da
import numpy as np
from scipy.ndimage import zoom as scipy_zoom

from ngio.utils import NgioValueError

InterpolationOrder = Literal["nearest", "linear", "cubic"]


def order_to_int(order: InterpolationOrder | Literal[0, 1, 2]) -> Literal[0, 1, 2]:
    if order == "nearest" or order == 0:
        return 0
    elif order == "linear" or order == 1:
        return 1
    elif order == "cubic" or order == 2:
        return 2
    else:
        raise NgioValueError(f"Invalid order: {order}")


def _stacked_zoom(x, zoom_y, zoom_x, order=1, mode="grid-constant", grid_mode=True):
    *rest, yshape, xshape = x.shape
    x = x.reshape(-1, yshape, xshape)
    scale_xy = (zoom_y, zoom_x)
    _x_out = [
        scipy_zoom(x[i], scale_xy, order=order, mode=mode, grid_mode=grid_mode)
        for i in range(x.shape[0])
    ]
    x_out = np.stack(_x_out)  # type: ignore (scipy_zoom returns np.ndarray, but type is not inferred correctly)
    return x_out.reshape(*rest, *x_out.shape[1:])


def fast_zoom(x, zoom, order=1, mode="grid-constant", grid_mode=True, auto_stack=True):
    """Fast zoom function.

    Scipy zoom function that can handle singleton dimensions
    but the performance degrades with the number of dimensions.

    This function has two small optimizations:
    - it removes singleton dimensions before calling zoom
    - if it detects that the zoom is only on the last two dimensions
      it stacks the first dimensions to call zoom only on the last two.
    """
    mask = np.isclose(x.shape, 1)
    # Always keep the last two dimensions
    # To avoid issues with singleton x or y dimensions
    mask[-1] = False
    mask[-2] = False
    zoom = np.array(zoom)
    singletons = tuple(np.where(mask)[0])
    xs = np.squeeze(x, axis=singletons)
    new_zoom = zoom[~mask]

    *zoom_rest, zoom_y, zoom_x = new_zoom
    if auto_stack and np.allclose(zoom_rest, 1):
        xs = _stacked_zoom(
            xs, zoom_y, zoom_x, order=order, mode=mode, grid_mode=grid_mode
        )
    else:
        xs = scipy_zoom(xs, new_zoom, order=order, mode=mode, grid_mode=grid_mode)
    x = np.expand_dims(xs, axis=singletons)  # type: ignore (scipy_zoom returns np.ndarray, but type is not inferred correctly)
    return x


def _zoom_inputs_check(
    source_array: np.ndarray | da.Array,
    scale: tuple[int | float, ...] | None = None,
    target_shape: tuple[int, ...] | None = None,
) -> tuple[np.ndarray, tuple[int, ...]]:
    if scale is None and target_shape is None:
        raise NgioValueError("Either scale or target_shape must be provided")

    if scale is not None and target_shape is not None:
        raise NgioValueError("Only one of scale or target_shape must be provided")

    if scale is None:
        assert target_shape is not None, "Target shape must be provided"
        if len(target_shape) != source_array.ndim:
            raise NgioValueError(
                "Target shape must have the "
                "same number of dimensions as "
                "the source array"
            )
        _scale = np.array(target_shape) / np.array(source_array.shape)
        _target_shape = target_shape
    else:
        _scale = np.array(scale)
        _target_shape = tuple(map(int, np.round(np.array(source_array.shape) * scale)))

    if len(_scale) != source_array.ndim:
        raise NgioValueError(
            f"Cannot scale array of shape {source_array.shape} with factors {_scale}."
            " Target shape must have the same number of dimensions as the source array."
        )

    return _scale, _target_shape


def dask_zoom(
    source_array: da.Array,
    scale: tuple[float | int, ...] | None = None,
    target_shape: tuple[int, ...] | None = None,
    order: InterpolationOrder = "linear",
) -> da.Array:
    """Dask implementation of zooming an array.

    Only one of scale or target_shape must be provided.

    Args:
        source_array (da.Array): The source array to zoom.
        scale (tuple[int, ...] | None): The scale factor to zoom by.
        target_shape (tuple[int, ...], None): The target shape to zoom to.
        order (Literal["nearest", "linear", "cubic"]): The order of interpolation.
            Defaults to "linear".

    Returns:
        da.Array: The zoomed array.
    """
    # This function follow the implementation from:
    # https://github.com/ome/ome-zarr-py/blob/master/ome_zarr/dask_utils.py
    # The module was contributed by Andreas Eisenbarth @aeisenbarth
    # See https://github.com/toloudis/ome-zarr-py/pull/
    _scale, _target_shape = _zoom_inputs_check(
        source_array=source_array, scale=scale, target_shape=target_shape
    )

    # Rechunk to better match the scaling operation
    source_chunks = np.array(source_array.chunksize)  # type: ignore (da.Array.chunksize is a tuple of ints)
    better_source_chunks = np.maximum(1, np.round(source_chunks * _scale) / _scale)
    better_source_chunks = better_source_chunks.astype(int)
    source_array = source_array.rechunk(better_source_chunks)  # type: ignore (better_source_chunks is a valid input for rechunk)

    # Calculate the block output shape
    block_output_shape = tuple(np.ceil(better_source_chunks * _scale).astype(int))

    zoom_wrapper = partial(
        fast_zoom,
        zoom=_scale,
        order=order_to_int(order),
        mode="grid-constant",
        grid_mode=True,
    )

    out_array = da.map_blocks(
        zoom_wrapper, source_array, chunks=block_output_shape, dtype=source_array.dtype
    )

    # Slice and rechunk to target
    slices = tuple(slice(0, ts, 1) for ts in _target_shape)
    out_array = out_array[slices]
    return out_array


def numpy_zoom(
    source_array: np.ndarray,
    scale: tuple[int | float, ...] | None = None,
    target_shape: tuple[int, ...] | None = None,
    order: InterpolationOrder = "linear",
) -> np.ndarray:
    """Numpy implementation of zooming an array.

    Only one of scale or target_shape must be provided.

    Args:
        source_array (np.ndarray): The source array to zoom.
        scale (tuple[int, ...] | None): The scale factor to zoom by.
        target_shape (tuple[int, ...], None): The target shape to zoom to.
        order (Literal[0, 1, 2]): The order of interpolation. Defaults to 1.

    Returns:
        np.ndarray: The zoomed array
    """
    _scale, _ = _zoom_inputs_check(
        source_array=source_array, scale=scale, target_shape=target_shape
    )

    out_array = fast_zoom(
        source_array,
        zoom=_scale,
        order=order_to_int(order),
        mode="grid-constant",
        grid_mode=True,
    )
    assert isinstance(out_array, np.ndarray)
    return out_array
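A minimal, hypothetical usage sketch for numpy_zoom and dask_zoom, based only on the signatures above; the array shapes, axis semantics, and chunking are illustrative.

# Hypothetical usage sketch (not part of the package diff).
import dask.array as da
import numpy as np

from ngio.common._zoom import dask_zoom, numpy_zoom

image = np.random.default_rng(0).random((1, 3, 1, 64, 64))  # e.g. a (t, c, z, y, x) block

# Either a per-axis scale or an explicit target shape can be given, but not both.
up = numpy_zoom(image, scale=(1, 1, 1, 2, 2), order="linear")
print(up.shape)  # (1, 3, 1, 128, 128)

lazy = da.from_array(image, chunks=(1, 1, 1, 32, 32))
down = dask_zoom(lazy, target_shape=(1, 3, 1, 32, 32), order="nearest")
print(down.shape)  # (1, 3, 1, 32, 32); evaluated lazily, block by block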
ngio/experimental/iterators/__init__.py
ADDED
@@ -0,0 +1,15 @@
"""This file is part of NGIO, a library for working with OME-Zarr data."""

from ngio.experimental.iterators._feature import FeatureExtractorIterator
from ngio.experimental.iterators._image_processing import ImageProcessingIterator
from ngio.experimental.iterators._segmentation import (
    MaskedSegmentationIterator,
    SegmentationIterator,
)

__all__ = [
    "FeatureExtractorIterator",
    "ImageProcessingIterator",
    "MaskedSegmentationIterator",
    "SegmentationIterator",
]