pymmcore-plus 0.10.2__py3-none-any.whl → 0.11.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pymmcore_plus/__init__.py +4 -1
- pymmcore_plus/_build.py +2 -0
- pymmcore_plus/_cli.py +49 -14
- pymmcore_plus/_util.py +99 -9
- pymmcore_plus/core/__init__.py +2 -0
- pymmcore_plus/core/_constants.py +109 -8
- pymmcore_plus/core/_mmcore_plus.py +69 -49
- pymmcore_plus/mda/__init__.py +2 -2
- pymmcore_plus/mda/_engine.py +151 -102
- pymmcore_plus/mda/_protocol.py +5 -3
- pymmcore_plus/mda/_runner.py +16 -21
- pymmcore_plus/mda/events/_protocol.py +10 -2
- pymmcore_plus/mda/handlers/_5d_writer_base.py +25 -13
- pymmcore_plus/mda/handlers/_img_sequence_writer.py +9 -5
- pymmcore_plus/mda/handlers/_ome_tiff_writer.py +7 -3
- pymmcore_plus/mda/handlers/_ome_zarr_writer.py +9 -4
- pymmcore_plus/mda/handlers/_tensorstore_handler.py +19 -19
- pymmcore_plus/metadata/__init__.py +36 -0
- pymmcore_plus/metadata/functions.py +343 -0
- pymmcore_plus/metadata/schema.py +471 -0
- pymmcore_plus/metadata/serialize.py +116 -0
- pymmcore_plus/model/_config_file.py +2 -4
- pymmcore_plus/model/_config_group.py +29 -3
- pymmcore_plus/model/_device.py +20 -1
- pymmcore_plus/model/_microscope.py +36 -2
- pymmcore_plus/model/_pixel_size_config.py +26 -4
- {pymmcore_plus-0.10.2.dist-info → pymmcore_plus-0.11.1.dist-info}/METADATA +6 -5
- pymmcore_plus-0.11.1.dist-info/RECORD +59 -0
- {pymmcore_plus-0.10.2.dist-info → pymmcore_plus-0.11.1.dist-info}/WHEEL +1 -1
- pymmcore_plus/core/_state.py +0 -244
- pymmcore_plus-0.10.2.dist-info/RECORD +0 -56
- {pymmcore_plus-0.10.2.dist-info → pymmcore_plus-0.11.1.dist-info}/entry_points.txt +0 -0
- {pymmcore_plus-0.10.2.dist-info → pymmcore_plus-0.11.1.dist-info}/licenses/LICENSE +0 -0
pymmcore_plus/mda/_runner.py
CHANGED

@@ -210,6 +210,10 @@ class MDARunner:
         if error is not None:
             raise error
 
+    def seconds_elapsed(self) -> float:
+        """Return the number of seconds since the start of the acquisition."""
+        return time.perf_counter() - self._t0
+
     def _outputs_connected(
         self, output: SingleOutput | Sequence[SingleOutput] | None
     ) -> ContextManager:
@@ -268,6 +272,7 @@ class MDARunner:
         teardown_event = getattr(engine, "teardown_event", lambda e: None)
         event_iterator = getattr(engine, "event_iterator", iter)
         _events: Iterator[MDAEvent] = event_iterator(events)
+        self._reset_timer()
 
         for event in _events:
             # If cancelled break out of the loop
@@ -279,13 +284,17 @@ class MDARunner:
             engine.setup_event(event)
 
             try:
+                elapsed_ms = self.seconds_elapsed() * 1000
+                # this is a bit of a hack to pass the time into the engine
+                # it is used for intra-event time calculations
+                # we pop it off after the event is executed.
+                event.metadata["runner_t0"] = self._t0
                 output = engine.exec_event(event) or ()  # in case output is None
-
                 for payload in output:
                     img, event, meta = payload
-
-
-
+                    event.metadata.pop("runner_t0", None)
+                    if "runner_time_ms" not in meta:
+                        meta["runner_time_ms"] = elapsed_ms
                     with exceptions_logged():
                         self._signals.frameReady.emit(img, event, meta)
             finally:
@@ -308,18 +317,13 @@ class MDARunner:
         self._sequence = sequence
 
         meta = self._engine.setup_sequence(sequence)
-        logger.info("MDA Started: %s", sequence)
-
         self._signals.sequenceStarted.emit(sequence, meta or {})
-
+        logger.info("MDA Started: %s", sequence)
         return self._engine
 
     def _reset_timer(self) -> None:
         self._t0 = time.perf_counter()  # reference time, in seconds
 
-    def _time_elapsed(self) -> float:
-        return time.perf_counter() - self._t0
-
     def _check_canceled(self) -> bool:
         """Return True if the cancel method has been called and emit relevant signals.
 
@@ -370,7 +374,7 @@ class MDARunner:
             go_at = event.min_start_time + self._paused_time
             # We need to enter a loop here checking paused and canceled.
             # otherwise you'll potentially wait a long time to cancel
-            remaining_wait_time = go_at - self._time_elapsed()
+            remaining_wait_time = go_at - self.seconds_elapsed()
             while remaining_wait_time > 0:
                 self._signals.awaitingEvent.emit(event, remaining_wait_time)
                 while self._paused and not self._canceled:
@@ -381,7 +385,7 @@ class MDARunner:
                 if self._canceled:
                     break
                 time.sleep(min(remaining_wait_time, 0.5))
-                remaining_wait_time = go_at - self._time_elapsed()
+                remaining_wait_time = go_at - self.seconds_elapsed()
 
         # check canceled again in case it was canceled
         # during the waiting loop
@@ -403,12 +407,3 @@ class MDARunner:
 
         logger.info("MDA Finished: %s", sequence)
         self._signals.sequenceFinished.emit(sequence)
-
-
-def _assert_handler(handler: Any) -> None:
-    if (
-        not hasattr(handler, "start")
-        or not hasattr(handler, "finish")
-        or not hasattr(handler, "put")
-    ):
-        raise TypeError("Handler must have start, finish, and put methods.")
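The net effect of the _runner.py changes: timing moves to a public seconds_elapsed() method, the timer is reset when event iteration begins, and every frame's metadata carries a runner_time_ms stamp unless the engine already supplied one. A minimal sketch of reading that stamp from user code; the demo configuration and the tiny time plan are illustrative, not part of this release:

import useq
from pymmcore_plus import CMMCorePlus

core = CMMCorePlus.instance()
core.loadSystemConfiguration()  # built-in demo configuration


@core.mda.events.frameReady.connect
def _on_frame(img, event, meta) -> None:
    # filled in by the runner (seconds_elapsed() * 1000) when the engine
    # does not provide its own value
    print(event.index, meta.get("runner_time_ms"))


core.mda.run(useq.MDASequence(time_plan={"interval": 0.1, "loops": 3}))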
pymmcore_plus/mda/events/_protocol.py
CHANGED

@@ -8,7 +8,11 @@ class PMDASignaler(Protocol):
     """Declares the protocol for all signals that will be emitted from [`pymmcore_plus.mda.MDARunner`][]."""  # noqa: E501
 
     sequenceStarted: PSignal
-    """Emits `(sequence: MDASequence, metadata: dict)` when an acquisition sequence is started.
+    """Emits `(sequence: MDASequence, metadata: dict)` when an acquisition sequence is started.
+
+    For the default [`MDAEngine`][pymmcore_plus.mda.MDAEngine], the metadata `dict` will
+    be of type [`SummaryMetaV1`][pymmcore_plus.metadata.schema.SummaryMetaV1].
+    """  # noqa: E501
     sequencePauseToggled: PSignal
     """Emits `(paused: bool)` when an acquisition sequence is paused or unpaused."""
     sequenceCanceled: PSignal
@@ -16,7 +20,11 @@ class PMDASignaler(Protocol):
     sequenceFinished: PSignal
     """Emits `(sequence: MDASequence)` when an acquisition sequence is finished."""
    frameReady: PSignal
-    """Emits `(img: np.ndarray, event: MDAEvent, metadata: dict)` after an image is acquired during an acquisition sequence.
+    """Emits `(img: np.ndarray, event: MDAEvent, metadata: dict)` after an image is acquired during an acquisition sequence.
+
+    For the default [`MDAEngine`][pymmcore_plus.mda.MDAEngine], the metadata `dict` will
+    be of type [`FrameMetaV1`][pymmcore_plus.metadata.schema.FrameMetaV1].
+    """  # noqa: E501
     awaitingEvent: PSignal
     """Emits `(event: MDAEvent, remaining_sec: float)` when the runner is waiting to start an event.
 
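The added docstrings only describe what the default engine already emits. Callbacks connected to these signals can be annotated against the new schema types; a sketch, assuming SummaryMetaV1 and FrameMetaV1 are the schema types exported by pymmcore_plus.metadata (the only key accessed below, runner_time_ms, comes from the diffs in this release):

from __future__ import annotations

from typing import TYPE_CHECKING

import numpy as np
import useq
from pymmcore_plus import CMMCorePlus

if TYPE_CHECKING:
    from pymmcore_plus.metadata import FrameMetaV1, SummaryMetaV1


def on_sequence_started(seq: useq.MDASequence, meta: SummaryMetaV1) -> None:
    # summary metadata describes the system state when the sequence starts
    print("started:", seq.uid, sorted(meta))


def on_frame_ready(img: np.ndarray, event: useq.MDAEvent, meta: FrameMetaV1) -> None:
    print(img.shape, event.index, meta.get("runner_time_ms"))


core = CMMCorePlus.instance()
core.mda.events.sequenceStarted.connect(on_sequence_started)
core.mda.events.frameReady.connect(on_frame_ready)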
pymmcore_plus/mda/handlers/_5d_writer_base.py
CHANGED

@@ -1,6 +1,6 @@
 from __future__ import annotations
 
-import json
+import warnings
 from abc import abstractmethod
 from collections import defaultdict
 from typing import TYPE_CHECKING, Generic, Mapping, Protocol, TypeVar
@@ -11,10 +11,13 @@ if TYPE_CHECKING:
     import numpy as np
     import useq
 
+    from pymmcore_plus.metadata import FrameMetaV1, SummaryMetaV1
+
 
 class SupportsSetItem(Protocol):
     def __setitem__(self, key: tuple[int, ...], value: np.ndarray) -> None: ...
 
 
+_NULL = object()
 POS_PREFIX = "p"
 T = TypeVar("T", bound="SupportsSetItem")
 
@@ -66,7 +69,7 @@ class _5DWriterBase(Generic[T]):
 
         # storage of individual frame metadata
        # maps position key to list of frame metadata
-        self.frame_metadatas: defaultdict[str, list[dict]] = defaultdict(list)
+        self.frame_metadatas: defaultdict[str, list[FrameMetaV1]] = defaultdict(list)
 
         # set during sequenceStarted and cleared during sequenceFinished
         self.current_sequence: useq.MDASequence | None = None
@@ -95,8 +98,19 @@ class _5DWriterBase(Generic[T]):
         """
         return self._position_sizes
 
-    def sequenceStarted(self, seq: useq.MDASequence) -> None:
+    def sequenceStarted(
+        self, seq: useq.MDASequence, meta: SummaryMetaV1 | object = _NULL
+    ) -> None:
         """On sequence started, simply store the sequence."""
+        # this is here for backwards compatibility with experimental viewer widget.
+        if meta is _NULL:  # pragma: no cover
+            warnings.warn(
+                "calling `sequenceStarted` without metadata as the second argument is "
+                "deprecated and will raise an exception in the future. Please propagate"
+                " metadata from the event callback.",
+                UserWarning,
+                stacklevel=2,
+            )
         self.frame_metadatas.clear()
         self.current_sequence = seq
         if seq:
@@ -115,7 +129,9 @@ class _5DWriterBase(Generic[T]):
         """
         return f"{POS_PREFIX}{position_index}"
 
-    def frameReady(self, frame: np.ndarray, event: useq.MDAEvent, meta: dict) -> None:
+    def frameReady(
+        self, frame: np.ndarray, event: useq.MDAEvent, meta: FrameMetaV1
+    ) -> None:
         """Write frame to the zarr array for the appropriate position."""
         # get the position key to store the array in the group
         p_index = event.index.get("p", 0)
@@ -142,8 +158,8 @@ class _5DWriterBase(Generic[T]):
 
         index = tuple(event.index[k] for k in pos_sizes)
         t = event.index.get("t", 0)
-        if t >= len(self._timestamps) and "ElapsedTime-ms" in meta:
-            self._timestamps.append(meta["ElapsedTime-ms"])
+        if t >= len(self._timestamps) and "runner_time_ms" in meta:
+            self._timestamps.append(meta["runner_time_ms"])
         self.write_frame(ary, index, frame)
         self.store_frame_metadata(key, event, meta)
 
@@ -189,7 +205,9 @@ class _5DWriterBase(Generic[T]):
         # WRITE DATA TO DISK
         ary[index] = frame
 
-    def store_frame_metadata(self, key: str, event: useq.MDAEvent, meta: dict) -> None:
+    def store_frame_metadata(
+        self, key: str, event: useq.MDAEvent, meta: FrameMetaV1
+    ) -> None:
         """Called during each frameReady event to store metadata for the frame.
 
         Subclasses may override this method to customize how metadata is stored for each
@@ -208,11 +226,6 @@ class _5DWriterBase(Generic[T]):
         # needn't be re-implemented in subclasses
         # default implementation is to store the metadata in self._frame_metas
         # use finalize_metadata to write to disk at the end of the sequence.
-        if meta:
-            # fix serialization MDAEvent
-            # XXX: There is already an Event object in meta, this overwrites it.
-            event_json = event.json(exclude={"sequence"}, exclude_defaults=True)
-            meta["Event"] = json.loads(event_json)
         self.frame_metadatas[key].append(meta or {})
 
     def finalize_metadata(self) -> None:
@@ -266,7 +279,6 @@ class _5DWriterBase(Generic[T]):
             raise IndexError(
                 f"Position index {p_index} out of range for {len(self.position_sizes)}"
            ) from e
-
         data = self.position_arrays[self.get_position_key(p_index)]
         full = slice(None, None)
         index = tuple(indexers.get(k, full) for k in sizes)
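For authors of custom handlers the visible contract change is that sequenceStarted now receives the summary metadata as a second argument (the one-argument form still works but emits a UserWarning), and frame timestamps are read from the runner-provided runner_time_ms key. A hypothetical minimal handler written against the new signatures (PrintHandler is not part of the package):

from __future__ import annotations

import numpy as np
import useq


class PrintHandler:
    """Hypothetical handler matching the 0.11 callback signatures."""

    def sequenceStarted(self, seq: useq.MDASequence, meta: dict) -> None:
        # meta is a SummaryMetaV1-shaped dict when the default engine is used
        print("start:", dict(seq.sizes), sorted(meta))

    def frameReady(self, frame: np.ndarray, event: useq.MDAEvent, meta: dict) -> None:
        print("frame:", event.index, meta.get("runner_time_ms"))

    def sequenceFinished(self, seq: useq.MDASequence) -> None:
        print("done:", seq.uid)

Such an object can be passed to the runner through the output argument visible in the _runner.py diff above, or connected manually to core.mda.events.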
pymmcore_plus/mda/handlers/_img_sequence_writer.py
CHANGED

@@ -7,11 +7,12 @@ provided.
 
 from __future__ import annotations
 
-import json
 from itertools import count
 from pathlib import Path
 from typing import TYPE_CHECKING, Any, Callable, ClassVar, Mapping, Sequence, cast
 
+from pymmcore_plus.metadata.serialize import json_dumps
+
 from ._util import get_full_sequence_axes
 
 if TYPE_CHECKING:
@@ -176,11 +177,13 @@ class ImageSequenceWriter:
             include_frame_count=self._include_frame_count,
         )
         # make directory and write metadata
-        self._seq_meta_file.write_text(
+        self._seq_meta_file.write_text(
+            seq.model_dump_json(exclude_unset=True, indent=2)
+        )
 
     def sequenceFinished(self, seq: useq.MDASequence) -> None:
         # write final frame metadata to disk
-        self._frame_meta_file.write_text(json.dumps(self._frame_metadata, indent=2))
+        self._frame_meta_file.write_bytes(json_dumps(self._frame_metadata, indent=2))
 
     def frameReady(self, frame: np.ndarray, event: useq.MDAEvent, meta: dict) -> None:
         """Write a frame to disk."""
@@ -199,11 +202,12 @@ class ImageSequenceWriter:
         self._imwrite(str(self._directory / filename), frame, **self._imwrite_kwargs)
 
         # store metadata
-        meta["Event"] = json.loads(event.json(exclude={"sequence"}, exclude_unset=True))
         self._frame_metadata[filename] = meta
         # write metadata to disk every 10 frames
         if frame_idx % 10 == 0:
-            self._frame_meta_file.write_text(json.dumps(self._frame_metadata, indent=2))
+            self._frame_meta_file.write_bytes(
+                json_dumps(self._frame_metadata, indent=2)
+            )
 
     @staticmethod
     def fname_template(
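Usage of ImageSequenceWriter is unchanged; only the serialization moved from the stdlib json module to pymmcore_plus.metadata.serialize.json_dumps, which returns bytes (hence the write_text to write_bytes switch). A minimal sketch, with an illustrative output directory and the built-in demo configuration:

import useq
from pymmcore_plus import CMMCorePlus
from pymmcore_plus.mda.handlers import ImageSequenceWriter

core = CMMCorePlus.instance()
core.loadSystemConfiguration()

writer = ImageSequenceWriter("/tmp/my_sequence")  # one image file per frame
seq = useq.MDASequence(channels=["DAPI"], time_plan={"interval": 0, "loops": 5})
core.mda.run(seq, output=writer)
# per-frame metadata (including runner_time_ms) ends up in the JSON file
# written next to the images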
pymmcore_plus/mda/handlers/_ome_tiff_writer.py
CHANGED

@@ -41,13 +41,15 @@ from typing import TYPE_CHECKING, Any
 
 import numpy as np
 
-from ._5d_writer_base import _5DWriterBase
+from ._5d_writer_base import _NULL, _5DWriterBase
 
 if TYPE_CHECKING:
     from pathlib import Path
 
     import useq
 
+    from pymmcore_plus.metadata import SummaryMetaV1
+
 IMAGEJ_AXIS_ORDER = "tzcyxs"
 
 
@@ -81,8 +83,10 @@ class OMETiffWriter(_5DWriterBase[np.memmap]):
 
         super().__init__()
 
-    def sequenceStarted(self, seq: useq.MDASequence) -> None:
-        super().sequenceStarted(seq)
+    def sequenceStarted(
+        self, seq: useq.MDASequence, meta: SummaryMetaV1 | object = _NULL
+    ) -> None:
+        super().sequenceStarted(seq, meta)
         # Non-OME (ImageJ) hyperstack axes MUST be in TZCYXS order
         # so we reorder the ordered position_sizes dicts. This will ensure
         # that the array indices created from event.index are in the correct order.
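OMETiffWriter itself changes only in that it forwards the new summary-metadata argument to the base class; writing works as before. A brief sketch (illustrative file path, demo configuration):

import useq
from pymmcore_plus import CMMCorePlus
from pymmcore_plus.mda.handlers import OMETiffWriter

core = CMMCorePlus.instance()
core.loadSystemConfiguration()

# summary metadata now reaches the writer's sequenceStarted automatically
core.mda.run(
    useq.MDASequence(channels=["DAPI"], z_plan={"range": 4, "step": 1}),
    output=OMETiffWriter("/tmp/acq.ome.tif"),
)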
pymmcore_plus/mda/handlers/_ome_zarr_writer.py
CHANGED

@@ -10,6 +10,8 @@ from typing import TYPE_CHECKING, Any, Literal, MutableMapping, Protocol
 
 import numpy as np
 
+from pymmcore_plus.metadata.serialize import to_builtins
+
 from ._5d_writer_base import _5DWriterBase
 
 if TYPE_CHECKING:
@@ -193,7 +195,7 @@ class OMEZarrWriter(_5DWriterBase["zarr.Array"]):
         while self.frame_metadatas:
             key, metas = self.frame_metadatas.popitem()
             if key in self.position_arrays:
-                self.position_arrays[key].attrs["frame_meta"] = metas
+                self.position_arrays[key].attrs["frame_meta"] = to_builtins(metas)
 
         if self._minify_metadata:
             self._minify_zattrs_metadata()
@@ -209,12 +211,12 @@ class OMEZarrWriter(_5DWriterBase["zarr.Array"]):
             return
 
         sizes = {**seq.sizes}
-        px = 1
+        px: float = 1.0
         if self.frame_metadatas:
             key, metas = next(iter(self.frame_metadatas.items()))
             if key in self.position_arrays:
                 shape = self.position_arrays[key].shape
-                px = metas[-1].get("PixelSizeUm", 1)
+                px = metas[-1].get("pixel_size_um", 1)
                 with suppress(IndexError):
                     sizes.update(y=shape[-2], x=shape[-1])
 
@@ -274,7 +276,10 @@ class OMEZarrWriter(_5DWriterBase["zarr.Array"]):
         self._group.attrs["multiscales"] = scales
         ary.attrs["_ARRAY_DIMENSIONS"] = dims
         if seq := self.current_sequence:
-            ary.attrs["useq_MDASequence"] =
+            ary.attrs["useq_MDASequence"] = to_builtins(
+                seq.model_dump(exclude_unset=True)
+            )
+
         return ary
 
     # # the superclass implementation is all we need
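Because the zarr attributes are now filled through to_builtins, the stored per-frame metadata (and the useq_MDASequence dump) are plain JSON-compatible objects and can be read back directly. A sketch; the in-memory default store when no path is given is assumed from the writer's existing behavior:

import useq
from pymmcore_plus import CMMCorePlus
from pymmcore_plus.mda.handlers import OMEZarrWriter

core = CMMCorePlus.instance()
core.loadSystemConfiguration()

writer = OMEZarrWriter()  # assumed in-memory store when no path is passed
core.mda.run(useq.MDASequence(time_plan={"interval": 0, "loops": 3}), output=writer)

for key, ary in writer.position_arrays.items():
    # "frame_meta" was written with to_builtins(), so these are plain dicts
    print(key, ary.attrs["frame_meta"][0].get("runner_time_ms"))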
pymmcore_plus/mda/handlers/_tensorstore_handler.py
CHANGED

@@ -1,7 +1,6 @@
 from __future__ import annotations
 
 import atexit
-import json
 import os
 import shutil
 import tempfile
@@ -10,14 +9,19 @@ from itertools import product
 from os import PathLike
 from typing import TYPE_CHECKING, Any, cast
 
+from pymmcore_plus.metadata.serialize import json_dumps, json_loads
+
 from ._util import position_sizes
 
 if TYPE_CHECKING:
-    from typing import Literal, Mapping,
+    from typing import Literal, Mapping, Sequence, TypeAlias
 
     import numpy as np
     import tensorstore as ts
     import useq
+    from typing_extensions import Self  # py311
+
+    from pymmcore_plus.metadata import FrameMetaV1, SummaryMetaV1
 
     TsDriver: TypeAlias = Literal["zarr", "zarr3", "n5", "neuroglancer_precomputed"]
     EventKey: TypeAlias = frozenset[tuple[str, int]]
@@ -107,12 +111,12 @@ class TensorStoreHandler:
 
         # storage of individual frame metadata
         # maps position key to list of frame metadata
-        self.frame_metadatas: list[tuple[useq.MDAEvent, dict]] = []
+        self.frame_metadatas: list[tuple[useq.MDAEvent, FrameMetaV1]] = []
 
         self._size_increment = 300
 
         self._store: ts.TensorStore | None = None
-        self._futures: list[ts.Future] = []
+        self._futures: list[ts.Future | ts.WriteFutures] = []
         self._frame_indices: dict[EventKey, int | ts.DimExpression] = {}
 
         # "_nd_storage" means we're greedily attempting to store the data in a
@@ -170,7 +174,7 @@ class TensorStoreHandler:
 
         return cls(path=path, **kwargs)
 
-    def sequenceStarted(self, seq: useq.MDASequence) -> None:
+    def sequenceStarted(self, seq: useq.MDASequence, meta: SummaryMetaV1) -> None:
         """On sequence started, simply store the sequence."""
         self._frame_index = 0
         self._store = None
@@ -192,7 +196,9 @@ class TensorStoreHandler:
         if self.frame_metadatas:
             self.finalize_metadata()
 
-    def frameReady(self, frame: np.ndarray, event: useq.MDAEvent, meta: dict) -> None:
+    def frameReady(
+        self, frame: np.ndarray, event: useq.MDAEvent, meta: FrameMetaV1
+    ) -> None:
         """Write frame to the zarr array for the appropriate position."""
         if self._store is None:
             self._store = self.new_store(frame, event.sequence, meta).result()
@@ -230,7 +236,7 @@ class TensorStoreHandler:
         return self._store[ts_index].read().result().squeeze()  # type: ignore
 
     def new_store(
-        self, frame: np.ndarray, seq: useq.MDASequence | None, meta: dict
+        self, frame: np.ndarray, seq: useq.MDASequence | None, meta: FrameMetaV1
     ) -> ts.Future[ts.TensorStore]:
         shape, chunks, labels = self.get_shape_chunks_labels(frame.shape, seq)
         self._nd_storage = FRAME_DIM not in labels
@@ -287,14 +293,7 @@ class TensorStoreHandler:
         if not (store := self._store) or not store.kvstore:
             return  # pragma: no cover
 
-
-        for event, meta in self.frame_metadatas:
-            # FIXME: unnecessary ser/des
-            js = event.model_dump_json(exclude={"sequence"}, exclude_defaults=True)
-            meta["Event"] = json.loads(js)
-            data.append(meta)
-
-        metadata = {"frame_metadatas": data}
+        metadata = {"frame_metadatas": [m[1] for m in self.frame_metadatas]}
         if not self._nd_storage:
             metadata["frame_indices"] = [
                 (tuple(dict(k).items()), v)  # type: ignore
@@ -302,11 +301,11 @@ class TensorStoreHandler:
             ]
 
         if self.ts_driver.startswith("zarr"):
-            store.kvstore.write(".zattrs", json.dumps(metadata))
+            store.kvstore.write(".zattrs", json_dumps(metadata).decode("utf-8"))
         elif self.ts_driver == "n5":  # pragma: no cover
-            attrs = json.loads(store.kvstore.read("attributes.json").result().value)
+            attrs = json_loads(store.kvstore.read("attributes.json").result().value)
             attrs.update(metadata)
-            store.kvstore.write("attributes.json", json.dumps(attrs))
+            store.kvstore.write("attributes.json", json_dumps(attrs).decode("utf-8"))
 
     def _expand_store(self, store: ts.TensorStore) -> ts.Future[ts.TensorStore]:
         """Grow the store by `self._size_increment` frames.
@@ -324,7 +323,8 @@ class TensorStoreHandler:
         The return value is safe to use as an index to self._store[...]
         """
         if self._nd_storage:
-
+            keys, values = zip(*index.items())
+            return self._ts.d[keys][values]
 
         if any(isinstance(v, slice) for v in index.values()):
             idx: list | int | ts.DimExpression = self._get_frame_indices(index)
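For TensorStoreHandler the public surface is the same; the handler now writes its collected metadata with the package's json_dumps/json_loads helpers and stores the frame dicts as-is, without injecting an "Event" copy into each one. A minimal sketch using the path argument that appears in the diff above (the driver defaults to zarr; the path is illustrative):

import useq
from pymmcore_plus import CMMCorePlus
from pymmcore_plus.mda.handlers import TensorStoreHandler

core = CMMCorePlus.instance()
core.loadSystemConfiguration()

handler = TensorStoreHandler(path="/tmp/acq.zarr")  # zarr driver by default
core.mda.run(useq.MDASequence(time_plan={"interval": 0, "loops": 4}), output=handler)
# the collected frame metadata is written to the store's .zattrs under
# "frame_metadatas", serialized with json_dumps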
pymmcore_plus/metadata/__init__.py
ADDED

@@ -0,0 +1,36 @@
+from .functions import frame_metadata, summary_metadata
+from .schema import (
+    ConfigGroup,
+    ConfigPreset,
+    DeviceInfo,
+    FrameMetaV1,
+    ImageInfo,
+    PixelSizeConfigPreset,
+    Position,
+    PropertyInfo,
+    PropertyValue,
+    StagePosition,
+    SummaryMetaV1,
+    SystemInfo,
+)
+from .serialize import json_dumps, to_builtins
+
+__all__ = [
+    "ConfigGroup",
+    "ConfigPreset",
+    "ConfigPreset",
+    "DeviceInfo",
+    "frame_metadata",
+    "FrameMetaV1",
+    "ImageInfo",
+    "json_dumps",
+    "PixelSizeConfigPreset",
+    "Position",
+    "PropertyInfo",
+    "PropertyValue",
+    "StagePosition",
+    "summary_metadata",
+    "SummaryMetaV1",
+    "SystemInfo",
+    "to_builtins",
+]
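The new pymmcore_plus.metadata package gathers the builders (functions.py), the typed schema (schema.py), and the serialization helpers (serialize.py) re-exported above. A sketch of how they fit together; the exact call signatures of summary_metadata and frame_metadata live in functions.py and are assumed here to accept the core instance as their first argument:

from pymmcore_plus import CMMCorePlus
from pymmcore_plus.metadata import (
    frame_metadata,
    json_dumps,
    summary_metadata,
    to_builtins,
)

core = CMMCorePlus.instance()
core.loadSystemConfiguration()

summary = summary_metadata(core)  # SummaryMetaV1-shaped dict (system/device state)
frame = frame_metadata(core)      # FrameMetaV1-shaped dict (per-frame snapshot)

print(json_dumps(summary).decode())  # json_dumps returns bytes (see the diffs above)
print(to_builtins(frame))            # plain builtins, ready for any JSON/YAML encoder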