pymmcore-plus 0.9.5__tar.gz → 0.10.1__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {pymmcore_plus-0.9.5 → pymmcore_plus-0.10.1}/PKG-INFO +4 -2
- {pymmcore_plus-0.9.5 → pymmcore_plus-0.10.1}/README.md +1 -1
- {pymmcore_plus-0.9.5 → pymmcore_plus-0.10.1}/pyproject.toml +5 -1
- {pymmcore_plus-0.9.5 → pymmcore_plus-0.10.1}/src/pymmcore_plus/mda/_engine.py +24 -11
- {pymmcore_plus-0.9.5 → pymmcore_plus-0.10.1}/src/pymmcore_plus/mda/_runner.py +20 -19
- pymmcore_plus-0.10.1/src/pymmcore_plus/mda/handlers/__init__.py +11 -0
- {pymmcore_plus-0.9.5 → pymmcore_plus-0.10.1}/src/pymmcore_plus/mda/handlers/_ome_zarr_writer.py +1 -1
- pymmcore_plus-0.10.1/src/pymmcore_plus/mda/handlers/_tensorstore_handler.py +369 -0
- pymmcore_plus-0.9.5/tests/io/test_ome_zarr.py → pymmcore_plus-0.10.1/tests/io/test_zarr_writers.py +85 -13
- pymmcore_plus-0.9.5/src/pymmcore_plus/mda/handlers/__init__.py +0 -5
- {pymmcore_plus-0.9.5 → pymmcore_plus-0.10.1}/.gitignore +0 -0
- {pymmcore_plus-0.9.5 → pymmcore_plus-0.10.1}/LICENSE +0 -0
- {pymmcore_plus-0.9.5 → pymmcore_plus-0.10.1}/src/pymmcore_plus/__init__.py +0 -0
- {pymmcore_plus-0.9.5 → pymmcore_plus-0.10.1}/src/pymmcore_plus/_build.py +0 -0
- {pymmcore_plus-0.9.5 → pymmcore_plus-0.10.1}/src/pymmcore_plus/_cli.py +0 -0
- {pymmcore_plus-0.9.5 → pymmcore_plus-0.10.1}/src/pymmcore_plus/_logger.py +0 -0
- {pymmcore_plus-0.9.5 → pymmcore_plus-0.10.1}/src/pymmcore_plus/_util.py +0 -0
- {pymmcore_plus-0.9.5 → pymmcore_plus-0.10.1}/src/pymmcore_plus/core/__init__.py +0 -0
- {pymmcore_plus-0.9.5 → pymmcore_plus-0.10.1}/src/pymmcore_plus/core/_adapter.py +0 -0
- {pymmcore_plus-0.9.5 → pymmcore_plus-0.10.1}/src/pymmcore_plus/core/_config.py +0 -0
- {pymmcore_plus-0.9.5 → pymmcore_plus-0.10.1}/src/pymmcore_plus/core/_config_group.py +0 -0
- {pymmcore_plus-0.9.5 → pymmcore_plus-0.10.1}/src/pymmcore_plus/core/_constants.py +0 -0
- {pymmcore_plus-0.9.5 → pymmcore_plus-0.10.1}/src/pymmcore_plus/core/_device.py +0 -0
- {pymmcore_plus-0.9.5 → pymmcore_plus-0.10.1}/src/pymmcore_plus/core/_metadata.py +0 -0
- {pymmcore_plus-0.9.5 → pymmcore_plus-0.10.1}/src/pymmcore_plus/core/_mmcore_plus.py +0 -0
- {pymmcore_plus-0.9.5 → pymmcore_plus-0.10.1}/src/pymmcore_plus/core/_property.py +0 -0
- {pymmcore_plus-0.9.5 → pymmcore_plus-0.10.1}/src/pymmcore_plus/core/_sequencing.py +0 -0
- {pymmcore_plus-0.9.5 → pymmcore_plus-0.10.1}/src/pymmcore_plus/core/_state.py +0 -0
- {pymmcore_plus-0.9.5 → pymmcore_plus-0.10.1}/src/pymmcore_plus/core/events/__init__.py +0 -0
- {pymmcore_plus-0.9.5 → pymmcore_plus-0.10.1}/src/pymmcore_plus/core/events/_device_signal_view.py +0 -0
- {pymmcore_plus-0.9.5 → pymmcore_plus-0.10.1}/src/pymmcore_plus/core/events/_norm_slot.py +0 -0
- {pymmcore_plus-0.9.5 → pymmcore_plus-0.10.1}/src/pymmcore_plus/core/events/_prop_event_mixin.py +0 -0
- {pymmcore_plus-0.9.5 → pymmcore_plus-0.10.1}/src/pymmcore_plus/core/events/_protocol.py +0 -0
- {pymmcore_plus-0.9.5 → pymmcore_plus-0.10.1}/src/pymmcore_plus/core/events/_psygnal.py +0 -0
- {pymmcore_plus-0.9.5 → pymmcore_plus-0.10.1}/src/pymmcore_plus/core/events/_qsignals.py +0 -0
- {pymmcore_plus-0.9.5 → pymmcore_plus-0.10.1}/src/pymmcore_plus/install.py +0 -0
- {pymmcore_plus-0.9.5 → pymmcore_plus-0.10.1}/src/pymmcore_plus/mda/__init__.py +0 -0
- {pymmcore_plus-0.9.5 → pymmcore_plus-0.10.1}/src/pymmcore_plus/mda/_protocol.py +0 -0
- {pymmcore_plus-0.9.5 → pymmcore_plus-0.10.1}/src/pymmcore_plus/mda/_thread_relay.py +0 -0
- {pymmcore_plus-0.9.5 → pymmcore_plus-0.10.1}/src/pymmcore_plus/mda/events/__init__.py +0 -0
- {pymmcore_plus-0.9.5 → pymmcore_plus-0.10.1}/src/pymmcore_plus/mda/events/_protocol.py +0 -0
- {pymmcore_plus-0.9.5 → pymmcore_plus-0.10.1}/src/pymmcore_plus/mda/events/_psygnal.py +0 -0
- {pymmcore_plus-0.9.5 → pymmcore_plus-0.10.1}/src/pymmcore_plus/mda/events/_qsignals.py +0 -0
- {pymmcore_plus-0.9.5 → pymmcore_plus-0.10.1}/src/pymmcore_plus/mda/handlers/_5d_writer_base.py +0 -0
- {pymmcore_plus-0.9.5 → pymmcore_plus-0.10.1}/src/pymmcore_plus/mda/handlers/_img_sequence_writer.py +0 -0
- {pymmcore_plus-0.9.5 → pymmcore_plus-0.10.1}/src/pymmcore_plus/mda/handlers/_ome_tiff_writer.py +0 -0
- {pymmcore_plus-0.9.5 → pymmcore_plus-0.10.1}/src/pymmcore_plus/mda/handlers/_util.py +0 -0
- {pymmcore_plus-0.9.5 → pymmcore_plus-0.10.1}/src/pymmcore_plus/model/__init__.py +0 -0
- {pymmcore_plus-0.9.5 → pymmcore_plus-0.10.1}/src/pymmcore_plus/model/_config_file.py +0 -0
- {pymmcore_plus-0.9.5 → pymmcore_plus-0.10.1}/src/pymmcore_plus/model/_config_group.py +0 -0
- {pymmcore_plus-0.9.5 → pymmcore_plus-0.10.1}/src/pymmcore_plus/model/_core_device.py +0 -0
- {pymmcore_plus-0.9.5 → pymmcore_plus-0.10.1}/src/pymmcore_plus/model/_core_link.py +0 -0
- {pymmcore_plus-0.9.5 → pymmcore_plus-0.10.1}/src/pymmcore_plus/model/_device.py +0 -0
- {pymmcore_plus-0.9.5 → pymmcore_plus-0.10.1}/src/pymmcore_plus/model/_microscope.py +0 -0
- {pymmcore_plus-0.9.5 → pymmcore_plus-0.10.1}/src/pymmcore_plus/model/_pixel_size_config.py +0 -0
- {pymmcore_plus-0.9.5 → pymmcore_plus-0.10.1}/src/pymmcore_plus/model/_property.py +0 -0
- {pymmcore_plus-0.9.5 → pymmcore_plus-0.10.1}/src/pymmcore_plus/py.typed +0 -0
- {pymmcore_plus-0.9.5 → pymmcore_plus-0.10.1}/src/pymmcore_plus/seq_tester.py +0 -0
- {pymmcore_plus-0.9.5 → pymmcore_plus-0.10.1}/tests/__init__.py +0 -0
- {pymmcore_plus-0.9.5 → pymmcore_plus-0.10.1}/tests/conftest.py +0 -0
- {pymmcore_plus-0.9.5 → pymmcore_plus-0.10.1}/tests/io/test_image_sequence_writer.py +0 -0
- {pymmcore_plus-0.9.5 → pymmcore_plus-0.10.1}/tests/io/test_ome_tiff.py +0 -0
- {pymmcore_plus-0.9.5 → pymmcore_plus-0.10.1}/tests/local_config.cfg +0 -0
- {pymmcore_plus-0.9.5 → pymmcore_plus-0.10.1}/tests/test_adapter_class.py +0 -0
- {pymmcore_plus-0.9.5 → pymmcore_plus-0.10.1}/tests/test_bench.py +0 -0
- {pymmcore_plus-0.9.5 → pymmcore_plus-0.10.1}/tests/test_cli.py +0 -0
- {pymmcore_plus-0.9.5 → pymmcore_plus-0.10.1}/tests/test_config_group_class.py +0 -0
- {pymmcore_plus-0.9.5 → pymmcore_plus-0.10.1}/tests/test_core.py +0 -0
- {pymmcore_plus-0.9.5 → pymmcore_plus-0.10.1}/tests/test_device_class.py +0 -0
- {pymmcore_plus-0.9.5 → pymmcore_plus-0.10.1}/tests/test_events.py +0 -0
- {pymmcore_plus-0.9.5 → pymmcore_plus-0.10.1}/tests/test_mda.py +0 -0
- {pymmcore_plus-0.9.5 → pymmcore_plus-0.10.1}/tests/test_misc.py +0 -0
- {pymmcore_plus-0.9.5 → pymmcore_plus-0.10.1}/tests/test_model.py +0 -0
- {pymmcore_plus-0.9.5 → pymmcore_plus-0.10.1}/tests/test_pixel_config_class.py +0 -0
- {pymmcore_plus-0.9.5 → pymmcore_plus-0.10.1}/tests/test_property_class.py +0 -0
- {pymmcore_plus-0.9.5 → pymmcore_plus-0.10.1}/tests/test_sequencing.py +0 -0
- {pymmcore_plus-0.9.5 → pymmcore_plus-0.10.1}/tests/test_thread_relay.py +0 -0
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
Metadata-Version: 2.3
|
|
2
2
|
Name: pymmcore-plus
|
|
3
|
-
Version: 0.
|
|
3
|
+
Version: 0.10.1
|
|
4
4
|
Summary: pymmcore superset providing improved APIs, event handling, and a pure python acquisition engine
|
|
5
5
|
Project-URL: Source, https://github.com/pymmcore-plus/pymmcore-plus
|
|
6
6
|
Project-URL: Tracker, https://github.com/pymmcore-plus/pymmcore-plus/issues
|
|
@@ -30,6 +30,7 @@ Requires-Dist: platformdirs>=3.0.0
|
|
|
30
30
|
Requires-Dist: psygnal>=0.7
|
|
31
31
|
Requires-Dist: pymmcore>=10.7.0.71.0
|
|
32
32
|
Requires-Dist: rich>=10.2.0
|
|
33
|
+
Requires-Dist: tensorstore
|
|
33
34
|
Requires-Dist: typer>=0.4.2
|
|
34
35
|
Requires-Dist: typing-extensions
|
|
35
36
|
Requires-Dist: useq-schema>=0.4.7
|
|
@@ -43,6 +44,7 @@ Requires-Dist: mypy; extra == 'dev'
|
|
|
43
44
|
Requires-Dist: pdbpp; extra == 'dev'
|
|
44
45
|
Requires-Dist: pre-commit; extra == 'dev'
|
|
45
46
|
Requires-Dist: ruff; extra == 'dev'
|
|
47
|
+
Requires-Dist: tensorstore-stubs; extra == 'dev'
|
|
46
48
|
Provides-Extra: docs
|
|
47
49
|
Requires-Dist: mkdocs-material; extra == 'docs'
|
|
48
50
|
Requires-Dist: mkdocs>=1.4; extra == 'docs'
|
|
@@ -70,7 +72,7 @@ Description-Content-Type: text/markdown
|
|
|
70
72
|
[](https://pypi.org/project/pymmcore-plus)
|
|
71
73
|
[](https://pypi.org/project/pymmcore-plus)
|
|
72
74
|
[](https://anaconda.org/conda-forge/pymmcore-plus)
|
|
73
|
-
[](https://github.com/pymmcore-plus/pymmcore-plus/actions/workflows/ci.yml)
|
|
74
76
|
[](https://pymmcore-plus.github.io/pymmcore-plus/)
|
|
75
77
|
[](https://codecov.io/gh/pymmcore-plus/pymmcore-plus)
|
|
76
78
|
[](https://codspeed.io/pymmcore-plus/pymmcore-plus)
|
|
@@ -4,7 +4,7 @@
|
|
|
4
4
|
[](https://pypi.org/project/pymmcore-plus)
|
|
5
5
|
[](https://pypi.org/project/pymmcore-plus)
|
|
6
6
|
[](https://anaconda.org/conda-forge/pymmcore-plus)
|
|
7
|
-
[](https://github.com/pymmcore-plus/pymmcore-plus/actions/workflows/ci.yml)
|
|
8
8
|
[](https://pymmcore-plus.github.io/pymmcore-plus/)
|
|
9
9
|
[](https://codecov.io/gh/pymmcore-plus/pymmcore-plus)
|
|
10
10
|
[](https://codspeed.io/pymmcore-plus/pymmcore-plus)
|
|
@@ -42,6 +42,7 @@ dependencies = [
|
|
|
42
42
|
"typing-extensions", # not actually required at runtime
|
|
43
43
|
"useq-schema >=0.4.7",
|
|
44
44
|
"wrapt >=1.14",
|
|
45
|
+
"tensorstore",
|
|
45
46
|
# cli requirements included by default for now
|
|
46
47
|
"typer >=0.4.2",
|
|
47
48
|
"rich >=10.2.0",
|
|
@@ -64,7 +65,7 @@ test = [
|
|
|
64
65
|
"zarr >=2.2",
|
|
65
66
|
"xarray",
|
|
66
67
|
]
|
|
67
|
-
dev = ["ipython", "mypy", "pdbpp", "pre-commit", "ruff"]
|
|
68
|
+
dev = ["ipython", "mypy", "pdbpp", "pre-commit", "ruff", "tensorstore-stubs"]
|
|
68
69
|
docs = [
|
|
69
70
|
"mkdocs >=1.4",
|
|
70
71
|
"mkdocs-material",
|
|
@@ -181,3 +182,6 @@ ignore = [
|
|
|
181
182
|
"tests/**/*",
|
|
182
183
|
"tox.ini",
|
|
183
184
|
]
|
|
185
|
+
|
|
186
|
+
[tool.typos.default]
|
|
187
|
+
extend-ignore-identifiers-re = ["(?i)nd2?.*", "(?i)ome"]
|
|
@@ -176,7 +176,7 @@ class MDAEngine(PMDAEngine):
|
|
|
176
176
|
# skip if no autofocus device is found
|
|
177
177
|
if not self._mmc.getAutoFocusDevice():
|
|
178
178
|
logger.warning("No autofocus device found. Cannot execute autofocus.")
|
|
179
|
-
return
|
|
179
|
+
return
|
|
180
180
|
|
|
181
181
|
try:
|
|
182
182
|
# execute hardware autofocus
|
|
@@ -191,7 +191,7 @@ class MDAEngine(PMDAEngine):
|
|
|
191
191
|
self._z_correction[p_idx] = new_correction + self._z_correction.get(
|
|
192
192
|
p_idx, 0.0
|
|
193
193
|
)
|
|
194
|
-
return
|
|
194
|
+
return
|
|
195
195
|
|
|
196
196
|
# if the autofocus was engaged at the start of the sequence AND autofocus action
|
|
197
197
|
# did not fail, re-engage it. NOTE: we need to do that AFTER the runner calls
|
|
@@ -283,7 +283,7 @@ class MDAEngine(PMDAEngine):
|
|
|
283
283
|
self._mmc.snapImage()
|
|
284
284
|
except Exception as e:
|
|
285
285
|
logger.warning("Failed to snap image. %s", e)
|
|
286
|
-
return
|
|
286
|
+
return
|
|
287
287
|
if not event.keep_shutter_open:
|
|
288
288
|
self._mmc.setShutterOpen(False)
|
|
289
289
|
yield ImagePayload(self._mmc.getImage(), event, self.get_frame_metadata())
|
|
@@ -326,8 +326,20 @@ class MDAEngine(PMDAEngine):
|
|
|
326
326
|
"""Teardown state of system (hardware, etc.) after `event`."""
|
|
327
327
|
# autoshutter was set at the beginning of the sequence, and this event
|
|
328
328
|
# doesn't want to leave the shutter open. Re-enable autoshutter.
|
|
329
|
+
core = self._mmc
|
|
329
330
|
if not event.keep_shutter_open and self._autoshutter_was_set:
|
|
330
|
-
|
|
331
|
+
core.setAutoShutter(True)
|
|
332
|
+
# FIXME: this may not be hitting as intended...
|
|
333
|
+
# https://github.com/pymmcore-plus/pymmcore-plus/pull/353#issuecomment-2159176491
|
|
334
|
+
if isinstance(event, SequencedEvent):
|
|
335
|
+
if event.exposure_sequence:
|
|
336
|
+
core.stopExposureSequence(self._mmc.getCameraDevice())
|
|
337
|
+
if event.x_sequence:
|
|
338
|
+
core.stopXYStageSequence(core.getXYStageDevice())
|
|
339
|
+
if event.z_sequence:
|
|
340
|
+
core.stopStageSequence(core.getFocusDevice())
|
|
341
|
+
for dev, prop in event.property_sequences(core):
|
|
342
|
+
core.stopPropertySequence(dev, prop)
|
|
331
343
|
|
|
332
344
|
def teardown_sequence(self, sequence: MDASequence) -> None:
|
|
333
345
|
"""Perform any teardown required after the sequence has been executed."""
|
|
@@ -346,22 +358,23 @@ class MDAEngine(PMDAEngine):
|
|
|
346
358
|
cam_device = self._mmc.getCameraDevice()
|
|
347
359
|
|
|
348
360
|
if event.exposure_sequence:
|
|
361
|
+
with suppress(RuntimeError):
|
|
362
|
+
core.stopExposureSequence(cam_device)
|
|
349
363
|
core.loadExposureSequence(cam_device, event.exposure_sequence)
|
|
350
364
|
if event.x_sequence: # y_sequence is implied and will be the same length
|
|
351
365
|
stage = core.getXYStageDevice()
|
|
366
|
+
with suppress(RuntimeError):
|
|
367
|
+
core.stopXYStageSequence(stage)
|
|
352
368
|
core.loadXYStageSequence(stage, event.x_sequence, event.y_sequence)
|
|
353
369
|
if event.z_sequence:
|
|
354
|
-
# these notes are from Nico Stuurman in AcqEngJ
|
|
355
|
-
# https://github.com/micro-manager/AcqEngJ/pull/108
|
|
356
|
-
# at least some zStages freak out (in this case, NIDAQ board) when you
|
|
357
|
-
# try to load a sequence while the sequence is still running. Nothing in
|
|
358
|
-
# the engine stops a stage sequence if all goes well.
|
|
359
|
-
# Stopping a sequence if it is not running hopefully will not harm anyone.
|
|
360
370
|
zstage = core.getFocusDevice()
|
|
361
|
-
|
|
371
|
+
with suppress(RuntimeError):
|
|
372
|
+
core.stopStageSequence(zstage)
|
|
362
373
|
core.loadStageSequence(zstage, event.z_sequence)
|
|
363
374
|
if prop_seqs := event.property_sequences(core):
|
|
364
375
|
for (dev, prop), value_sequence in prop_seqs.items():
|
|
376
|
+
with suppress(RuntimeError):
|
|
377
|
+
core.stopPropertySequence(dev, prop)
|
|
365
378
|
core.loadPropertySequence(dev, prop, value_sequence)
|
|
366
379
|
|
|
367
380
|
# TODO: SLM
|
|
@@ -89,7 +89,7 @@ class MDARunner:
|
|
|
89
89
|
if self.is_running(): # pragma: no cover
|
|
90
90
|
raise RuntimeError(
|
|
91
91
|
"Cannot register a new engine when the current engine is running "
|
|
92
|
-
"an
|
|
92
|
+
"an acquisition. Please cancel the current engine's acquisition "
|
|
93
93
|
"before registering"
|
|
94
94
|
)
|
|
95
95
|
|
|
@@ -115,7 +115,7 @@ class MDARunner:
|
|
|
115
115
|
return self._signals
|
|
116
116
|
|
|
117
117
|
def is_running(self) -> bool:
|
|
118
|
-
"""Return True if an
|
|
118
|
+
"""Return True if an acquisition is currently underway.
|
|
119
119
|
|
|
120
120
|
This will return True at any point between the emission of the
|
|
121
121
|
[`sequenceStarted`][pymmcore_plus.mda.PMDASignaler.sequenceStarted] and
|
|
@@ -125,19 +125,19 @@ class MDARunner:
|
|
|
125
125
|
Returns
|
|
126
126
|
-------
|
|
127
127
|
bool
|
|
128
|
-
Whether an
|
|
128
|
+
Whether an acquisition is underway.
|
|
129
129
|
"""
|
|
130
130
|
return self._running
|
|
131
131
|
|
|
132
132
|
def is_paused(self) -> bool:
|
|
133
|
-
"""Return True if the
|
|
133
|
+
"""Return True if the acquisition is currently paused.
|
|
134
134
|
|
|
135
135
|
Use `toggle_pause` to change the paused state.
|
|
136
136
|
|
|
137
137
|
Returns
|
|
138
138
|
-------
|
|
139
139
|
bool
|
|
140
|
-
Whether the current
|
|
140
|
+
Whether the current acquisition is paused.
|
|
141
141
|
"""
|
|
142
142
|
return self._paused
|
|
143
143
|
|
|
@@ -145,7 +145,7 @@ class MDARunner:
|
|
|
145
145
|
"""Cancel the currently running acquisition.
|
|
146
146
|
|
|
147
147
|
This is a no-op if no acquisition is currently running.
|
|
148
|
-
If an acquisition is running then this will cancel the
|
|
148
|
+
If an acquisition is running then this will cancel the acquisition and
|
|
149
149
|
a sequenceCanceled signal, followed by a sequenceFinished signal will
|
|
150
150
|
be emitted.
|
|
151
151
|
"""
|
|
@@ -157,7 +157,7 @@ class MDARunner:
|
|
|
157
157
|
|
|
158
158
|
To get whether the acquisition is currently paused use the
|
|
159
159
|
[`is_paused`][pymmcore_plus.mda.MDARunner.is_paused] method. This method is a
|
|
160
|
-
no-op if no
|
|
160
|
+
no-op if no acquisition is currently underway.
|
|
161
161
|
"""
|
|
162
162
|
if self.is_running():
|
|
163
163
|
self._paused = not self._paused
|
|
@@ -169,7 +169,7 @@ class MDARunner:
|
|
|
169
169
|
*,
|
|
170
170
|
output: SingleOutput | Sequence[SingleOutput] | None = None,
|
|
171
171
|
) -> None:
|
|
172
|
-
"""Run the multi-dimensional
|
|
172
|
+
"""Run the multi-dimensional acquisition defined by `sequence`.
|
|
173
173
|
|
|
174
174
|
Most users should not use this directly as it will block further
|
|
175
175
|
execution. Instead, use the
|
|
@@ -278,17 +278,18 @@ class MDARunner:
|
|
|
278
278
|
logger.info("%s", event)
|
|
279
279
|
engine.setup_event(event)
|
|
280
280
|
|
|
281
|
-
|
|
282
|
-
|
|
283
|
-
|
|
284
|
-
|
|
285
|
-
|
|
286
|
-
|
|
287
|
-
|
|
288
|
-
|
|
289
|
-
|
|
290
|
-
|
|
291
|
-
|
|
281
|
+
try:
|
|
282
|
+
output = engine.exec_event(event) or () # in case output is None
|
|
283
|
+
|
|
284
|
+
for payload in output:
|
|
285
|
+
img, event, meta = payload
|
|
286
|
+
if "PerfCounter" in meta:
|
|
287
|
+
meta["ElapsedTime-ms"] = (meta["PerfCounter"] - self._t0) * 1000
|
|
288
|
+
meta["Event"] = event
|
|
289
|
+
with exceptions_logged():
|
|
290
|
+
self._signals.frameReady.emit(img, event, meta)
|
|
291
|
+
finally:
|
|
292
|
+
teardown_event(event)
|
|
292
293
|
|
|
293
294
|
def _prepare_to_run(self, sequence: MDASequence) -> PMDAEngine:
|
|
294
295
|
"""Set up for the MDA run.
|
|
@@ -0,0 +1,11 @@
|
|
|
1
|
+
from ._img_sequence_writer import ImageSequenceWriter
|
|
2
|
+
from ._ome_tiff_writer import OMETiffWriter
|
|
3
|
+
from ._ome_zarr_writer import OMEZarrWriter
|
|
4
|
+
from ._tensorstore_handler import TensorStoreHandler
|
|
5
|
+
|
|
6
|
+
__all__ = [
|
|
7
|
+
"ImageSequenceWriter",
|
|
8
|
+
"OMEZarrWriter",
|
|
9
|
+
"OMETiffWriter",
|
|
10
|
+
"TensorStoreHandler",
|
|
11
|
+
]
|
|
@@ -0,0 +1,369 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
import atexit
|
|
4
|
+
import json
|
|
5
|
+
import os
|
|
6
|
+
import shutil
|
|
7
|
+
import tempfile
|
|
8
|
+
import warnings
|
|
9
|
+
from itertools import product
|
|
10
|
+
from os import PathLike
|
|
11
|
+
from typing import TYPE_CHECKING, Any, cast
|
|
12
|
+
|
|
13
|
+
from ._util import position_sizes
|
|
14
|
+
|
|
15
|
+
if TYPE_CHECKING:
|
|
16
|
+
from typing import Literal, Mapping, Self, Sequence, TypeAlias
|
|
17
|
+
|
|
18
|
+
import numpy as np
|
|
19
|
+
import tensorstore as ts
|
|
20
|
+
import useq
|
|
21
|
+
|
|
22
|
+
TsDriver: TypeAlias = Literal["zarr", "zarr3", "n5", "neuroglancer_precomputed"]
|
|
23
|
+
EventKey: TypeAlias = frozenset[tuple[str, int]]
|
|
24
|
+
|
|
25
|
+
# special dimension label used when _nd_storage is False
|
|
26
|
+
FRAME_DIM = "frame"
|
|
27
|
+
|
|
28
|
+
|
|
29
|
+
class TensorStoreHandler:
|
|
30
|
+
"""Tensorstore handler for writing MDA sequences.
|
|
31
|
+
|
|
32
|
+
This is a performant and shape-agnostic handler for writing MDA sequences to
|
|
33
|
+
chunked storages like zarr, n5, backed by tensorstore:
|
|
34
|
+
<https://google.github.io/tensorstore/>
|
|
35
|
+
|
|
36
|
+
By default, the handler will store frames in a zarr array, with a shape of
|
|
37
|
+
(nframes, *frame_shape) and a chunk size of (1, *frame_shape), i.e. each frame
|
|
38
|
+
is stored in a separate chunk. To customize shape or chunking, override the
|
|
39
|
+
`get_full_shape`, `get_chunk_layout`, and `get_index_domain` methods (these
|
|
40
|
+
may change in the future as we learn to use tensorstore better).
|
|
41
|
+
|
|
42
|
+
Parameters
|
|
43
|
+
----------
|
|
44
|
+
driver : TsDriver, optional
|
|
45
|
+
The driver to use for the tensorstore, by default "zarr". Must be one of
|
|
46
|
+
"zarr", "zarr3", "n5", or "neuroglancer_precomputed".
|
|
47
|
+
kvstore : str | dict | None, optional
|
|
48
|
+
The key-value store to use for the tensorstore, by default "memory://".
|
|
49
|
+
A dict might look like {'driver': 'file', 'path': '/path/to/dataset.zarr'}
|
|
50
|
+
see <https://google.github.io/tensorstore/kvstore/index.html#json-KvStore>
|
|
51
|
+
for all options. If path is provided, the kvstore will be set to file://path
|
|
52
|
+
path : str | Path | None, optional
|
|
53
|
+
Convenience for specifying a local filepath. If provided, overrides the
|
|
54
|
+
kvstore option, to be `file://file_path`.
|
|
55
|
+
delete_existing : bool, optional
|
|
56
|
+
Whether to delete the existing dataset if it exists, by default False.
|
|
57
|
+
spec : Mapping, optional
|
|
58
|
+
A spec to use when opening the tensorstore, by default None. Values provided
|
|
59
|
+
in this object will override the default values provided by the handler.
|
|
60
|
+
This is a complex object that can completely define the tensorstore, see
|
|
61
|
+
<https://google.github.io/tensorstore/spec.html> for more information.
|
|
62
|
+
|
|
63
|
+
Examples
|
|
64
|
+
--------
|
|
65
|
+
```python
|
|
66
|
+
from pymmcore_plus import CMMCorePlus
|
|
67
|
+
from pymmcore_plus.mda.handlers import TensorStoreHandler
|
|
68
|
+
from useq import MDASequence
|
|
69
|
+
|
|
70
|
+
core = CMMCorePlus.instance()
|
|
71
|
+
core.loadSystemConfiguration()
|
|
72
|
+
|
|
73
|
+
sequence = MDASequence(
|
|
74
|
+
channels=["DAPI", {"config": "FITC", "exposure": 1}],
|
|
75
|
+
stage_positions=[{"x": 1, "y": 1, "name": "some position"}, {"x": 0, "y": 0}],
|
|
76
|
+
time_plan={"interval": 2, "loops": 3},
|
|
77
|
+
z_plan={"range": 4, "step": 0.5},
|
|
78
|
+
axis_order="tpcz",
|
|
79
|
+
)
|
|
80
|
+
|
|
81
|
+
writer = TensorStoreHandler(path="example_ts.zarr", delete_existing=True)
|
|
82
|
+
core.mda.run(sequence, output=writer)
|
|
83
|
+
```
|
|
84
|
+
|
|
85
|
+
"""
|
|
86
|
+
|
|
87
|
+
def __init__(
|
|
88
|
+
self,
|
|
89
|
+
*,
|
|
90
|
+
driver: TsDriver = "zarr",
|
|
91
|
+
kvstore: str | dict | None = "memory://",
|
|
92
|
+
path: str | PathLike | None = None,
|
|
93
|
+
delete_existing: bool = False,
|
|
94
|
+
spec: Mapping | None = None,
|
|
95
|
+
) -> None:
|
|
96
|
+
try:
|
|
97
|
+
import tensorstore
|
|
98
|
+
except ImportError as e:
|
|
99
|
+
raise ImportError("Tensorstore is required to use this handler.") from e
|
|
100
|
+
|
|
101
|
+
self._ts = tensorstore
|
|
102
|
+
|
|
103
|
+
self.ts_driver = driver
|
|
104
|
+
self.kvstore = f"file://{path}" if path is not None else kvstore
|
|
105
|
+
self.delete_existing = delete_existing
|
|
106
|
+
self.spec = spec
|
|
107
|
+
|
|
108
|
+
# storage of individual frame metadata
|
|
109
|
+
# maps position key to list of frame metadata
|
|
110
|
+
self.frame_metadatas: list[tuple[useq.MDAEvent, dict]] = []
|
|
111
|
+
|
|
112
|
+
self._size_increment = 300
|
|
113
|
+
|
|
114
|
+
self._store: ts.TensorStore | None = None
|
|
115
|
+
self._futures: list[ts.Future] = []
|
|
116
|
+
self._frame_indices: dict[EventKey, int | ts.DimExpression] = {}
|
|
117
|
+
|
|
118
|
+
# "_nd_storage" means we're greedily attempting to store the data in a
|
|
119
|
+
# multi-dimensional format based on the axes of the sequence.
|
|
120
|
+
# for non-deterministic experiments, this often won't work...
|
|
121
|
+
# _nd_storage False means we simply store data as a 3D array of shape
|
|
122
|
+
# (nframes, y, x). `_nd_storage` is set when a new_store is created.
|
|
123
|
+
self._nd_storage: bool = True
|
|
124
|
+
self._frame_index: int = 0
|
|
125
|
+
|
|
126
|
+
# the highest index seen for each axis
|
|
127
|
+
self._axis_max: dict[str, int] = {}
|
|
128
|
+
|
|
129
|
+
@property
|
|
130
|
+
def store(self) -> ts.TensorStore | None:
|
|
131
|
+
"""The current tensorstore."""
|
|
132
|
+
return self._store
|
|
133
|
+
|
|
134
|
+
@classmethod
|
|
135
|
+
def in_tmpdir(
|
|
136
|
+
cls,
|
|
137
|
+
suffix: str | None = "",
|
|
138
|
+
prefix: str | None = "pymmcore_zarr_",
|
|
139
|
+
dir: str | PathLike[str] | None = None,
|
|
140
|
+
cleanup_atexit: bool = True,
|
|
141
|
+
**kwargs: Any,
|
|
142
|
+
) -> Self:
|
|
143
|
+
"""Create TensorStoreHandler that writes to a temporary directory.
|
|
144
|
+
|
|
145
|
+
Parameters
|
|
146
|
+
----------
|
|
147
|
+
suffix : str, optional
|
|
148
|
+
If suffix is specified, the file name will end with that suffix, otherwise
|
|
149
|
+
there will be no suffix.
|
|
150
|
+
prefix : str, optional
|
|
151
|
+
If prefix is specified, the file name will begin with that prefix, otherwise
|
|
152
|
+
a default prefix is used.
|
|
153
|
+
dir : str or PathLike, optional
|
|
154
|
+
If dir is specified, the file will be created in that directory, otherwise
|
|
155
|
+
a default directory is used (tempfile.gettempdir())
|
|
156
|
+
cleanup_atexit : bool, optional
|
|
157
|
+
Whether to automatically cleanup the temporary directory when the python
|
|
158
|
+
process exits. Default is True.
|
|
159
|
+
**kwargs
|
|
160
|
+
Remaining kwargs are passed to `TensorStoreHandler.__init__`
|
|
161
|
+
"""
|
|
162
|
+
# same as zarr.storage.TempStore, but with option not to cleanup
|
|
163
|
+
path = tempfile.mkdtemp(suffix=suffix, prefix=prefix, dir=dir)
|
|
164
|
+
if cleanup_atexit:
|
|
165
|
+
|
|
166
|
+
@atexit.register
|
|
167
|
+
def _atexit_rmtree(_path: str = path) -> None: # pragma: no cover
|
|
168
|
+
if os.path.isdir(_path):
|
|
169
|
+
shutil.rmtree(_path, ignore_errors=True)
|
|
170
|
+
|
|
171
|
+
return cls(path=path, **kwargs)
|
|
172
|
+
|
|
173
|
+
def sequenceStarted(self, seq: useq.MDASequence) -> None:
|
|
174
|
+
"""On sequence started, simply store the sequence."""
|
|
175
|
+
self._frame_index = 0
|
|
176
|
+
self._store = None
|
|
177
|
+
self._futures.clear()
|
|
178
|
+
self.frame_metadatas.clear()
|
|
179
|
+
self.current_sequence = seq
|
|
180
|
+
|
|
181
|
+
def sequenceFinished(self, seq: useq.MDASequence) -> None:
|
|
182
|
+
"""On sequence finished, clear the current sequence."""
|
|
183
|
+
if self._store is None:
|
|
184
|
+
return # pragma: no cover
|
|
185
|
+
|
|
186
|
+
while self._futures:
|
|
187
|
+
self._futures.pop().result()
|
|
188
|
+
if not self._nd_storage:
|
|
189
|
+
self._store = self._store.resize(
|
|
190
|
+
exclusive_max=(self._frame_index, *self._store.shape[-2:])
|
|
191
|
+
).result()
|
|
192
|
+
if self.frame_metadatas:
|
|
193
|
+
self.finalize_metadata()
|
|
194
|
+
|
|
195
|
+
def frameReady(self, frame: np.ndarray, event: useq.MDAEvent, meta: dict) -> None:
|
|
196
|
+
"""Write frame to the zarr array for the appropriate position."""
|
|
197
|
+
if self._store is None:
|
|
198
|
+
self._store = self.new_store(frame, event.sequence, meta).result()
|
|
199
|
+
|
|
200
|
+
ts_index: ts.DimExpression | int
|
|
201
|
+
if self._nd_storage:
|
|
202
|
+
ts_index = self._event_index_to_store_index(event.index)
|
|
203
|
+
else:
|
|
204
|
+
if self._frame_index >= self._store.shape[0]:
|
|
205
|
+
self._store = self._expand_store(self._store).result()
|
|
206
|
+
ts_index = self._frame_index
|
|
207
|
+
# store reverse lookup of event.index -> frame_index
|
|
208
|
+
self._frame_indices[frozenset(event.index.items())] = ts_index
|
|
209
|
+
|
|
210
|
+
# write the new frame asynchronously
|
|
211
|
+
self._futures.append(self._store[ts_index].write(frame))
|
|
212
|
+
|
|
213
|
+
# store, but do not process yet, the frame metadata
|
|
214
|
+
self.frame_metadatas.append((event, meta))
|
|
215
|
+
# update the frame counter
|
|
216
|
+
self._frame_index += 1
|
|
217
|
+
# remember the highest index seen for each axis
|
|
218
|
+
for k, v in event.index.items():
|
|
219
|
+
self._axis_max[k] = max(self._axis_max.get(k, 0), v)
|
|
220
|
+
|
|
221
|
+
def isel(
|
|
222
|
+
self,
|
|
223
|
+
indexers: Mapping[str, int | slice] | None = None,
|
|
224
|
+
**indexers_kwargs: int | slice,
|
|
225
|
+
) -> np.ndarray:
|
|
226
|
+
"""Select data from the array."""
|
|
227
|
+
# FIXME: will fail on slices
|
|
228
|
+
indexers = {**(indexers or {}), **indexers_kwargs}
|
|
229
|
+
ts_index = self._event_index_to_store_index(indexers)
|
|
230
|
+
return self._store[ts_index].read().result().squeeze() # type: ignore
|
|
231
|
+
|
|
232
|
+
def new_store(
|
|
233
|
+
self, frame: np.ndarray, seq: useq.MDASequence | None, meta: dict
|
|
234
|
+
) -> ts.Future[ts.TensorStore]:
|
|
235
|
+
shape, chunks, labels = self.get_shape_chunks_labels(frame.shape, seq)
|
|
236
|
+
self._nd_storage = FRAME_DIM not in labels
|
|
237
|
+
return self._ts.open(
|
|
238
|
+
self.get_spec(),
|
|
239
|
+
create=True,
|
|
240
|
+
delete_existing=self.delete_existing,
|
|
241
|
+
dtype=self._ts.dtype(frame.dtype),
|
|
242
|
+
shape=shape,
|
|
243
|
+
chunk_layout=self._ts.ChunkLayout(chunk_shape=chunks),
|
|
244
|
+
domain=self._ts.IndexDomain(labels=labels),
|
|
245
|
+
)
|
|
246
|
+
|
|
247
|
+
def get_shape_chunks_labels(
|
|
248
|
+
self, frame_shape: tuple[int, ...], seq: useq.MDASequence | None
|
|
249
|
+
) -> tuple[tuple[int, ...], tuple[int, ...], tuple[str, ...]]:
|
|
250
|
+
labels: tuple[str, ...]
|
|
251
|
+
if seq is not None and seq.sizes:
|
|
252
|
+
# expand the sizes to include the largest size we encounter for each axis
|
|
253
|
+
# in the case of positions with subsequences, we'll still end up with a
|
|
254
|
+
# jagged array, but it won't take extra space, and we won't get index errors
|
|
255
|
+
max_sizes = seq.sizes.copy()
|
|
256
|
+
for psize in position_sizes(seq):
|
|
257
|
+
for k, v in psize.items():
|
|
258
|
+
max_sizes[k] = max(max_sizes.get(k, 0), v)
|
|
259
|
+
|
|
260
|
+
# remove axes with length 0
|
|
261
|
+
labels, sizes = zip(*(x for x in max_sizes.items() if x[1]))
|
|
262
|
+
full_shape: tuple[int, ...] = (*sizes, *frame_shape)
|
|
263
|
+
else:
|
|
264
|
+
labels = (FRAME_DIM,)
|
|
265
|
+
full_shape = (self._size_increment, *frame_shape)
|
|
266
|
+
|
|
267
|
+
chunks = [1] * len(full_shape)
|
|
268
|
+
chunks[-len(frame_shape) :] = frame_shape
|
|
269
|
+
labels = (*labels, "y", "x")
|
|
270
|
+
return full_shape, tuple(chunks), labels
|
|
271
|
+
|
|
272
|
+
def get_spec(self) -> dict:
|
|
273
|
+
"""Construct the tensorstore spec."""
|
|
274
|
+
spec = {"driver": self.ts_driver, "kvstore": self.kvstore}
|
|
275
|
+
if self.spec:
|
|
276
|
+
_merge_nested_dicts(spec, self.spec)
|
|
277
|
+
|
|
278
|
+
# HACK
|
|
279
|
+
if self.ts_driver == "zarr":
|
|
280
|
+
meta = cast(dict, spec.setdefault("metadata", {}))
|
|
281
|
+
if "dimension_separator" not in meta:
|
|
282
|
+
meta["dimension_separator"] = "/"
|
|
283
|
+
return spec
|
|
284
|
+
|
|
285
|
+
def finalize_metadata(self) -> None:
|
|
286
|
+
"""Finalize and flush metadata to storage."""
|
|
287
|
+
if not (store := self._store) or not store.kvstore:
|
|
288
|
+
return # pragma: no cover
|
|
289
|
+
|
|
290
|
+
data = []
|
|
291
|
+
for event, meta in self.frame_metadatas:
|
|
292
|
+
# FIXME: unnecessary ser/des
|
|
293
|
+
js = event.model_dump_json(exclude={"sequence"}, exclude_defaults=True)
|
|
294
|
+
meta["Event"] = json.loads(js)
|
|
295
|
+
data.append(meta)
|
|
296
|
+
|
|
297
|
+
metadata = {"frame_metadatas": data}
|
|
298
|
+
if not self._nd_storage:
|
|
299
|
+
metadata["frame_indices"] = [
|
|
300
|
+
(tuple(dict(k).items()), v) # type: ignore
|
|
301
|
+
for k, v in self._frame_indices.items()
|
|
302
|
+
]
|
|
303
|
+
|
|
304
|
+
if self.ts_driver.startswith("zarr"):
|
|
305
|
+
store.kvstore.write(".zattrs", json.dumps(metadata))
|
|
306
|
+
elif self.ts_driver == "n5": # pragma: no cover
|
|
307
|
+
attrs = json.loads(store.kvstore.read("attributes.json").result().value)
|
|
308
|
+
attrs.update(metadata)
|
|
309
|
+
store.kvstore.write("attributes.json", json.dumps(attrs))
|
|
310
|
+
|
|
311
|
+
def _expand_store(self, store: ts.TensorStore) -> ts.Future[ts.TensorStore]:
|
|
312
|
+
"""Grow the store by `self._size_increment` frames.
|
|
313
|
+
|
|
314
|
+
This is used when _nd_storage mode is False and we've run out of space.
|
|
315
|
+
"""
|
|
316
|
+
new_shape = [self._frame_index + self._size_increment, *store.shape[-2:]]
|
|
317
|
+
return store.resize(exclusive_max=new_shape, expand_only=True)
|
|
318
|
+
|
|
319
|
+
def _event_index_to_store_index(
|
|
320
|
+
self, index: Mapping[str, int | slice]
|
|
321
|
+
) -> ts.DimExpression:
|
|
322
|
+
"""Convert event index to store index.
|
|
323
|
+
|
|
324
|
+
The return value is safe to use as an index to self._store[...]
|
|
325
|
+
"""
|
|
326
|
+
if self._nd_storage:
|
|
327
|
+
return self._ts.d[index][tuple(index.values())]
|
|
328
|
+
|
|
329
|
+
if any(isinstance(v, slice) for v in index.values()):
|
|
330
|
+
idx: list | int | ts.DimExpression = self._get_frame_indices(index)
|
|
331
|
+
else:
|
|
332
|
+
try:
|
|
333
|
+
idx = self._frame_indices[frozenset(index.items())] # type: ignore
|
|
334
|
+
except KeyError as e:
|
|
335
|
+
raise KeyError(f"Index {index} not found in frame_indices.") from e
|
|
336
|
+
return self._ts.d[FRAME_DIM][idx]
|
|
337
|
+
|
|
338
|
+
def _get_frame_indices(self, indexers: Mapping[str, int | slice]) -> list[int]:
|
|
339
|
+
"""Convert indexers (with slices) to a list of frame indices."""
|
|
340
|
+
# converting slice objects to actual indices
|
|
341
|
+
axis_indices: dict[str, Sequence[int]] = {}
|
|
342
|
+
for k, v in indexers.items():
|
|
343
|
+
if isinstance(v, slice):
|
|
344
|
+
axis_indices[k] = tuple(range(*v.indices(self._axis_max.get(k, 0) + 1)))
|
|
345
|
+
else:
|
|
346
|
+
axis_indices[k] = (v,)
|
|
347
|
+
|
|
348
|
+
indices: list[int] = []
|
|
349
|
+
for p in product(*axis_indices.values()):
|
|
350
|
+
key = frozenset(dict(zip(axis_indices.keys(), p)).items())
|
|
351
|
+
try:
|
|
352
|
+
indices.append(self._frame_indices[key])
|
|
353
|
+
except KeyError: # pragma: no cover
|
|
354
|
+
warnings.warn(
|
|
355
|
+
f"Index {dict(key)} not found in frame_indices.", stacklevel=2
|
|
356
|
+
)
|
|
357
|
+
return indices
|
|
358
|
+
|
|
359
|
+
|
|
360
|
+
def _merge_nested_dicts(dict1: dict, dict2: Mapping) -> None:
|
|
361
|
+
"""Merge two nested dictionaries.
|
|
362
|
+
|
|
363
|
+
Values in dict2 will override values in dict1.
|
|
364
|
+
"""
|
|
365
|
+
for key, value in dict2.items():
|
|
366
|
+
if key in dict1 and isinstance(dict1[key], dict) and isinstance(value, dict):
|
|
367
|
+
_merge_nested_dicts(dict1[key], value)
|
|
368
|
+
else:
|
|
369
|
+
dict1[key] = value
|
pymmcore_plus-0.9.5/tests/io/test_ome_zarr.py → pymmcore_plus-0.10.1/tests/io/test_zarr_writers.py
RENAMED
|
@@ -1,11 +1,12 @@
|
|
|
1
1
|
from __future__ import annotations
|
|
2
2
|
|
|
3
|
+
from queue import Queue
|
|
3
4
|
from typing import TYPE_CHECKING
|
|
4
5
|
|
|
5
6
|
import numpy as np
|
|
6
7
|
import pytest
|
|
7
8
|
import useq
|
|
8
|
-
from pymmcore_plus.mda.handlers import OMEZarrWriter
|
|
9
|
+
from pymmcore_plus.mda.handlers import OMEZarrWriter, TensorStoreHandler
|
|
9
10
|
|
|
10
11
|
if TYPE_CHECKING:
|
|
11
12
|
from pathlib import Path
|
|
@@ -65,18 +66,18 @@ COMPLEX_EXPECTATION = {
|
|
|
65
66
|
}
|
|
66
67
|
|
|
67
68
|
|
|
68
|
-
|
|
69
|
-
|
|
70
|
-
|
|
71
|
-
|
|
72
|
-
|
|
73
|
-
|
|
74
|
-
|
|
75
|
-
|
|
76
|
-
|
|
77
|
-
|
|
78
|
-
|
|
79
|
-
)
|
|
69
|
+
# (store, sequence, expected per-position sizes) triples shared by the
# OME-Zarr and TensorStore writer tests.
# NOTE: `list` takes a single type parameter; the element type is a tuple.
CASES: list[tuple[str | None, useq.MDASequence, dict[str, dict]]] = [
    (None, SIMPLE_MDA, SIMPLE_EXPECTATION),
    (None, MULTIPOINT_MDA, MULTIPOINT_EXPECTATION),
    (None, FULL_MDA, FULL_EXPECTATION),
    ("out.zarr", FULL_MDA, FULL_EXPECTATION),
    (None, FULL_MDA, FULL_EXPECTATION),
    ("tmp", FULL_MDA, FULL_EXPECTATION),
    (None, COMPLEX_MDA, COMPLEX_EXPECTATION),
]
|
|
78
|
+
|
|
79
|
+
|
|
80
|
+
@pytest.mark.parametrize("store, mda, expected_shapes", CASES)
|
|
80
81
|
def test_ome_zarr_writer(
|
|
81
82
|
store: str | None,
|
|
82
83
|
mda: useq.MDASequence,
|
|
@@ -128,3 +129,74 @@ def test_ome_zarr_writer(
|
|
|
128
129
|
|
|
129
130
|
# smoke test the isel method
|
|
130
131
|
assert isinstance(writer.isel(p=0, t=0, x=slice(0, 100)), np.ndarray)
|
|
132
|
+
|
|
133
|
+
|
|
134
|
+
@pytest.mark.parametrize("store, mda, expected_shapes", CASES)
def test_tensorstore_writer(
    store: str | None,
    mda: useq.MDASequence,
    expected_shapes: dict[str, dict],
    tmp_path: Path,
    core: CMMCorePlus,
) -> None:
    """Run an MDA through a TensorStoreHandler and validate the store."""
    if store == "tmp":
        writer = TensorStoreHandler.in_tmpdir()
    elif store is None:
        writer = TensorStoreHandler()
    else:
        writer = TensorStoreHandler(path=tmp_path / store)

    core.mda.run(mda, output=writer)

    assert writer.store is not None

    # accumulate the maximum size seen for each dimension across positions
    expected_sizes: dict[str, int] = {}
    for pos_sizes in expected_shapes.values():
        for dim, size in pos_sizes.items():
            # FIX: compare against the running maximum (expected_sizes),
            # not the current position's own mapping, otherwise the max()
            # is a no-op and the last position silently wins.
            expected_sizes[dim] = max(expected_sizes.get(dim, 0), size)
    if len(expected_shapes) > 1:
        expected_sizes["p"] = len(expected_shapes)

    sizes = dict(zip(writer.store.domain.labels, writer.store.shape))
    assert sizes == expected_sizes

    if store:
        # ensure that non-memory stores were written to disk
        ary = zarr.open(writer.store.kvstore.path)
        # ensure real data was written
        assert ary.nchunks_initialized > 0
        assert ary[0, 0].mean() > (ary.fill_value or 0)

    # smoke test the isel method
    x = writer.isel(t=0, c=0, x=slice(0, 100))
    assert isinstance(x, np.ndarray)
    assert x.shape[-1] == 100
|
|
174
|
+
|
|
175
|
+
|
|
176
|
+
def test_tensorstore_writer_spec_override(tmp_path: Path) -> None:
    """User-supplied spec values are merged into the generated spec."""
    limit = 10000000
    handler = TensorStoreHandler(
        path=tmp_path / "test.zarr",
        spec={"context": {"cache_pool": {"total_bytes_limit": limit}}},
    )

    spec = handler.get_spec()
    assert spec["context"]["cache_pool"]["total_bytes_limit"] == limit
|
|
183
|
+
|
|
184
|
+
|
|
185
|
+
def test_tensorstore_writer_indeterminate(tmp_path: Path, core: CMMCorePlus) -> None:
    """Feed events from a queue (length unknown up front) and index the result."""
    # FIXME: this test is actually throwing difficult-to-debug exceptions
    # when driver=='zarr'. It happens when awaiting the result of self._store.resize()
    # inside of sequenceFinished.
    handler = TensorStoreHandler()

    events: Queue = Queue()
    runner = core.run_mda(iter(events.get, None), output=handler)
    for t_idx in range(2):
        for z_idx in range(2):
            events.put(useq.MDAEvent(index={"t": t_idx, "z": z_idx, "c": 0}))
    events.put(None)  # sentinel: terminates the iter() above
    runner.join()

    assert handler.isel(t=1, z=1, c=0).shape == (512, 512)
    assert handler.isel(t=1, z=slice(None), c=0).shape == (2, 512, 512)
    with pytest.raises(KeyError):
        handler.isel(t=2, z=2, c=0)
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
{pymmcore_plus-0.9.5 → pymmcore_plus-0.10.1}/src/pymmcore_plus/core/events/_device_signal_view.py
RENAMED
|
File without changes
|
|
File without changes
|
{pymmcore_plus-0.9.5 → pymmcore_plus-0.10.1}/src/pymmcore_plus/core/events/_prop_event_mixin.py
RENAMED
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
{pymmcore_plus-0.9.5 → pymmcore_plus-0.10.1}/src/pymmcore_plus/mda/handlers/_5d_writer_base.py
RENAMED
|
File without changes
|
{pymmcore_plus-0.9.5 → pymmcore_plus-0.10.1}/src/pymmcore_plus/mda/handlers/_img_sequence_writer.py
RENAMED
|
File without changes
|
{pymmcore_plus-0.9.5 → pymmcore_plus-0.10.1}/src/pymmcore_plus/mda/handlers/_ome_tiff_writer.py
RENAMED
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|