ophyd-async 0.10.0a4__py3-none-any.whl → 0.11__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- ophyd_async/_version.py +2 -2
- ophyd_async/core/__init__.py +12 -1
- ophyd_async/core/_derived_signal.py +69 -23
- ophyd_async/core/_derived_signal_backend.py +53 -29
- ophyd_async/core/_detector.py +3 -3
- ophyd_async/core/_device.py +24 -16
- ophyd_async/core/_flyer.py +35 -1
- ophyd_async/core/_hdf_dataset.py +12 -11
- ophyd_async/core/_providers.py +1 -1
- ophyd_async/core/_signal.py +49 -29
- ophyd_async/core/_signal_backend.py +1 -1
- ophyd_async/core/_table.py +3 -3
- ophyd_async/core/_utils.py +25 -0
- ophyd_async/core/_yaml_settings.py +3 -3
- ophyd_async/epics/adandor/__init__.py +7 -1
- ophyd_async/epics/adandor/_andor_controller.py +5 -8
- ophyd_async/epics/adandor/_andor_io.py +12 -19
- ophyd_async/epics/adcore/_core_logic.py +34 -10
- ophyd_async/epics/adcore/_hdf_writer.py +27 -19
- ophyd_async/epics/eiger/_odin_io.py +4 -2
- ophyd_async/epics/motor.py +46 -96
- ophyd_async/epics/pmac/__init__.py +3 -0
- ophyd_async/epics/pmac/_pmac_io.py +100 -0
- ophyd_async/fastcs/eiger/__init__.py +1 -2
- ophyd_async/fastcs/eiger/_eiger.py +3 -9
- ophyd_async/fastcs/panda/_trigger.py +4 -4
- ophyd_async/fastcs/panda/_writer.py +15 -13
- ophyd_async/sim/__init__.py +1 -2
- ophyd_async/sim/_blob_detector_writer.py +6 -12
- ophyd_async/sim/_mirror_horizontal.py +3 -2
- ophyd_async/sim/_mirror_vertical.py +1 -0
- ophyd_async/sim/_motor.py +13 -43
- {ophyd_async-0.10.0a4.dist-info → ophyd_async-0.11.dist-info}/METADATA +3 -3
- {ophyd_async-0.10.0a4.dist-info → ophyd_async-0.11.dist-info}/RECORD +37 -35
- {ophyd_async-0.10.0a4.dist-info → ophyd_async-0.11.dist-info}/WHEEL +1 -1
- {ophyd_async-0.10.0a4.dist-info → ophyd_async-0.11.dist-info}/licenses/LICENSE +0 -0
- {ophyd_async-0.10.0a4.dist-info → ophyd_async-0.11.dist-info}/top_level.txt +0 -0
ophyd_async/core/_signal.py
CHANGED
@@ -31,6 +31,7 @@ from ._utils import (
     Callback,
     LazyMock,
     T,
+    error_if_none,
 )


@@ -125,10 +126,8 @@ class _SignalCache(Generic[SignalDatatypeT]):
         self._signal.log.debug(f"Closing subscription on source {self._signal.source}")

     def _ensure_reading(self) -> Reading[SignalDatatypeT]:
-        if self._reading is None:
-            msg = "Monitor not working"
-            raise RuntimeError(msg)
-        return self._reading
+        reading = error_if_none(self._reading, "Monitor not working")
+        return reading

     async def get_reading(self) -> Reading[SignalDatatypeT]:
         await self._valid.wait()
@@ -163,7 +162,12 @@ class _SignalCache(Generic[SignalDatatypeT]):
         self._notify(function, want_value)

     def unsubscribe(self, function: Callback) -> bool:
-        self._listeners.pop(function)
+        _listener = self._listeners.pop(function, None)
+        if not _listener:
+            self._signal.log.warning(
+                f"Unsubscribe failed: subscriber {function} was not found "
+                f" in listeners list: {list(self._listeners)}"
+            )
         return self._staged or bool(self._listeners)

     def set_staged(self, staged: bool) -> bool:
@@ -183,11 +187,8 @@ class SignalR(Signal[SignalDatatypeT], AsyncReadable, AsyncStageable, Subscribab
         if cached is None:
             cached = self._cache is not None
         if cached:
-            if self._cache is None:
-                msg = f"{self.source} not being monitored"
-                raise RuntimeError(msg)
-            # assert self._cache, f"{self.source} not being monitored"
-            return self._cache
+            cache = error_if_none(self._cache, f"{self.source} not being monitored")
+            return cache
         else:
             return self._connector.backend

@@ -687,7 +688,7 @@ async def set_and_wait_for_value(
     )


-def walk_rw_signals(device: Device, path_prefix: str = "") -> dict[str, SignalRW[Any]]:
+def walk_rw_signals(device: Device) -> dict[str, SignalRW[Any]]:
     """Retrieve all SignalRWs from a device.

     Stores retrieved signals with their dotted attribute paths in a dictionary. Used as
@@ -699,19 +700,12 @@ def walk_rw_signals(device: Device, path_prefix: str = "") -> dict[str, SignalRW
         A dictionary matching the string attribute path of a SignalRW with the
         signal itself.
     """
-    signals: dict[str, SignalRW[Any]] = {}
-
-    for attr_name, attr in device.children():
-        dot_path = f"{path_prefix}{attr_name}"
-        if type(attr) is SignalRW:
-            signals[dot_path] = attr
-        attr_signals = walk_rw_signals(attr, path_prefix=dot_path + ".")
-        signals.update(attr_signals)
-    return signals
+    all_devices = walk_devices(device)
+    return {path: dev for path, dev in all_devices.items() if type(dev) is SignalRW}


 async def walk_config_signals(
-    device: Device, path_prefix: str = ""
+    device: Device,
 ) -> dict[str, SignalRW[Any]]:
     """Retrieve all configuration signals from a device.

@@ -719,28 +713,54 @@ async def walk_config_signals(
     part of saving and loading a device.

     :param device: Device to retrieve configuration signals from.
-    :param path_prefix: For internal use, leave blank when calling the method.
     :return:
         A dictionary matching the string attribute path of a SignalRW with the
         signal itself.
     """
-    signals: dict[str, SignalRW[Any]] = {}
     config_names: list[str] = []
     if isinstance(device, Configurable):
         configuration = device.read_configuration()
         if inspect.isawaitable(configuration):
             configuration = await configuration
         config_names = list(configuration.keys())
-    for attr_name, attr in device.children():
-        dot_path = f"{path_prefix}{attr_name}"
-        if isinstance(attr, SignalRW) and attr.name in config_names:
-            signals[dot_path] = attr
-        signals.update(await walk_config_signals(attr, path_prefix=dot_path + "."))

-    return signals
+    all_devices = walk_devices(device)
+    return {
+        path: dev
+        for path, dev in all_devices.items()
+        if isinstance(dev, SignalRW) and dev.name in config_names
+    }


 class Ignore:
     """Annotation to ignore a signal when connecting a device."""

     pass
+
+
+def walk_devices(device: Device, path_prefix: str = "") -> dict[str, Device]:
+    """Recursively retrieve all Devices from a device tree.
+
+    :param device: Root device to start from.
+    :param path_prefix: For internal use, leave blank when calling the method.
+    :return: A dictionary mapping dotted attribute paths to Device instances.
+    """
+    devices: dict[str, Device] = {}
+    for attr_name, attr in device.children():
+        dot_path = f"{path_prefix}{attr_name}"
+        devices[dot_path] = attr
+        devices.update(walk_devices(attr, path_prefix=dot_path + "."))
+    return devices
+
+
+def walk_signal_sources(device: Device) -> dict[str, str]:
+    """Recursively gather the `source` field from every Signal in a device tree.
+
+    :param device: Root device to start from.
+    :param path_prefix: For internal use, leave blank when calling the method.
+    :return: A dictionary mapping dotted attribute paths to Signal source strings.
+    """
+    all_devices = walk_devices(device)
+    return {
+        path: dev.source for path, dev in all_devices.items() if isinstance(dev, Signal)
+    }
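Note: walk_devices now does the single recursive traversal, and walk_rw_signals / walk_signal_sources are thin filters over its result. A minimal usage sketch (the Stage device and its signal names are invented for illustration, and the helpers are assumed to be re-exported from ophyd_async.core):

import asyncio

from ophyd_async.core import Device, soft_signal_rw, walk_devices, walk_rw_signals


class Stage(Device):
    """Hypothetical two-axis device, used only for this example."""

    def __init__(self, name: str = "") -> None:
        self.x = soft_signal_rw(float)
        self.y = soft_signal_rw(float)
        super().__init__(name=name)


async def main() -> None:
    stage = Stage(name="stage")
    await stage.connect(mock=True)
    print(walk_devices(stage))     # {"x": <SignalRW>, "y": <SignalRW>}
    print(walk_rw_signals(stage))  # same mapping, filtered to SignalRW only


asyncio.run(main())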
ophyd_async/core/_signal_backend.py
CHANGED

@@ -176,7 +176,7 @@ def _datakey_dtype_numpy(
     raise TypeError(f"Can't make dtype_numpy for {datatype}")


-def _datakey_shape(value: SignalDatatype) -> list[int]:
+def _datakey_shape(value: SignalDatatype) -> list[int | None]:
     if type(value) in _primitive_dtype or isinstance(value, EnumTypes):
         return []
     elif isinstance(value, np.ndarray):
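Note: the widened return type follows the event-model convention that an unknown dimension in a DataKey shape is recorded as None, for example (illustrative values only):

# A stack of frames whose per-event length is not known ahead of time
shape: list[int | None] = [None, 1216, 1556]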
ophyd_async/core/_table.py
CHANGED
@@ -4,10 +4,10 @@ from collections.abc import Callable, Sequence
 from typing import Annotated, Any, TypeVar, get_origin, get_type_hints

 import numpy as np
-from pydantic import BaseModel, ConfigDict, Field, model_validator
+from pydantic import ConfigDict, Field, model_validator
 from pydantic_numpy.helper.annotation import NpArrayPydanticAnnotation

-from ._utils import get_dtype
+from ._utils import ConfinedModel, get_dtype

 TableSubclass = TypeVar("TableSubclass", bound="Table")

@@ -26,7 +26,7 @@ def _make_default_factory(dtype: np.dtype) -> Callable[[], np.ndarray]:
     return numpy_array_default_factory


-class Table(BaseModel):
+class Table(ConfinedModel):
     """An abstraction of a Table where each field is a column.

     For example:
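Note: basing Table on ConfinedModel means an unknown column name now fails validation instead of being silently attached to the model. A self-contained sketch of that behaviour (ExampleTable is invented; extra="forbid" is the point):

from pydantic import BaseModel, ConfigDict, ValidationError


class ConfinedModel(BaseModel):
    model_config = ConfigDict(extra="forbid")


class ExampleTable(ConfinedModel):
    position: list[float] = []


ExampleTable(position=[1.0, 2.0])  # accepted
try:
    ExampleTable(positions=[1.0, 2.0])  # misspelt column name
except ValidationError as err:
    print(err)  # "Extra inputs are not permitted"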
ophyd_async/core/_utils.py
CHANGED
@@ -17,6 +17,7 @@ from typing import (
 from unittest.mock import Mock

 import numpy as np
+from pydantic import BaseModel, ConfigDict

 T = TypeVar("T")
 V = TypeVar("V")
@@ -377,3 +378,27 @@ class LazyMock:
         if self.parent is not None:
             self.parent().attach_mock(self._mock, self.name)
         return self._mock
+
+
+class ConfinedModel(BaseModel):
+    """A base class confined to explicitly defined fields in the model schema."""
+
+    model_config = ConfigDict(
+        extra="forbid",
+    )
+
+
+def error_if_none(value: T | None, msg: str) -> T:
+    """Check and return the value if not None.
+
+    :param value: The value to check
+    :param msg: The `RuntimeError` message to raise if it is None
+    :raises RuntimeError: If the value is None
+    :returns: The value if not None
+
+    Used to implement a pattern where a variable is None at init, then
+    changed by a method, then used in a later method.
+    """
+    if value is None:
+        raise RuntimeError(msg)
+    return value
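Note: error_if_none replaces the repeated "if x is None: raise RuntimeError(...)" blocks removed from _signal.py above, while preserving the type narrowing from T | None to T. A sketch of the init-then-set pattern it targets (FileWriter and its message are illustrative; the import assumes the helper is re-exported from ophyd_async.core):

from ophyd_async.core import error_if_none  # assumed export path


class FileWriter:
    def __init__(self) -> None:
        self._path: str | None = None  # unknown until open() runs

    def open(self, path: str) -> None:
        self._path = path

    def describe(self) -> str:
        # Raises RuntimeError("open() not called") if open() was skipped;
        # afterwards _path is narrowed to str for the type checker.
        path = error_if_none(self._path, "open() not called")
        return f"writing to {path}"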
ophyd_async/core/_yaml_settings.py
CHANGED

@@ -6,9 +6,9 @@ from typing import Any
 import numpy as np
 import numpy.typing as npt
 import yaml
-from pydantic import BaseModel

 from ._settings import SettingsProvider
+from ._utils import ConfinedModel


 def ndarray_representer(dumper: yaml.Dumper, array: npt.NDArray[Any]) -> yaml.Node:
@@ -18,7 +18,7 @@ def ndarray_representer(dumper: yaml.Dumper, array: npt.NDArray[Any]) -> yaml.No


 def pydantic_model_abstraction_representer(
-    dumper: yaml.Dumper, model: BaseModel
+    dumper: yaml.Dumper, model: ConfinedModel
 ) -> yaml.Node:
     return dumper.represent_data(model.model_dump(mode="python"))

@@ -39,7 +39,7 @@ class YamlSettingsProvider(SettingsProvider):
     async def store(self, name: str, data: dict[str, Any]):
         yaml.add_representer(np.ndarray, ndarray_representer, Dumper=yaml.Dumper)
         yaml.add_multi_representer(
-            BaseModel,
+            ConfinedModel,
             pydantic_model_abstraction_representer,
             Dumper=yaml.Dumper,
         )
ophyd_async/epics/adandor/__init__.py
CHANGED

@@ -1,9 +1,15 @@
+"""Support for the ADAndor areaDetector driver.
+
+https://github.com/areaDetector/ADAndor.
+"""
+
 from ._andor import Andor2Detector
 from ._andor_controller import Andor2Controller
-from ._andor_io import Andor2DriverIO
+from ._andor_io import Andor2DriverIO, Andor2TriggerMode

 __all__ = [
     "Andor2Detector",
     "Andor2Controller",
     "Andor2DriverIO",
+    "Andor2TriggerMode",
 ]
ophyd_async/epics/adandor/_andor_controller.py
CHANGED

@@ -12,15 +12,11 @@ _MIN_DEAD_TIME = 0.1
 _MAX_NUM_IMAGE = 999_999


+# The deadtime of an Andor2 controller varies depending on the exact model of camera.
+# Ideally we would maximize performance by dynamically retrieving the deadtime at
+# runtime. See https://github.com/bluesky/ophyd-async/issues/308
 class Andor2Controller(adcore.ADBaseController[Andor2DriverIO]):
-    """
-
-    def __init__(
-        self,
-        driver: Andor2DriverIO,
-        good_states: frozenset[adcore.ADState] = adcore.DEFAULT_GOOD_STATES,
-    ) -> None:
-        super().__init__(driver, good_states=good_states)
+    """DetectorCobntroller for Andor2DriverIO."""

     def get_deadtime(self, exposure: float | None) -> float:
         return _MIN_DEAD_TIME + (exposure or 0)
@@ -29,6 +25,7 @@ class Andor2Controller(adcore.ADBaseController[Andor2DriverIO]):
         await self.set_exposure_time_and_acquire_period_if_supplied(
             trigger_info.livetime
         )
+
         await asyncio.gather(
             self.driver.trigger_mode.set(self._get_trigger_mode(trigger_info.trigger)),
             self.driver.num_images.set(
ophyd_async/epics/adandor/_andor_io.py
CHANGED

@@ -1,8 +1,9 @@
-from ophyd_async.core import StrictEnum
-from ophyd_async.epics import adcore
+from typing import Annotated as A
+
+from ophyd_async.core import SignalR, SignalRW, StrictEnum
+from ophyd_async.epics import adcore
 from ophyd_async.epics.core import (
-    epics_signal_r,
-    epics_signal_rw,
+    PvSuffix,
 )


@@ -15,20 +16,12 @@ class Andor2TriggerMode(StrictEnum):
     SOFTWARE = "Software"


-class Andor2DataType(StrictEnum):
-    UINT16 = "UInt16"
-    UINT32 = "UInt32"
-    FLOAT32 = "Float32"
-    FLOAT64 = "Float64"
-
+class Andor2DriverIO(adcore.ADBaseIO):
+    """Driver for andor model:DU897_BV as deployed on p99.

-class Andor2DriverIO(adcore.ADBaseIO):
-    """Driver for andor model:DU897_BV as deployed on p99."""
+    This mirrors the interface provided by AdAndor/db/andor.template.
+    https://areadetector.github.io/areaDetector/ADAndor/andorDoc.html
+    """

-    def __init__(self, prefix: str, name: str = "") -> None:
-        super().__init__(prefix, name=name)
-        self.trigger_mode = epics_signal_rw(Andor2TriggerMode, prefix + "TriggerMode")
-        self.data_type = epics_signal_r(Andor2DataType, prefix + "DataType_RBV")  # type: ignore
-        self.andor_accumulate_period = epics_signal_r(
-            float, prefix + "AndorAccumulatePeriod_RBV"
-        )
+    trigger_mode: A[SignalRW[Andor2TriggerMode], PvSuffix.rbv("TriggerMode")]
+    andor_accumulate_period: A[SignalR[float], PvSuffix("AndorAccumulatePeriod_RBV")]
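Note: the rewrite swaps imperative signal creation in __init__ for ophyd-async's declarative style, where annotated class attributes are turned into signals at connect time. A sketch with an invented driver (the Gain and Temperature PVs are illustrative):

from typing import Annotated as A

from ophyd_async.core import SignalR, SignalRW
from ophyd_async.epics import adcore
from ophyd_async.epics.core import PvSuffix


class ExampleDriverIO(adcore.ADBaseIO):
    # PvSuffix.rbv("Gain") pairs the demand PV "Gain" with readback "Gain_RBV"
    gain: A[SignalRW[float], PvSuffix.rbv("Gain")]
    # A bare PvSuffix names a single PV for a read-only signal
    temperature: A[SignalR[float], PvSuffix("Temperature_RBV")]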
ophyd_async/epics/adcore/_core_logic.py
CHANGED

@@ -7,6 +7,7 @@ from ophyd_async.core import (
     DetectorController,
     DetectorTrigger,
     TriggerInfo,
+    observe_value,
     set_and_wait_for_value,
 )

@@ -89,34 +90,57 @@ class ADBaseController(DetectorController, Generic[ADBaseIOT]):
             self.driver.acquire_period.set(full_frame_time, timeout=timeout),
         )

-    async def start_acquiring_driver_and_ensure_status(self) -> AsyncStatus:
+    async def start_acquiring_driver_and_ensure_status(
+        self,
+        start_timeout: float = DEFAULT_TIMEOUT,
+        state_timeout: float = DEFAULT_TIMEOUT,
+    ) -> AsyncStatus:
         """Start acquiring driver, raising ValueError if the detector is in a bad state.

         This sets driver.acquire to True, and waits for it to be True up to a timeout.
         Then, it checks that the DetectorState PV is in DEFAULT_GOOD_STATES,
         and otherwise raises a ValueError.

+
+        :param start_timeout:
+            Timeout used for waiting for the driver to start
+            acquiring.
+        :param state_timeout:
+            Timeout used for waiting for the detector to be in a good
+            state after it stops acquiring.
         :returns AsyncStatus:
             An AsyncStatus that can be awaited to set driver.acquire to True and perform
             subsequent raising (if applicable) due to detector state.
+
         """
         status = await set_and_wait_for_value(
             self.driver.acquire,
             True,
-            timeout=DEFAULT_TIMEOUT,
+            timeout=start_timeout,
             wait_for_set_completion=False,
         )

         async def complete_acquisition() -> None:
-            # NOTE: possible race condition here between the callback from
-            # set_and_wait_for_value and the detector state updating.
             await status
-            state = await self.driver.detector_state.get_value()
-            if state not in self.good_states:
-                raise ValueError(
-                    f"Final detector state {state.value} not in valid end "
-                    f"states: {self.good_states}"
-                )
+            state = None
+            try:
+                async for state in observe_value(
+                    self.driver.detector_state, done_timeout=state_timeout
+                ):
+                    if state in self.good_states:
+                        return
+            except asyncio.TimeoutError as exc:
+                if state is not None:
+                    raise ValueError(
+                        f"Final detector state {state.value} not in valid end "
+                        f"states: {self.good_states}"
+                    ) from exc
+                else:
+                    # No updates from the detector, something else is wrong
+                    raise asyncio.TimeoutError(
+                        "Could not monitor detector state: "
+                        + self.driver.detector_state.source
+                    ) from exc

         return AsyncStatus(complete_acquisition())
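Note: instead of sampling detector_state once and racing the monitor callback, the new code subscribes and returns on the first good state, with state_timeout bounding the whole wait; a timeout with no update at all is reported separately. The bare pattern, extracted (signal and good_states are placeholders):

import asyncio

from ophyd_async.core import observe_value


async def wait_for_state(signal, good_states, timeout: float = 10.0) -> None:
    last = None
    try:
        # Yields the current value first, then each monitor update;
        # done_timeout bounds the whole loop.
        async for last in observe_value(signal, done_timeout=timeout):
            if last in good_states:
                return
    except asyncio.TimeoutError as exc:
        raise asyncio.TimeoutError(
            f"{signal.source} last reported {last!r}, never a good state"
        ) from exc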
ophyd_async/epics/adcore/_hdf_writer.py
CHANGED

@@ -1,6 +1,7 @@
 import asyncio
 from collections.abc import AsyncIterator
 from pathlib import Path
+from typing import TypeGuard
 from xml.etree import ElementTree as ET

 from bluesky.protocols import StreamAsset
@@ -22,6 +23,10 @@ from ._utils import (
 )


+def _is_fully_described(shape: tuple[int | None, ...]) -> TypeGuard[tuple[int, ...]]:
+    return None not in shape
+
+
 class ADHDFWriter(ADWriter[NDFileHDFIO]):
     """Allow `NDFileHDFIO` to be used within `StandardDetector`."""

@@ -49,10 +54,7 @@ class ADHDFWriter(ADWriter[NDFileHDFIO]):
     async def open(
         self, name: str, exposures_per_event: PositiveInt = 1
     ) -> dict[str, DataKey]:
-        self._composer = None
-
         # Setting HDF writer specific signals
-
         # Make sure we are using chunk auto-sizing
         await asyncio.gather(self.fileio.chunk_size_auto.set(True))

@@ -75,6 +77,16 @@ class ADHDFWriter(ADWriter[NDFileHDFIO]):
         # Determine number of frames that will be saved per HDF chunk
         frames_per_chunk = await self.fileio.num_frames_chunks.get_value()

+        if not _is_fully_described(detector_shape):
+            # Questions:
+            # - Can AreaDetector support this?
+            # - How to deal with chunking?
+            # Don't support for now - leave option open to support it later
+            raise ValueError(
+                "Datasets with partially unknown dimensionality "
+                "are not currently supported by ADHDFWriter."
+            )
+
         # Add the main data
         self._datasets = [
             HDFDatasetDescription(
@@ -85,6 +97,13 @@ class ADHDFWriter(ADWriter[NDFileHDFIO]):
                 chunk_shape=(frames_per_chunk, *detector_shape),
             )
         ]
+
+        self._composer = HDFDocumentComposer(
+            # See https://github.com/bluesky/ophyd-async/issues/122
+            Path(await self.fileio.full_file_name.get_value()),
+            self._datasets,
+        )
+
         # And all the scalar datasets
         for plugin in self._plugins.values():
             maybe_xml = await plugin.nd_attributes_file.get_value()
@@ -134,20 +153,9 @@ class ADHDFWriter(ADWriter[NDFileHDFIO]):
         self, name: str, indices_written: int
     ) -> AsyncIterator[StreamAsset]:
         # TODO: fail if we get dropped frames
+        if self._composer is None:
+            msg = f"open() not called on {self}"
+            raise RuntimeError(msg)
         await self.fileio.flush_now.set(True)
-
-        if self._composer is None:
-            path = Path(await self.fileio.full_file_name.get_value())
-            self._composer = HDFDocumentComposer(
-                # See https://github.com/bluesky/ophyd-async/issues/122
-                path,
-                self._datasets,
-            )
-            # stream resource says "here is a dataset",
-            # stream datum says "here are N frames in that stream resource",
-            # you get one stream resource and many stream datums per scan
-
-            for doc in self._composer.stream_resources():
-                yield "stream_resource", doc
-        for doc in self._composer.stream_data(indices_written):
-            yield "stream_datum", doc
+        for doc in self._composer.make_stream_docs(indices_written):
+            yield doc
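Note: _is_fully_described is a TypeGuard, so a passing check narrows tuple[int | None, ...] to tuple[int, ...] and the chunk_shape=(frames_per_chunk, *detector_shape) line above type-checks. Standalone illustration:

from typing import TypeGuard


def is_fully_described(shape: tuple[int | None, ...]) -> TypeGuard[tuple[int, ...]]:
    return None not in shape


def chunk_shape(frames: int, shape: tuple[int | None, ...]) -> tuple[int, ...]:
    if not is_fully_described(shape):
        raise ValueError(f"shape {shape} has unknown dimensions")
    # Here the checker treats shape as tuple[int, ...]
    return (frames, *shape)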
ophyd_async/epics/eiger/_odin_io.py
CHANGED

@@ -45,9 +45,11 @@ class OdinNode(Device):


 class Odin(Device):
-    def __init__(self, prefix: str, name: str = "") -> None:
+    def __init__(self, prefix: str, name: str = "", nodes: int = 4) -> None:
+        # default nodes is set to 4, MX 16M Eiger detectors - nodes = 4.
+        # B21 4M Eiger detector - nodes = 1
         self.nodes = DeviceVector(
-            {i: OdinNode(f"{prefix[:-1]}{i + 1}:") for i in range(4)}
+            {i: OdinNode(f"{prefix[:-1]}{i + 1}:") for i in range(nodes)}
         )

         self.capture = epics_signal_rw(Writing, f"{prefix}Capture")