ophyd-async 0.10.1__py3-none-any.whl → 0.12__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- ophyd_async/_version.py +2 -2
- ophyd_async/core/__init__.py +12 -1
- ophyd_async/core/_derived_signal.py +68 -22
- ophyd_async/core/_derived_signal_backend.py +46 -24
- ophyd_async/core/_detector.py +3 -3
- ophyd_async/core/_device.py +24 -16
- ophyd_async/core/_flyer.py +39 -5
- ophyd_async/core/_hdf_dataset.py +11 -10
- ophyd_async/core/_signal.py +58 -30
- ophyd_async/core/_table.py +3 -3
- ophyd_async/core/_utils.py +25 -0
- ophyd_async/core/_yaml_settings.py +3 -3
- ophyd_async/epics/adandor/__init__.py +7 -1
- ophyd_async/epics/adandor/_andor_controller.py +5 -8
- ophyd_async/epics/adandor/_andor_io.py +12 -19
- ophyd_async/epics/adcore/_hdf_writer.py +12 -19
- ophyd_async/epics/core/_signal.py +8 -3
- ophyd_async/epics/eiger/_odin_io.py +4 -2
- ophyd_async/epics/motor.py +47 -97
- ophyd_async/epics/pmac/__init__.py +3 -0
- ophyd_async/epics/pmac/_pmac_io.py +100 -0
- ophyd_async/fastcs/eiger/__init__.py +1 -2
- ophyd_async/fastcs/eiger/_eiger.py +3 -9
- ophyd_async/fastcs/panda/_trigger.py +8 -8
- ophyd_async/fastcs/panda/_writer.py +15 -13
- ophyd_async/plan_stubs/__init__.py +0 -8
- ophyd_async/plan_stubs/_fly.py +0 -204
- ophyd_async/sim/__init__.py +1 -2
- ophyd_async/sim/_blob_detector_writer.py +6 -12
- ophyd_async/sim/_mirror_horizontal.py +3 -2
- ophyd_async/sim/_mirror_vertical.py +1 -0
- ophyd_async/sim/_motor.py +13 -43
- ophyd_async/testing/__init__.py +2 -0
- ophyd_async/testing/_assert.py +34 -6
- {ophyd_async-0.10.1.dist-info → ophyd_async-0.12.dist-info}/METADATA +4 -3
- {ophyd_async-0.10.1.dist-info → ophyd_async-0.12.dist-info}/RECORD +39 -37
- {ophyd_async-0.10.1.dist-info → ophyd_async-0.12.dist-info}/WHEEL +0 -0
- {ophyd_async-0.10.1.dist-info → ophyd_async-0.12.dist-info}/licenses/LICENSE +0 -0
- {ophyd_async-0.10.1.dist-info → ophyd_async-0.12.dist-info}/top_level.txt +0 -0
ophyd_async/core/_signal.py
CHANGED
|
@@ -17,6 +17,7 @@ from bluesky.protocols import (
|
|
|
17
17
|
Subscribable,
|
|
18
18
|
)
|
|
19
19
|
from event_model import DataKey
|
|
20
|
+
from stamina import retry_context
|
|
20
21
|
|
|
21
22
|
from ._device import Device, DeviceConnector
|
|
22
23
|
from ._mock_signal_backend import MockSignalBackend
|
|
@@ -31,6 +32,7 @@ from ._utils import (
|
|
|
31
32
|
Callback,
|
|
32
33
|
LazyMock,
|
|
33
34
|
T,
|
|
35
|
+
error_if_none,
|
|
34
36
|
)
|
|
35
37
|
|
|
36
38
|
|
|
@@ -88,9 +90,11 @@ class Signal(Device, Generic[SignalDatatypeT]):
|
|
|
88
90
|
backend: SignalBackend[SignalDatatypeT],
|
|
89
91
|
timeout: float | None = DEFAULT_TIMEOUT,
|
|
90
92
|
name: str = "",
|
|
93
|
+
attempts: int = 1,
|
|
91
94
|
) -> None:
|
|
92
95
|
super().__init__(name=name, connector=SignalConnector(backend))
|
|
93
96
|
self._timeout = timeout
|
|
97
|
+
self._attempts = attempts
|
|
94
98
|
|
|
95
99
|
@property
|
|
96
100
|
def source(self) -> str:
|
|
@@ -125,10 +129,8 @@ class _SignalCache(Generic[SignalDatatypeT]):
|
|
|
125
129
|
self._signal.log.debug(f"Closing subscription on source {self._signal.source}")
|
|
126
130
|
|
|
127
131
|
def _ensure_reading(self) -> Reading[SignalDatatypeT]:
|
|
128
|
-
|
|
129
|
-
|
|
130
|
-
raise RuntimeError(msg)
|
|
131
|
-
return self._reading
|
|
132
|
+
reading = error_if_none(self._reading, "Monitor not working")
|
|
133
|
+
return reading
|
|
132
134
|
|
|
133
135
|
async def get_reading(self) -> Reading[SignalDatatypeT]:
|
|
134
136
|
await self._valid.wait()
|
|
@@ -145,7 +147,8 @@ class _SignalCache(Generic[SignalDatatypeT]):
|
|
|
145
147
|
)
|
|
146
148
|
self._reading = reading
|
|
147
149
|
self._valid.set()
|
|
148
|
-
|
|
150
|
+
items = self._listeners.copy().items()
|
|
151
|
+
for function, want_value in items:
|
|
149
152
|
self._notify(function, want_value)
|
|
150
153
|
|
|
151
154
|
def _notify(
|
|
@@ -188,11 +191,8 @@ class SignalR(Signal[SignalDatatypeT], AsyncReadable, AsyncStageable, Subscribab
|
|
|
188
191
|
if cached is None:
|
|
189
192
|
cached = self._cache is not None
|
|
190
193
|
if cached:
|
|
191
|
-
|
|
192
|
-
|
|
193
|
-
raise RuntimeError(msg)
|
|
194
|
-
# assert self._cache, f"{self.source} not being monitored"
|
|
195
|
-
return self._cache
|
|
194
|
+
cache = error_if_none(self._cache, f"{self.source} not being monitored")
|
|
195
|
+
return cache
|
|
196
196
|
else:
|
|
197
197
|
return self._connector.backend
|
|
198
198
|
|
|
@@ -291,7 +291,16 @@ class SignalW(Signal[SignalDatatypeT], Movable):
|
|
|
291
291
|
timeout = self._timeout
|
|
292
292
|
source = self._connector.backend.source(self.name, read=False)
|
|
293
293
|
self.log.debug(f"Putting value {value} to backend at source {source}")
|
|
294
|
-
|
|
294
|
+
async for attempt in retry_context(
|
|
295
|
+
on=asyncio.TimeoutError,
|
|
296
|
+
attempts=self._attempts,
|
|
297
|
+
wait_initial=0,
|
|
298
|
+
wait_jitter=0,
|
|
299
|
+
):
|
|
300
|
+
with attempt:
|
|
301
|
+
await _wait_for(
|
|
302
|
+
self._connector.backend.put(value, wait=wait), timeout, source
|
|
303
|
+
)
|
|
295
304
|
self.log.debug(f"Successfully put value {value} to backend at source {source}")
|
|
296
305
|
|
|
297
306
|
|
|
@@ -692,7 +701,7 @@ async def set_and_wait_for_value(
|
|
|
692
701
|
)
|
|
693
702
|
|
|
694
703
|
|
|
695
|
-
def walk_rw_signals(device: Device
|
|
704
|
+
def walk_rw_signals(device: Device) -> dict[str, SignalRW[Any]]:
|
|
696
705
|
"""Retrieve all SignalRWs from a device.
|
|
697
706
|
|
|
698
707
|
Stores retrieved signals with their dotted attribute paths in a dictionary. Used as
|
|
@@ -704,19 +713,12 @@ def walk_rw_signals(device: Device, path_prefix: str = "") -> dict[str, SignalRW
|
|
|
704
713
|
A dictionary matching the string attribute path of a SignalRW with the
|
|
705
714
|
signal itself.
|
|
706
715
|
"""
|
|
707
|
-
|
|
708
|
-
|
|
709
|
-
for attr_name, attr in device.children():
|
|
710
|
-
dot_path = f"{path_prefix}{attr_name}"
|
|
711
|
-
if type(attr) is SignalRW:
|
|
712
|
-
signals[dot_path] = attr
|
|
713
|
-
attr_signals = walk_rw_signals(attr, path_prefix=dot_path + ".")
|
|
714
|
-
signals.update(attr_signals)
|
|
715
|
-
return signals
|
|
716
|
+
all_devices = walk_devices(device)
|
|
717
|
+
return {path: dev for path, dev in all_devices.items() if type(dev) is SignalRW}
|
|
716
718
|
|
|
717
719
|
|
|
718
720
|
async def walk_config_signals(
|
|
719
|
-
device: Device,
|
|
721
|
+
device: Device,
|
|
720
722
|
) -> dict[str, SignalRW[Any]]:
|
|
721
723
|
"""Retrieve all configuration signals from a device.
|
|
722
724
|
|
|
@@ -724,28 +726,54 @@ async def walk_config_signals(
|
|
|
724
726
|
part of saving and loading a device.
|
|
725
727
|
|
|
726
728
|
:param device: Device to retrieve configuration signals from.
|
|
727
|
-
:param path_prefix: For internal use, leave blank when calling the method.
|
|
728
729
|
:return:
|
|
729
730
|
A dictionary matching the string attribute path of a SignalRW with the
|
|
730
731
|
signal itself.
|
|
731
732
|
"""
|
|
732
|
-
signals: dict[str, SignalRW[Any]] = {}
|
|
733
733
|
config_names: list[str] = []
|
|
734
734
|
if isinstance(device, Configurable):
|
|
735
735
|
configuration = device.read_configuration()
|
|
736
736
|
if inspect.isawaitable(configuration):
|
|
737
737
|
configuration = await configuration
|
|
738
738
|
config_names = list(configuration.keys())
|
|
739
|
-
for attr_name, attr in device.children():
|
|
740
|
-
dot_path = f"{path_prefix}{attr_name}"
|
|
741
|
-
if isinstance(attr, SignalRW) and attr.name in config_names:
|
|
742
|
-
signals[dot_path] = attr
|
|
743
|
-
signals.update(await walk_config_signals(attr, path_prefix=dot_path + "."))
|
|
744
739
|
|
|
745
|
-
|
|
740
|
+
all_devices = walk_devices(device)
|
|
741
|
+
return {
|
|
742
|
+
path: dev
|
|
743
|
+
for path, dev in all_devices.items()
|
|
744
|
+
if isinstance(dev, SignalRW) and dev.name in config_names
|
|
745
|
+
}
|
|
746
746
|
|
|
747
747
|
|
|
748
748
|
class Ignore:
|
|
749
749
|
"""Annotation to ignore a signal when connecting a device."""
|
|
750
750
|
|
|
751
751
|
pass
|
|
752
|
+
|
|
753
|
+
|
|
754
|
+
def walk_devices(device: Device, path_prefix: str = "") -> dict[str, Device]:
|
|
755
|
+
"""Recursively retrieve all Devices from a device tree.
|
|
756
|
+
|
|
757
|
+
:param device: Root device to start from.
|
|
758
|
+
:param path_prefix: For internal use, leave blank when calling the method.
|
|
759
|
+
:return: A dictionary mapping dotted attribute paths to Device instances.
|
|
760
|
+
"""
|
|
761
|
+
devices: dict[str, Device] = {}
|
|
762
|
+
for attr_name, attr in device.children():
|
|
763
|
+
dot_path = f"{path_prefix}{attr_name}"
|
|
764
|
+
devices[dot_path] = attr
|
|
765
|
+
devices.update(walk_devices(attr, path_prefix=dot_path + "."))
|
|
766
|
+
return devices
|
|
767
|
+
|
|
768
|
+
|
|
769
|
+
def walk_signal_sources(device: Device) -> dict[str, str]:
|
|
770
|
+
"""Recursively gather the `source` field from every Signal in a device tree.
|
|
771
|
+
|
|
772
|
+
:param device: Root device to start from.
|
|
773
|
+
Only Signal instances contribute entries; other Devices are skipped.
|
|
774
|
+
:return: A dictionary mapping dotted attribute paths to Signal source strings.
|
|
775
|
+
"""
|
|
776
|
+
all_devices = walk_devices(device)
|
|
777
|
+
return {
|
|
778
|
+
path: dev.source for path, dev in all_devices.items() if isinstance(dev, Signal)
|
|
779
|
+
}
|
ophyd_async/core/_table.py
CHANGED
|
@@ -4,10 +4,10 @@ from collections.abc import Callable, Sequence
|
|
|
4
4
|
from typing import Annotated, Any, TypeVar, get_origin, get_type_hints
|
|
5
5
|
|
|
6
6
|
import numpy as np
|
|
7
|
-
from pydantic import
|
|
7
|
+
from pydantic import ConfigDict, Field, model_validator
|
|
8
8
|
from pydantic_numpy.helper.annotation import NpArrayPydanticAnnotation
|
|
9
9
|
|
|
10
|
-
from ._utils import get_dtype
|
|
10
|
+
from ._utils import ConfinedModel, get_dtype
|
|
11
11
|
|
|
12
12
|
TableSubclass = TypeVar("TableSubclass", bound="Table")
|
|
13
13
|
|
|
@@ -26,7 +26,7 @@ def _make_default_factory(dtype: np.dtype) -> Callable[[], np.ndarray]:
|
|
|
26
26
|
return numpy_array_default_factory
|
|
27
27
|
|
|
28
28
|
|
|
29
|
-
class Table(
|
|
29
|
+
class Table(ConfinedModel):
|
|
30
30
|
"""An abstraction of a Table where each field is a column.
|
|
31
31
|
|
|
32
32
|
For example:
|
ophyd_async/core/_utils.py
CHANGED
|
@@ -17,6 +17,7 @@ from typing import (
|
|
|
17
17
|
from unittest.mock import Mock
|
|
18
18
|
|
|
19
19
|
import numpy as np
|
|
20
|
+
from pydantic import BaseModel, ConfigDict
|
|
20
21
|
|
|
21
22
|
T = TypeVar("T")
|
|
22
23
|
V = TypeVar("V")
|
|
@@ -377,3 +378,27 @@ class LazyMock:
|
|
|
377
378
|
if self.parent is not None:
|
|
378
379
|
self.parent().attach_mock(self._mock, self.name)
|
|
379
380
|
return self._mock
|
|
381
|
+
|
|
382
|
+
|
|
383
|
+
class ConfinedModel(BaseModel):
|
|
384
|
+
"""A base class confined to explicitly defined fields in the model schema."""
|
|
385
|
+
|
|
386
|
+
model_config = ConfigDict(
|
|
387
|
+
extra="forbid",
|
|
388
|
+
)
|
|
389
|
+
|
|
390
|
+
|
|
391
|
+
def error_if_none(value: T | None, msg: str) -> T:
|
|
392
|
+
"""Check and return the value if not None.
|
|
393
|
+
|
|
394
|
+
:param value: The value to check
|
|
395
|
+
:param msg: The `RuntimeError` message to raise if it is None
|
|
396
|
+
:raises RuntimeError: If the value is None
|
|
397
|
+
:returns: The value if not None
|
|
398
|
+
|
|
399
|
+
Used to implement a pattern where a variable is None at init, then
|
|
400
|
+
changed by a method, then used in a later method.
|
|
401
|
+
"""
|
|
402
|
+
if value is None:
|
|
403
|
+
raise RuntimeError(msg)
|
|
404
|
+
return value
|
|
@@ -6,9 +6,9 @@ from typing import Any
|
|
|
6
6
|
import numpy as np
|
|
7
7
|
import numpy.typing as npt
|
|
8
8
|
import yaml
|
|
9
|
-
from pydantic import BaseModel
|
|
10
9
|
|
|
11
10
|
from ._settings import SettingsProvider
|
|
11
|
+
from ._utils import ConfinedModel
|
|
12
12
|
|
|
13
13
|
|
|
14
14
|
def ndarray_representer(dumper: yaml.Dumper, array: npt.NDArray[Any]) -> yaml.Node:
|
|
@@ -18,7 +18,7 @@ def ndarray_representer(dumper: yaml.Dumper, array: npt.NDArray[Any]) -> yaml.No
|
|
|
18
18
|
|
|
19
19
|
|
|
20
20
|
def pydantic_model_abstraction_representer(
|
|
21
|
-
dumper: yaml.Dumper, model:
|
|
21
|
+
dumper: yaml.Dumper, model: ConfinedModel
|
|
22
22
|
) -> yaml.Node:
|
|
23
23
|
return dumper.represent_data(model.model_dump(mode="python"))
|
|
24
24
|
|
|
@@ -39,7 +39,7 @@ class YamlSettingsProvider(SettingsProvider):
|
|
|
39
39
|
async def store(self, name: str, data: dict[str, Any]):
|
|
40
40
|
yaml.add_representer(np.ndarray, ndarray_representer, Dumper=yaml.Dumper)
|
|
41
41
|
yaml.add_multi_representer(
|
|
42
|
-
|
|
42
|
+
ConfinedModel,
|
|
43
43
|
pydantic_model_abstraction_representer,
|
|
44
44
|
Dumper=yaml.Dumper,
|
|
45
45
|
)
|
|
@@ -1,9 +1,15 @@
|
|
|
1
|
+
"""Support for the ADAndor areaDetector driver.
|
|
2
|
+
|
|
3
|
+
https://github.com/areaDetector/ADAndor.
|
|
4
|
+
"""
|
|
5
|
+
|
|
1
6
|
from ._andor import Andor2Detector
|
|
2
7
|
from ._andor_controller import Andor2Controller
|
|
3
|
-
from ._andor_io import Andor2DriverIO
|
|
8
|
+
from ._andor_io import Andor2DriverIO, Andor2TriggerMode
|
|
4
9
|
|
|
5
10
|
__all__ = [
|
|
6
11
|
"Andor2Detector",
|
|
7
12
|
"Andor2Controller",
|
|
8
13
|
"Andor2DriverIO",
|
|
14
|
+
"Andor2TriggerMode",
|
|
9
15
|
]
|
|
@@ -12,15 +12,11 @@ _MIN_DEAD_TIME = 0.1
|
|
|
12
12
|
_MAX_NUM_IMAGE = 999_999
|
|
13
13
|
|
|
14
14
|
|
|
15
|
+
# The deadtime of an Andor2 controller varies depending on the exact model of camera.
|
|
16
|
+
# Ideally we would maximize performance by dynamically retrieving the deadtime at
|
|
17
|
+
# runtime. See https://github.com/bluesky/ophyd-async/issues/308
|
|
15
18
|
class Andor2Controller(adcore.ADBaseController[Andor2DriverIO]):
|
|
16
|
-
"""
|
|
17
|
-
|
|
18
|
-
def __init__(
|
|
19
|
-
self,
|
|
20
|
-
driver: Andor2DriverIO,
|
|
21
|
-
good_states: frozenset[adcore.ADState] = adcore.DEFAULT_GOOD_STATES,
|
|
22
|
-
) -> None:
|
|
23
|
-
super().__init__(driver, good_states=good_states)
|
|
19
|
+
"""DetectorCobntroller for Andor2DriverIO."""
|
|
24
20
|
|
|
25
21
|
def get_deadtime(self, exposure: float | None) -> float:
|
|
26
22
|
return _MIN_DEAD_TIME + (exposure or 0)
|
|
@@ -29,6 +25,7 @@ class Andor2Controller(adcore.ADBaseController[Andor2DriverIO]):
|
|
|
29
25
|
await self.set_exposure_time_and_acquire_period_if_supplied(
|
|
30
26
|
trigger_info.livetime
|
|
31
27
|
)
|
|
28
|
+
|
|
32
29
|
await asyncio.gather(
|
|
33
30
|
self.driver.trigger_mode.set(self._get_trigger_mode(trigger_info.trigger)),
|
|
34
31
|
self.driver.num_images.set(
|
|
@@ -1,8 +1,9 @@
|
|
|
1
|
-
from
|
|
2
|
-
|
|
1
|
+
from typing import Annotated as A
|
|
2
|
+
|
|
3
|
+
from ophyd_async.core import SignalR, SignalRW, StrictEnum
|
|
4
|
+
from ophyd_async.epics import adcore
|
|
3
5
|
from ophyd_async.epics.core import (
|
|
4
|
-
|
|
5
|
-
epics_signal_rw,
|
|
6
|
+
PvSuffix,
|
|
6
7
|
)
|
|
7
8
|
|
|
8
9
|
|
|
@@ -15,20 +16,12 @@ class Andor2TriggerMode(StrictEnum):
|
|
|
15
16
|
SOFTWARE = "Software"
|
|
16
17
|
|
|
17
18
|
|
|
18
|
-
class
|
|
19
|
-
|
|
20
|
-
UINT32 = "UInt32"
|
|
21
|
-
FLOAT32 = "Float32"
|
|
22
|
-
FLOAT64 = "Float64"
|
|
23
|
-
|
|
19
|
+
class Andor2DriverIO(adcore.ADBaseIO):
|
|
20
|
+
"""Driver for andor model:DU897_BV as deployed on p99.
|
|
24
21
|
|
|
25
|
-
|
|
26
|
-
|
|
22
|
+
This mirrors the interface provided by ADAndor/db/andor.template.
|
|
23
|
+
https://areadetector.github.io/areaDetector/ADAndor/andorDoc.html
|
|
24
|
+
"""
|
|
27
25
|
|
|
28
|
-
|
|
29
|
-
|
|
30
|
-
self.trigger_mode = epics_signal_rw(Andor2TriggerMode, prefix + "TriggerMode")
|
|
31
|
-
self.data_type = epics_signal_r(Andor2DataType, prefix + "DataType_RBV") # type: ignore
|
|
32
|
-
self.andor_accumulate_period = epics_signal_r(
|
|
33
|
-
float, prefix + "AndorAccumulatePeriod_RBV"
|
|
34
|
-
)
|
|
26
|
+
trigger_mode: A[SignalRW[Andor2TriggerMode], PvSuffix.rbv("TriggerMode")]
|
|
27
|
+
andor_accumulate_period: A[SignalR[float], PvSuffix("AndorAccumulatePeriod_RBV")]
|
|
@@ -54,10 +54,7 @@ class ADHDFWriter(ADWriter[NDFileHDFIO]):
|
|
|
54
54
|
async def open(
|
|
55
55
|
self, name: str, exposures_per_event: PositiveInt = 1
|
|
56
56
|
) -> dict[str, DataKey]:
|
|
57
|
-
self._composer = None
|
|
58
|
-
|
|
59
57
|
# Setting HDF writer specific signals
|
|
60
|
-
|
|
61
58
|
# Make sure we are using chunk auto-sizing
|
|
62
59
|
await asyncio.gather(self.fileio.chunk_size_auto.set(True))
|
|
63
60
|
|
|
@@ -100,6 +97,13 @@ class ADHDFWriter(ADWriter[NDFileHDFIO]):
|
|
|
100
97
|
chunk_shape=(frames_per_chunk, *detector_shape),
|
|
101
98
|
)
|
|
102
99
|
]
|
|
100
|
+
|
|
101
|
+
self._composer = HDFDocumentComposer(
|
|
102
|
+
# See https://github.com/bluesky/ophyd-async/issues/122
|
|
103
|
+
Path(await self.fileio.full_file_name.get_value()),
|
|
104
|
+
self._datasets,
|
|
105
|
+
)
|
|
106
|
+
|
|
103
107
|
# And all the scalar datasets
|
|
104
108
|
for plugin in self._plugins.values():
|
|
105
109
|
maybe_xml = await plugin.nd_attributes_file.get_value()
|
|
@@ -149,20 +153,9 @@ class ADHDFWriter(ADWriter[NDFileHDFIO]):
|
|
|
149
153
|
self, name: str, indices_written: int
|
|
150
154
|
) -> AsyncIterator[StreamAsset]:
|
|
151
155
|
# TODO: fail if we get dropped frames
|
|
156
|
+
if self._composer is None:
|
|
157
|
+
msg = f"open() not called on {self}"
|
|
158
|
+
raise RuntimeError(msg)
|
|
152
159
|
await self.fileio.flush_now.set(True)
|
|
153
|
-
|
|
154
|
-
|
|
155
|
-
path = Path(await self.fileio.full_file_name.get_value())
|
|
156
|
-
self._composer = HDFDocumentComposer(
|
|
157
|
-
# See https://github.com/bluesky/ophyd-async/issues/122
|
|
158
|
-
path,
|
|
159
|
-
self._datasets,
|
|
160
|
-
)
|
|
161
|
-
# stream resource says "here is a dataset",
|
|
162
|
-
# stream datum says "here are N frames in that stream resource",
|
|
163
|
-
# you get one stream resource and many stream datums per scan
|
|
164
|
-
|
|
165
|
-
for doc in self._composer.stream_resources():
|
|
166
|
-
yield "stream_resource", doc
|
|
167
|
-
for doc in self._composer.stream_data(indices_written):
|
|
168
|
-
yield "stream_datum", doc
|
|
160
|
+
for doc in self._composer.make_stream_docs(indices_written):
|
|
161
|
+
yield doc
|
|
@@ -94,6 +94,7 @@ def epics_signal_rw(
|
|
|
94
94
|
write_pv: str | None = None,
|
|
95
95
|
name: str = "",
|
|
96
96
|
timeout: float = DEFAULT_TIMEOUT,
|
|
97
|
+
attempts: int = 1,
|
|
97
98
|
) -> SignalRW[SignalDatatypeT]:
|
|
98
99
|
"""Create a `SignalRW` backed by 1 or 2 EPICS PVs.
|
|
99
100
|
|
|
@@ -104,7 +105,7 @@ def epics_signal_rw(
|
|
|
104
105
|
:param timeout: A timeout to be used when reading (not connecting) this signal
|
|
105
106
|
"""
|
|
106
107
|
backend = _epics_signal_backend(datatype, read_pv, write_pv or read_pv)
|
|
107
|
-
return SignalRW(backend, name=name, timeout=timeout)
|
|
108
|
+
return SignalRW(backend, name=name, timeout=timeout, attempts=attempts)
|
|
108
109
|
|
|
109
110
|
|
|
110
111
|
def epics_signal_rw_rbv(
|
|
@@ -113,6 +114,7 @@ def epics_signal_rw_rbv(
|
|
|
113
114
|
read_suffix: str = "_RBV",
|
|
114
115
|
name: str = "",
|
|
115
116
|
timeout: float = DEFAULT_TIMEOUT,
|
|
117
|
+
attempts: int = 1,
|
|
116
118
|
) -> SignalRW[SignalDatatypeT]:
|
|
117
119
|
"""Create a `SignalRW` backed by 1 or 2 EPICS PVs, with a suffix on the readback pv.
|
|
118
120
|
|
|
@@ -128,7 +130,9 @@ def epics_signal_rw_rbv(
|
|
|
128
130
|
else:
|
|
129
131
|
read_pv = f"{write_pv}{read_suffix}"
|
|
130
132
|
|
|
131
|
-
return epics_signal_rw(
|
|
133
|
+
return epics_signal_rw(
|
|
134
|
+
datatype, read_pv, write_pv, name, timeout=timeout, attempts=attempts
|
|
135
|
+
)
|
|
132
136
|
|
|
133
137
|
|
|
134
138
|
def epics_signal_r(
|
|
@@ -153,6 +157,7 @@ def epics_signal_w(
|
|
|
153
157
|
write_pv: str,
|
|
154
158
|
name: str = "",
|
|
155
159
|
timeout: float = DEFAULT_TIMEOUT,
|
|
160
|
+
attempts: int = 1,
|
|
156
161
|
) -> SignalW[SignalDatatypeT]:
|
|
157
162
|
"""Create a `SignalW` backed by 1 EPICS PVs.
|
|
158
163
|
|
|
@@ -162,7 +167,7 @@ def epics_signal_w(
|
|
|
162
167
|
:param timeout: A timeout to be used when reading (not connecting) this signal
|
|
163
168
|
"""
|
|
164
169
|
backend = _epics_signal_backend(datatype, write_pv, write_pv)
|
|
165
|
-
return SignalW(backend, name=name, timeout=timeout)
|
|
170
|
+
return SignalW(backend, name=name, timeout=timeout, attempts=attempts)
|
|
166
171
|
|
|
167
172
|
|
|
168
173
|
def epics_signal_x(
|
|
@@ -45,9 +45,11 @@ class OdinNode(Device):
|
|
|
45
45
|
|
|
46
46
|
|
|
47
47
|
class Odin(Device):
|
|
48
|
-
def __init__(self, prefix: str, name: str = "") -> None:
|
|
48
|
+
def __init__(self, prefix: str, name: str = "", nodes: int = 4) -> None:
|
|
49
|
+
# Default is 4 nodes, as used by MX 16M Eiger detectors.
|
|
50
|
+
# B21 4M Eiger detector - nodes = 1
|
|
49
51
|
self.nodes = DeviceVector(
|
|
50
|
-
{i: OdinNode(f"{prefix[:-1]}{i + 1}:") for i in range(
|
|
52
|
+
{i: OdinNode(f"{prefix[:-1]}{i + 1}:") for i in range(nodes)}
|
|
51
53
|
)
|
|
52
54
|
|
|
53
55
|
self.capture = epics_signal_rw(Writing, f"{prefix}Capture")
|