ophyd-async 0.10.1__py3-none-any.whl → 0.11__py3-none-any.whl

This diff compares the contents of two publicly released versions of the package, as published to a supported public registry. It is provided for informational purposes only and reflects the packages exactly as they appear in that registry.
Files changed (34)
  1. ophyd_async/_version.py +2 -2
  2. ophyd_async/core/__init__.py +12 -1
  3. ophyd_async/core/_derived_signal.py +68 -22
  4. ophyd_async/core/_derived_signal_backend.py +46 -24
  5. ophyd_async/core/_detector.py +3 -3
  6. ophyd_async/core/_device.py +24 -16
  7. ophyd_async/core/_flyer.py +35 -1
  8. ophyd_async/core/_hdf_dataset.py +11 -10
  9. ophyd_async/core/_signal.py +43 -28
  10. ophyd_async/core/_table.py +3 -3
  11. ophyd_async/core/_utils.py +25 -0
  12. ophyd_async/core/_yaml_settings.py +3 -3
  13. ophyd_async/epics/adandor/__init__.py +7 -1
  14. ophyd_async/epics/adandor/_andor_controller.py +5 -8
  15. ophyd_async/epics/adandor/_andor_io.py +12 -19
  16. ophyd_async/epics/adcore/_hdf_writer.py +12 -19
  17. ophyd_async/epics/eiger/_odin_io.py +4 -2
  18. ophyd_async/epics/motor.py +46 -96
  19. ophyd_async/epics/pmac/__init__.py +3 -0
  20. ophyd_async/epics/pmac/_pmac_io.py +100 -0
  21. ophyd_async/fastcs/eiger/__init__.py +1 -2
  22. ophyd_async/fastcs/eiger/_eiger.py +3 -9
  23. ophyd_async/fastcs/panda/_trigger.py +4 -4
  24. ophyd_async/fastcs/panda/_writer.py +15 -13
  25. ophyd_async/sim/__init__.py +1 -2
  26. ophyd_async/sim/_blob_detector_writer.py +6 -12
  27. ophyd_async/sim/_mirror_horizontal.py +3 -2
  28. ophyd_async/sim/_mirror_vertical.py +1 -0
  29. ophyd_async/sim/_motor.py +13 -43
  30. {ophyd_async-0.10.1.dist-info → ophyd_async-0.11.dist-info}/METADATA +2 -2
  31. {ophyd_async-0.10.1.dist-info → ophyd_async-0.11.dist-info}/RECORD +34 -32
  32. {ophyd_async-0.10.1.dist-info → ophyd_async-0.11.dist-info}/WHEEL +0 -0
  33. {ophyd_async-0.10.1.dist-info → ophyd_async-0.11.dist-info}/licenses/LICENSE +0 -0
  34. {ophyd_async-0.10.1.dist-info → ophyd_async-0.11.dist-info}/top_level.txt +0 -0
@@ -31,6 +31,7 @@ from ._utils import (
     Callback,
     LazyMock,
     T,
+    error_if_none,
 )


@@ -125,10 +126,8 @@ class _SignalCache(Generic[SignalDatatypeT]):
         self._signal.log.debug(f"Closing subscription on source {self._signal.source}")

     def _ensure_reading(self) -> Reading[SignalDatatypeT]:
-        if not self._reading:
-            msg = "Monitor not working"
-            raise RuntimeError(msg)
-        return self._reading
+        reading = error_if_none(self._reading, "Monitor not working")
+        return reading

     async def get_reading(self) -> Reading[SignalDatatypeT]:
         await self._valid.wait()
@@ -188,11 +187,8 @@ class SignalR(Signal[SignalDatatypeT], AsyncReadable, AsyncStageable, Subscribab
         if cached is None:
             cached = self._cache is not None
         if cached:
-            if not self._cache:
-                msg = f"{self.source} not being monitored"
-                raise RuntimeError(msg)
-            # assert self._cache, f"{self.source} not being monitored"
-            return self._cache
+            cache = error_if_none(self._cache, f"{self.source} not being monitored")
+            return cache
         else:
             return self._connector.backend

@@ -692,7 +688,7 @@ async def set_and_wait_for_value(
     )


-def walk_rw_signals(device: Device, path_prefix: str = "") -> dict[str, SignalRW[Any]]:
+def walk_rw_signals(device: Device) -> dict[str, SignalRW[Any]]:
     """Retrieve all SignalRWs from a device.

     Stores retrieved signals with their dotted attribute paths in a dictionary. Used as
@@ -704,19 +700,12 @@ def walk_rw_signals(device: Device, path_prefix: str = "") -> dict[str, SignalRW
         A dictionary matching the string attribute path of a SignalRW with the
         signal itself.
     """
-    signals: dict[str, SignalRW[Any]] = {}
-
-    for attr_name, attr in device.children():
-        dot_path = f"{path_prefix}{attr_name}"
-        if type(attr) is SignalRW:
-            signals[dot_path] = attr
-        attr_signals = walk_rw_signals(attr, path_prefix=dot_path + ".")
-        signals.update(attr_signals)
-    return signals
+    all_devices = walk_devices(device)
+    return {path: dev for path, dev in all_devices.items() if type(dev) is SignalRW}


 async def walk_config_signals(
-    device: Device, path_prefix: str = ""
+    device: Device,
 ) -> dict[str, SignalRW[Any]]:
     """Retrieve all configuration signals from a device.

@@ -724,28 +713,54 @@ async def walk_config_signals(
     part of saving and loading a device.

     :param device: Device to retrieve configuration signals from.
-    :param path_prefix: For internal use, leave blank when calling the method.
     :return:
         A dictionary matching the string attribute path of a SignalRW with the
         signal itself.
     """
-    signals: dict[str, SignalRW[Any]] = {}
     config_names: list[str] = []
     if isinstance(device, Configurable):
         configuration = device.read_configuration()
         if inspect.isawaitable(configuration):
             configuration = await configuration
         config_names = list(configuration.keys())
-    for attr_name, attr in device.children():
-        dot_path = f"{path_prefix}{attr_name}"
-        if isinstance(attr, SignalRW) and attr.name in config_names:
-            signals[dot_path] = attr
-        signals.update(await walk_config_signals(attr, path_prefix=dot_path + "."))

-    return signals
+    all_devices = walk_devices(device)
+    return {
+        path: dev
+        for path, dev in all_devices.items()
+        if isinstance(dev, SignalRW) and dev.name in config_names
+    }


 class Ignore:
     """Annotation to ignore a signal when connecting a device."""

     pass
+
+
+def walk_devices(device: Device, path_prefix: str = "") -> dict[str, Device]:
+    """Recursively retrieve all Devices from a device tree.
+
+    :param device: Root device to start from.
+    :param path_prefix: For internal use, leave blank when calling the method.
+    :return: A dictionary mapping dotted attribute paths to Device instances.
+    """
+    devices: dict[str, Device] = {}
+    for attr_name, attr in device.children():
+        dot_path = f"{path_prefix}{attr_name}"
+        devices[dot_path] = attr
+        devices.update(walk_devices(attr, path_prefix=dot_path + "."))
+    return devices
+
+
+def walk_signal_sources(device: Device) -> dict[str, str]:
+    """Recursively gather the `source` field from every Signal in a device tree.
+
+    :param device: Root device to start from.
+    :param path_prefix: For internal use, leave blank when calling the method.
+    :return: A dictionary mapping dotted attribute paths to Signal source strings.
+    """
+    all_devices = walk_devices(device)
+    return {
+        path: dev.source for path, dev in all_devices.items() if isinstance(dev, Signal)
+    }
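The new walk_devices helper replaces the hand-rolled recursion that walk_rw_signals and walk_config_signals previously carried, so both become simple filters over one dotted-path-to-Device mapping. A minimal usage sketch with a hypothetical two-signal device follows; it assumes walk_devices joins walk_rw_signals in the ophyd_async.core exports (the core/__init__.py entry in the file list above suggests new re-exports).

from ophyd_async.core import Device, soft_signal_rw, walk_devices, walk_rw_signals


class Stage(Device):
    # Hypothetical device, used only for illustration
    def __init__(self, name: str = "") -> None:
        self.x = soft_signal_rw(float)
        self.y = soft_signal_rw(float)
        super().__init__(name=name)


stage = Stage(name="stage")

# Every child keyed by its dotted attribute path, e.g. {"x": <SignalRW>, "y": <SignalRW>}
print(walk_devices(stage))

# walk_rw_signals is now just walk_devices filtered down to SignalRW children
print(walk_rw_signals(stage))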
@@ -4,10 +4,10 @@ from collections.abc import Callable, Sequence
 from typing import Annotated, Any, TypeVar, get_origin, get_type_hints

 import numpy as np
-from pydantic import BaseModel, ConfigDict, Field, model_validator
+from pydantic import ConfigDict, Field, model_validator
 from pydantic_numpy.helper.annotation import NpArrayPydanticAnnotation

-from ._utils import get_dtype
+from ._utils import ConfinedModel, get_dtype

 TableSubclass = TypeVar("TableSubclass", bound="Table")

@@ -26,7 +26,7 @@ def _make_default_factory(dtype: np.dtype) -> Callable[[], np.ndarray]:
     return numpy_array_default_factory


-class Table(BaseModel):
+class Table(ConfinedModel):
     """An abstraction of a Table where each field is a column.

     For example:
@@ -17,6 +17,7 @@ from typing import (
 from unittest.mock import Mock

 import numpy as np
+from pydantic import BaseModel, ConfigDict

 T = TypeVar("T")
 V = TypeVar("V")
@@ -377,3 +378,27 @@ class LazyMock:
         if self.parent is not None:
             self.parent().attach_mock(self._mock, self.name)
         return self._mock
+
+
+class ConfinedModel(BaseModel):
+    """A base class confined to explicitly defined fields in the model schema."""
+
+    model_config = ConfigDict(
+        extra="forbid",
+    )
+
+
+def error_if_none(value: T | None, msg: str) -> T:
+    """Check and return the value if not None.
+
+    :param value: The value to check
+    :param msg: The `RuntimeError` message to raise if it is None
+    :raises RuntimeError: If the value is None
+    :returns: The value if not None
+
+    Used to implement a pattern where a variable is None at init, then
+    changed by a method, then used in a later method.
+    """
+    if value is None:
+        raise RuntimeError(msg)
+    return value
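error_if_none captures the pattern used in the _signal and motor hunks elsewhere in this diff: an attribute starts as None in __init__, is filled in by a setup method, and must exist before a later method runs. ConfinedModel is a pydantic BaseModel with extra="forbid", so unknown fields raise a ValidationError instead of being silently ignored. A minimal sketch of both, importing from the module shown in this hunk (they may also be re-exported from ophyd_async.core):

from ophyd_async.core._utils import ConfinedModel, error_if_none


class AcquireSettings(ConfinedModel):
    # Hypothetical model: unknown fields are rejected rather than dropped
    exposure: float = 1.0


AcquireSettings(exposure=0.5)    # ok
# AcquireSettings(exposrue=0.5)  # typo -> pydantic ValidationError because extra="forbid"


class Acquirer:
    # Hypothetical class using the None-at-init pattern error_if_none supports
    def __init__(self) -> None:
        self._filename: str | None = None

    def open(self) -> None:
        self._filename = "/tmp/data.h5"

    def describe(self) -> str:
        # Raises RuntimeError("open() not called") if open() was never called
        return error_if_none(self._filename, "open() not called")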
@@ -6,9 +6,9 @@ from typing import Any
 import numpy as np
 import numpy.typing as npt
 import yaml
-from pydantic import BaseModel

 from ._settings import SettingsProvider
+from ._utils import ConfinedModel


 def ndarray_representer(dumper: yaml.Dumper, array: npt.NDArray[Any]) -> yaml.Node:
@@ -18,7 +18,7 @@ def ndarray_representer(dumper: yaml.Dumper, array: npt.NDArray[Any]) -> yaml.No


 def pydantic_model_abstraction_representer(
-    dumper: yaml.Dumper, model: BaseModel
+    dumper: yaml.Dumper, model: ConfinedModel
 ) -> yaml.Node:
     return dumper.represent_data(model.model_dump(mode="python"))

@@ -39,7 +39,7 @@ class YamlSettingsProvider(SettingsProvider):
     async def store(self, name: str, data: dict[str, Any]):
         yaml.add_representer(np.ndarray, ndarray_representer, Dumper=yaml.Dumper)
         yaml.add_multi_representer(
-            BaseModel,
+            ConfinedModel,
             pydantic_model_abstraction_representer,
             Dumper=yaml.Dumper,
         )
@@ -1,9 +1,15 @@
+"""Support for the ADAndor areaDetector driver.
+
+https://github.com/areaDetector/ADAndor.
+"""
+
 from ._andor import Andor2Detector
 from ._andor_controller import Andor2Controller
-from ._andor_io import Andor2DriverIO
+from ._andor_io import Andor2DriverIO, Andor2TriggerMode

 __all__ = [
     "Andor2Detector",
     "Andor2Controller",
     "Andor2DriverIO",
+    "Andor2TriggerMode",
 ]
@@ -12,15 +12,11 @@ _MIN_DEAD_TIME = 0.1
 _MAX_NUM_IMAGE = 999_999


+# The deadtime of an Andor2 controller varies depending on the exact model of camera.
+# Ideally we would maximize performance by dynamically retrieving the deadtime at
+# runtime. See https://github.com/bluesky/ophyd-async/issues/308
 class Andor2Controller(adcore.ADBaseController[Andor2DriverIO]):
-    """For controlling the Andor 2 detector."""
-
-    def __init__(
-        self,
-        driver: Andor2DriverIO,
-        good_states: frozenset[adcore.ADState] = adcore.DEFAULT_GOOD_STATES,
-    ) -> None:
-        super().__init__(driver, good_states=good_states)
+    """DetectorCobntroller for Andor2DriverIO."""

     def get_deadtime(self, exposure: float | None) -> float:
         return _MIN_DEAD_TIME + (exposure or 0)
@@ -29,6 +25,7 @@ class Andor2Controller(adcore.ADBaseController[Andor2DriverIO]):
         await self.set_exposure_time_and_acquire_period_if_supplied(
             trigger_info.livetime
         )
+
         await asyncio.gather(
             self.driver.trigger_mode.set(self._get_trigger_mode(trigger_info.trigger)),
             self.driver.num_images.set(
@@ -1,8 +1,9 @@
-from ophyd_async.core import StrictEnum, SubsetEnum
-from ophyd_async.epics.adcore import ADBaseIO
+from typing import Annotated as A
+
+from ophyd_async.core import SignalR, SignalRW, StrictEnum
+from ophyd_async.epics import adcore
 from ophyd_async.epics.core import (
-    epics_signal_r,
-    epics_signal_rw,
+    PvSuffix,
 )


@@ -15,20 +16,12 @@ class Andor2TriggerMode(StrictEnum):
     SOFTWARE = "Software"


-class Andor2DataType(SubsetEnum):
-    UINT16 = "UInt16"
-    UINT32 = "UInt32"
-    FLOAT32 = "Float32"
-    FLOAT64 = "Float64"
-
+class Andor2DriverIO(adcore.ADBaseIO):
+    """Driver for andor model:DU897_BV as deployed on p99.

-class Andor2DriverIO(ADBaseIO):
-    """Epics pv for andor model:DU897_BV as deployed on p99."""
+    This mirrors the interface provided by AdAndor/db/andor.template.
+    https://areadetector.github.io/areaDetector/ADAndor/andorDoc.html
+    """

-    def __init__(self, prefix: str, name: str = "") -> None:
-        super().__init__(prefix, name=name)
-        self.trigger_mode = epics_signal_rw(Andor2TriggerMode, prefix + "TriggerMode")
-        self.data_type = epics_signal_r(Andor2DataType, prefix + "DataType_RBV")  # type: ignore
-        self.andor_accumulate_period = epics_signal_r(
-            float, prefix + "AndorAccumulatePeriod_RBV"
-        )
+    trigger_mode: A[SignalRW[Andor2TriggerMode], PvSuffix.rbv("TriggerMode")]
+    andor_accumulate_period: A[SignalR[float], PvSuffix("AndorAccumulatePeriod_RBV")]
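Andor2DriverIO now declares its signals with Annotated type hints and PvSuffix instead of building them in __init__; PvSuffix.rbv("TriggerMode") yields a SignalRW that writes to ...TriggerMode and reads back from ...TriggerMode_RBV. A short usage sketch with a made-up PV prefix, connected in mock mode so no IOC is required:

import asyncio

from ophyd_async.epics.adandor import Andor2DriverIO, Andor2TriggerMode


async def main() -> None:
    # Hypothetical prefix; substitute the CAM prefix of a real ADAndor IOC
    driver = Andor2DriverIO("BL99P-EA-DET-01:CAM:", name="driver")
    await driver.connect(mock=True)

    await driver.trigger_mode.set(Andor2TriggerMode.SOFTWARE)
    print(await driver.trigger_mode.get_value())


asyncio.run(main())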
@@ -54,10 +54,7 @@ class ADHDFWriter(ADWriter[NDFileHDFIO]):
     async def open(
         self, name: str, exposures_per_event: PositiveInt = 1
     ) -> dict[str, DataKey]:
-        self._composer = None
-
         # Setting HDF writer specific signals
-
         # Make sure we are using chunk auto-sizing
         await asyncio.gather(self.fileio.chunk_size_auto.set(True))

@@ -100,6 +97,13 @@ class ADHDFWriter(ADWriter[NDFileHDFIO]):
                 chunk_shape=(frames_per_chunk, *detector_shape),
             )
         ]
+
+        self._composer = HDFDocumentComposer(
+            # See https://github.com/bluesky/ophyd-async/issues/122
+            Path(await self.fileio.full_file_name.get_value()),
+            self._datasets,
+        )
+
         # And all the scalar datasets
         for plugin in self._plugins.values():
             maybe_xml = await plugin.nd_attributes_file.get_value()
@@ -149,20 +153,9 @@ class ADHDFWriter(ADWriter[NDFileHDFIO]):
         self, name: str, indices_written: int
     ) -> AsyncIterator[StreamAsset]:
         # TODO: fail if we get dropped frames
+        if self._composer is None:
+            msg = f"open() not called on {self}"
+            raise RuntimeError(msg)
         await self.fileio.flush_now.set(True)
-        if indices_written:
-            if not self._composer:
-                path = Path(await self.fileio.full_file_name.get_value())
-                self._composer = HDFDocumentComposer(
-                    # See https://github.com/bluesky/ophyd-async/issues/122
-                    path,
-                    self._datasets,
-                )
-            # stream resource says "here is a dataset",
-            # stream datum says "here are N frames in that stream resource",
-            # you get one stream resource and many stream datums per scan
-
-            for doc in self._composer.stream_resources():
-                yield "stream_resource", doc
-            for doc in self._composer.stream_data(indices_written):
-                yield "stream_datum", doc
+        for doc in self._composer.make_stream_docs(indices_written):
+            yield doc
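The composer is now built once in open(), so collect_stream_docs only checks that open() has run and delegates to HDFDocumentComposer.make_stream_docs. Judging by the code removed above, that call bundles the two 0.10.1 steps: stream_resource documents ("here is a dataset") followed by stream_datum documents ("here are N frames in that stream resource"). A rough sketch of that presumed behaviour, written against the old composer methods; the real make_stream_docs may differ in detail:

from collections.abc import Iterator
from typing import Any


def make_stream_docs_sketch(composer: Any, indices_written: int) -> Iterator[tuple[str, dict]]:
    # One stream_resource per dataset
    for doc in composer.stream_resources():
        yield "stream_resource", doc
    # stream_datum documents covering the frames written so far
    for doc in composer.stream_data(indices_written):
        yield "stream_datum", doc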
@@ -45,9 +45,11 @@ class OdinNode(Device):


 class Odin(Device):
-    def __init__(self, prefix: str, name: str = "") -> None:
+    def __init__(self, prefix: str, name: str = "", nodes: int = 4) -> None:
+        # default nodes is set to 4, MX 16M Eiger detectors - nodes = 4.
+        # B21 4M Eiger detector - nodes = 1
         self.nodes = DeviceVector(
-            {i: OdinNode(f"{prefix[:-1]}{i + 1}:") for i in range(4)}
+            {i: OdinNode(f"{prefix[:-1]}{i + 1}:") for i in range(nodes)}
         )

         self.capture = epics_signal_rw(Writing, f"{prefix}Capture")
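The number of OdinNode children is now a constructor argument rather than being hard-coded to four: the comment above notes that the MX 16M Eigers use four file-writer nodes while the B21 4M Eiger uses one. A brief sketch with made-up PV prefixes (each node prefix is derived as f"{prefix[:-1]}{i + 1}:"):

from ophyd_async.epics.eiger._odin_io import Odin

odin_16m = Odin("BL99I-EA-EIGER-01:OD:", name="odin_16m")         # default nodes=4
odin_4m = Odin("BL99B-EA-EIGER-01:OD:", name="odin_4m", nodes=1)  # single-node writer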
@@ -14,7 +14,6 @@ from bluesky.protocols import (
     Stoppable,
     Subscribable,
 )
-from pydantic import BaseModel, Field

 from ophyd_async.core import (
     CALCULATE_TIMEOUT,
@@ -22,16 +21,18 @@ from ophyd_async.core import (
     AsyncStatus,
     CalculatableTimeout,
     Callback,
+    FlyMotorInfo,
     StandardReadable,
     StrictEnum,
     WatchableAsyncStatus,
     WatcherUpdate,
+    error_if_none,
     observe_value,
 )
 from ophyd_async.core import StandardReadableFormat as Format
 from ophyd_async.epics.core import epics_signal_r, epics_signal_rw, epics_signal_w

-__all__ = ["MotorLimitsException", "FlyMotorInfo", "Motor"]
+__all__ = ["MotorLimitsException", "Motor"]


 class MotorLimitsException(Exception):
@@ -40,26 +41,6 @@ class MotorLimitsException(Exception):
     pass


-class FlyMotorInfo(BaseModel):
-    """Minimal set of information required to fly a motor."""
-
-    start_position: float = Field(frozen=True)
-    """Absolute position of the motor once it finishes accelerating to desired
-    velocity, in motor EGUs"""
-
-    end_position: float = Field(frozen=True)
-    """Absolute position of the motor once it begins decelerating from desired
-    velocity, in EGUs"""
-
-    time_for_move: float = Field(frozen=True, gt=0)
-    """Time taken for the motor to get from start_position to end_position, excluding
-    run-up and run-down, in seconds."""
-
-    timeout: CalculatableTimeout = Field(frozen=True, default=CALCULATE_TIMEOUT)
-    """Maximum time for the complete motor move, including run up and run down.
-    Defaults to `time_for_move` + run up and run down times + 10s."""
-
-
 class OffsetMode(StrictEnum):
     VARIABLE = "Variable"
     FROZEN = "Frozen"
@@ -111,15 +92,12 @@ class Motor(
         # Whether set() should complete successfully or not
         self._set_success = True

-        # end_position of a fly move, with run_up_distance added on.
-        self._fly_completed_position: float | None = None
+        # Currently requested fly info, stored in prepare
+        self._fly_info: FlyMotorInfo | None = None

         # Set on kickoff(), complete when motor reaches self._fly_completed_position
         self._fly_status: WatchableAsyncStatus | None = None

-        # Set during prepare
-        self._fly_timeout: CalculatableTimeout | None = CALCULATE_TIMEOUT
-
         super().__init__(name=name)

     def set_name(self, name: str, *, child_name_separator: str | None = None) -> None:
@@ -131,41 +109,64 @@ class Motor(
     @AsyncStatus.wrap
     async def prepare(self, value: FlyMotorInfo):
         """Move to the beginning of a suitable run-up distance ready for a flyscan."""
-        self._fly_timeout = value.timeout
+        self._fly_info = value

         # Velocity, at which motor travels from start_position to end_position, in motor
         # egu/s.
-        fly_velocity = await self._prepare_velocity(
-            value.start_position,
-            value.end_position,
-            value.time_for_move,
+        max_speed, egu = await asyncio.gather(
+            self.max_velocity.get_value(), self.motor_egu.get_value()
         )
+        if abs(value.velocity) > max_speed:
+            raise MotorLimitsException(
+                f"Velocity {abs(value.velocity)} {egu}/s was requested for a motor "
+                f" with max speed of {max_speed} {egu}/s"
+            )
+
+        acceleration_time = await self.acceleration_time.get_value()
+        ramp_up_start_pos = value.ramp_up_start_pos(acceleration_time)
+        ramp_down_end_pos = value.ramp_down_end_pos(acceleration_time)

-        # start_position with run_up_distance added on.
-        fly_prepared_position = await self._prepare_motor_path(
-            abs(fly_velocity), value.start_position, value.end_position
+        motor_lower_limit, motor_upper_limit, egu = await asyncio.gather(
+            self.low_limit_travel.get_value(),
+            self.high_limit_travel.get_value(),
+            self.motor_egu.get_value(),
         )

-        await self.set(fly_prepared_position)
-        await self.velocity.set(abs(fly_velocity))
+        if (
+            not motor_upper_limit >= ramp_up_start_pos >= motor_lower_limit
+            or not motor_upper_limit >= ramp_down_end_pos >= motor_lower_limit
+        ):
+            raise MotorLimitsException(
+                f"Motor trajectory for requested fly is from "
+                f"{ramp_up_start_pos}{egu} to "
+                f"{ramp_down_end_pos}{egu} but motor limits are "
+                f"{motor_lower_limit}{egu} <= x <= {motor_upper_limit}{egu} "
+            )
+
+        # move to prepare position at maximum velocity
+        await self.velocity.set(abs(max_speed))
+        await self.set(ramp_up_start_pos)
+
+        # Set velocity we will be using for the flyscan
+        await self.velocity.set(abs(value.velocity))

     @AsyncStatus.wrap
     async def kickoff(self):
         """Begin moving motor from prepared position to final position."""
-        if not self._fly_completed_position:
-            msg = "Motor must be prepared before attempting to kickoff"
-            raise RuntimeError(msg)
+        fly_info = error_if_none(
+            self._fly_info, "Motor must be prepared before attempting to kickoff"
+        )

+        acceleration_time = await self.acceleration_time.get_value()
         self._fly_status = self.set(
-            self._fly_completed_position, timeout=self._fly_timeout
+            fly_info.ramp_down_end_pos(acceleration_time),
+            timeout=fly_info.timeout,
         )

     def complete(self) -> WatchableAsyncStatus:
         """Mark as complete once motor reaches completed position."""
-        if not self._fly_status:
-            msg = "kickoff not called"
-            raise RuntimeError(msg)
-        return self._fly_status
+        fly_status = error_if_none(self._fly_status, "kickoff not called")
+        return fly_status

     @WatchableAsyncStatus.wrap
     async def set(  # type: ignore
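prepare() now keeps the whole FlyMotorInfo and derives the ramp-up and ramp-down positions from it, while kickoff() and complete() fetch what they need through error_if_none rather than separate _fly_* attributes. A sketch of the fly sequence from the caller's side, assuming the relocated ophyd_async.core FlyMotorInfo keeps the constructor fields of the class removed above (start_position, end_position, time_for_move and an optional timeout) and derives velocity from them:

from ophyd_async.core import FlyMotorInfo
from ophyd_async.epics.motor import Motor


async def fly_scan(motor: Motor) -> None:
    # Field names assumed unchanged from the 0.10.1 model shown above
    info = FlyMotorInfo(start_position=0.0, end_position=10.0, time_for_move=5.0)
    await motor.prepare(info)   # checks velocity and limits, moves to the run-up position
    await motor.kickoff()       # starts the timed move towards the run-down position
    await motor.complete()      # finishes when the motor reaches it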
@@ -220,22 +221,6 @@ class Motor(
         # the move above, so need to pass wait=False
         await self.motor_stop.set(1, wait=False)

-    async def _prepare_velocity(
-        self, start_position: float, end_position: float, time_for_move: float
-    ) -> float:
-        fly_velocity = (end_position - start_position) / time_for_move
-        max_speed, egu = await asyncio.gather(
-            self.max_velocity.get_value(), self.motor_egu.get_value()
-        )
-        if abs(fly_velocity) > max_speed:
-            raise MotorLimitsException(
-                f"Motor speed of {abs(fly_velocity)} {egu}/s was requested for a motor "
-                f" with max speed of {max_speed} {egu}/s"
-            )
-        # move to prepare position at maximum velocity
-        await self.velocity.set(abs(max_speed))
-        return fly_velocity
-
     async def locate(self) -> Location[float]:
         """Return the current setpoint and readback of the motor."""
         setpoint, readback = await asyncio.gather(
@@ -250,38 +235,3 @@ class Motor(
     def clear_sub(self, function: Callback[dict[str, Reading[float]]]) -> None:
         """Unsubscribe."""
         self.user_readback.clear_sub(function)
-
-    async def _prepare_motor_path(
-        self, fly_velocity: float, start_position: float, end_position: float
-    ) -> float:
-        # Distance required for motor to accelerate from stationary to fly_velocity, and
-        # distance required for motor to decelerate from fly_velocity to stationary
-        run_up_distance = (
-            (await self.acceleration_time.get_value()) * fly_velocity * 0.5
-        )
-
-        direction = 1 if end_position > start_position else -1
-        self._fly_completed_position = end_position + (run_up_distance * direction)
-
-        # Prepared position not used after prepare, so no need to store in self
-        fly_prepared_position = start_position - (run_up_distance * direction)
-
-        motor_lower_limit, motor_upper_limit, egu = await asyncio.gather(
-            self.low_limit_travel.get_value(),
-            self.high_limit_travel.get_value(),
-            self.motor_egu.get_value(),
-        )
-
-        if (
-            not motor_upper_limit >= fly_prepared_position >= motor_lower_limit
-            or not motor_upper_limit
-            >= self._fly_completed_position
-            >= motor_lower_limit
-        ):
-            raise MotorLimitsException(
-                f"Motor trajectory for requested fly is from "
-                f"{fly_prepared_position}{egu} to "
-                f"{self._fly_completed_position}{egu} but motor limits are "
-                f"{motor_lower_limit}{egu} <= x <= {motor_upper_limit}{egu} "
-            )
-        return fly_prepared_position
@@ -0,0 +1,3 @@
+from ._pmac_io import PmacAxisIO, PmacCoordIO, PmacIO, PmacTrajectoryIO
+
+__all__ = ["PmacAxisIO", "PmacCoordIO", "PmacIO", "PmacTrajectoryIO"]