dls-dodal 1.39.0__py3-none-any.whl → 1.40.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (48)
  1. {dls_dodal-1.39.0.dist-info → dls_dodal-1.40.0.dist-info}/METADATA +2 -2
  2. {dls_dodal-1.39.0.dist-info → dls_dodal-1.40.0.dist-info}/RECORD +48 -39
  3. dodal/_version.py +2 -2
  4. dodal/beamlines/__init__.py +2 -0
  5. dodal/beamlines/adsim.py +3 -2
  6. dodal/beamlines/b01_1.py +3 -3
  7. dodal/beamlines/i03.py +141 -292
  8. dodal/beamlines/i04.py +112 -198
  9. dodal/beamlines/i13_1.py +5 -4
  10. dodal/beamlines/i18.py +124 -0
  11. dodal/beamlines/i19_1.py +74 -0
  12. dodal/beamlines/i19_2.py +61 -0
  13. dodal/beamlines/i20_1.py +37 -22
  14. dodal/beamlines/i22.py +7 -7
  15. dodal/beamlines/i24.py +100 -145
  16. dodal/beamlines/p38.py +12 -8
  17. dodal/beamlines/p45.py +5 -4
  18. dodal/beamlines/training_rig.py +4 -4
  19. dodal/common/beamlines/beamline_utils.py +2 -3
  20. dodal/common/beamlines/device_helpers.py +3 -1
  21. dodal/devices/aperturescatterguard.py +150 -64
  22. dodal/devices/apple2_undulator.py +86 -113
  23. dodal/devices/eiger.py +3 -2
  24. dodal/devices/fast_grid_scan.py +16 -18
  25. dodal/devices/hutch_shutter.py +25 -12
  26. dodal/devices/i10/rasor/rasor_scaler_cards.py +4 -4
  27. dodal/devices/i13_1/merlin.py +4 -3
  28. dodal/devices/i13_1/merlin_controller.py +2 -7
  29. dodal/devices/i18/KBMirror.py +19 -0
  30. dodal/devices/i18/diode.py +17 -0
  31. dodal/devices/i18/table.py +14 -0
  32. dodal/devices/i18/thor_labs_stage.py +12 -0
  33. dodal/devices/i19/__init__.py +0 -0
  34. dodal/devices/i19/shutter.py +57 -0
  35. dodal/devices/i22/nxsas.py +4 -4
  36. dodal/devices/motors.py +2 -2
  37. dodal/devices/oav/oav_detector.py +10 -19
  38. dodal/devices/pressure_jump_cell.py +33 -16
  39. dodal/devices/robot.py +30 -11
  40. dodal/devices/tetramm.py +8 -3
  41. dodal/devices/turbo_slit.py +7 -6
  42. dodal/devices/zocalo/zocalo_results.py +21 -4
  43. dodal/plans/save_panda.py +30 -14
  44. dodal/utils.py +54 -15
  45. {dls_dodal-1.39.0.dist-info → dls_dodal-1.40.0.dist-info}/LICENSE +0 -0
  46. {dls_dodal-1.39.0.dist-info → dls_dodal-1.40.0.dist-info}/WHEEL +0 -0
  47. {dls_dodal-1.39.0.dist-info → dls_dodal-1.40.0.dist-info}/entry_points.txt +0 -0
  48. {dls_dodal-1.39.0.dist-info → dls_dodal-1.40.0.dist-info}/top_level.txt +0 -0
dodal/devices/i22/nxsas.py CHANGED
@@ -103,7 +103,7 @@ class NXSasPilatus(PilatusDetector):
        prefix: str,
        path_provider: PathProvider,
        drv_suffix: str,
-        hdf_suffix: str,
+        fileio_suffix: str,
        metadata_holder: NXSasMetadataHolder,
        name: str = "",
    ):
@@ -116,7 +116,7 @@ class NXSasPilatus(PilatusDetector):
            prefix,
            path_provider,
            drv_suffix=drv_suffix,
-            hdf_suffix=hdf_suffix,
+            fileio_suffix=fileio_suffix,
            name=name,
        )
        self._metadata_holder = metadata_holder
@@ -146,7 +146,7 @@ class NXSasOAV(AravisDetector):
        prefix: str,
        path_provider: PathProvider,
        drv_suffix: str,
-        hdf_suffix: str,
+        fileio_suffix: str,
        metadata_holder: NXSasMetadataHolder,
        name: str = "",
        gpio_number: AravisController.GPIO_NUMBER = 1,
@@ -160,7 +160,7 @@ class NXSasOAV(AravisDetector):
            prefix,
            path_provider,
            drv_suffix=drv_suffix,
-            hdf_suffix=hdf_suffix,
+            fileio_suffix=fileio_suffix,
            name=name,
            gpio_number=gpio_number,
        )
dodal/devices/motors.py CHANGED
@@ -19,10 +19,10 @@ class XYZPositioner(StandardReadable):
    Notes
    -----
    Example usage::
-        async with DeviceCollector():
+        async with init_devices():
            xyz_stage = XYZPositioner("BLXX-MO-STAGE-XX:")
    Or::
-        with DeviceCollector():
+        with init_devices():
            xyz_stage = XYZPositioner("BLXX-MO-STAGE-XX:", infix = ("A", "B", "C"))

    """
dodal/devices/oav/oav_detector.py CHANGED
@@ -1,6 +1,12 @@
from enum import IntEnum

-from ophyd_async.core import DEFAULT_TIMEOUT, AsyncStatus, LazyMock, StandardReadable
+from bluesky.protocols import Movable
+from ophyd_async.core import (
+    DEFAULT_TIMEOUT,
+    AsyncStatus,
+    LazyMock,
+    StandardReadable,
+)
from ophyd_async.epics.core import epics_signal_rw

from dodal.common.signal_utils import create_hardware_backed_soft_signal
@@ -8,12 +14,6 @@ from dodal.devices.areadetector.plugins.CAM import Cam
from dodal.devices.oav.oav_parameters import DEFAULT_OAV_WINDOW, OAVConfig
from dodal.devices.oav.snapshots.snapshot_with_beam_centre import SnapshotWithBeamCentre
from dodal.devices.oav.snapshots.snapshot_with_grid import SnapshotWithGrid
-from dodal.log import LOGGER
-
-
-class ZoomLevelNotFoundError(Exception):
-    def __init__(self, errmsg):
-        LOGGER.error(errmsg)


class Coords(IntEnum):
@@ -29,7 +29,7 @@ def _get_correct_zoom_string(zoom: str) -> str:
    return zoom


-class ZoomController(StandardReadable):
+class ZoomController(StandardReadable, Movable):
    """
    Device to control the zoom level. This should be set like
    o = OAV(name="oav")
@@ -46,18 +46,9 @@ class ZoomController(StandardReadable):
        self.level = epics_signal_rw(str, f"{prefix}MP:SELECT")
        super().__init__(name=name)

-    async def _get_allowed_zoom_levels(self) -> list:
-        zoom_levels = await self.level.describe()
-        return zoom_levels[self.level.name]["choices"]  # type: ignore
-
    @AsyncStatus.wrap
-    async def set(self, level_to_set: str):
-        allowed_zoom_levels = await self._get_allowed_zoom_levels()
-        if level_to_set not in allowed_zoom_levels:
-            raise ZoomLevelNotFoundError(
-                f"{level_to_set} not found, expected one of {allowed_zoom_levels}"
-            )
-        await self.level.set(level_to_set, wait=True)
+    async def set(self, value: str):
+        await self.level.set(value, wait=True)


class OAV(StandardReadable):
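
Note: ZoomLevelNotFoundError and the choices check are gone, so ZoomController is now a plain Movable that forwards whatever string it is given to the MP:SELECT PV; rejecting an unknown level is left to EPICS. A hedged sketch of setting it from a plan (the oav.zoom_controller attribute name and the "5.0x" level are illustrative):

    import bluesky.plan_stubs as bps


    def set_zoom_level(oav, level: str = "5.0x"):
        # ZoomController implements Movable, so it can be set like any other device;
        # dodal no longer validates the level before it reaches the PV
        yield from bps.abs_set(oav.zoom_controller, level, wait=True)
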
dodal/devices/pressure_jump_cell.py CHANGED
@@ -151,11 +151,11 @@ class AllValvesControl(StandardReadable, Movable):
        )


-class ValveControl(StandardReadable):
+class ValveControl(StandardReadable, Movable):
    def __init__(self, prefix: str, name: str = "") -> None:
        with self.add_children_as_readables():
            self.close = epics_signal_rw(ValveControlRequest, prefix + ":CON")
-            self.open = epics_signal_rw(ValveOpenSeqRequest, prefix + ":OPENSEQ")
+            self.open = epics_signal_rw(int, prefix + ":OPENSEQ")

        super().__init__(name)

@@ -165,16 +165,16 @@ class ValveControl(StandardReadable):
        if isinstance(value, ValveControlRequest):
            set_status = self.close.set(value)
        elif isinstance(value, ValveOpenSeqRequest):
-            set_status = self.open.set(value)
+            set_status = self.open.set(value.value)

        return set_status


-class FastValveControl(StandardReadable):
+class FastValveControl(StandardReadable, Movable):
    def __init__(self, prefix: str, name: str = "") -> None:
        with self.add_children_as_readables():
            self.close = epics_signal_rw(FastValveControlRequest, prefix + ":CON")
-            self.open = epics_signal_rw(ValveOpenSeqRequest, prefix + ":OPENSEQ")
+            self.open = epics_signal_rw(int, prefix + ":OPENSEQ")

        super().__init__(name)

@@ -184,7 +184,7 @@ class FastValveControl(StandardReadable):
        if isinstance(value, FastValveControlRequest):
            set_status = self.close.set(value)
        elif isinstance(value, ValveOpenSeqRequest):
-            set_status = self.open.set(value)
+            set_status = self.open.set(value.value)

        return set_status

@@ -219,21 +219,24 @@ class PressureTransducer(StandardReadable):
        self,
        prefix: str,
        cell_prefix: str,
-        number: int,
+        transducer_number: int,
+        ethercat_channel_number: int,
        name: str = "",
        full_different_prefix_adc: str = "",
    ) -> None:
        final_prefix = f"{prefix}{cell_prefix}"
        with self.add_children_as_readables():
            self.omron_pressure = epics_signal_r(
-                float, f"{final_prefix}PP{number}:PRES"
+                float, f"{final_prefix}PP{transducer_number}:PRES"
+            )
+            self.omron_voltage = epics_signal_r(
+                float, f"{final_prefix}PP{transducer_number}:RAW"
            )
-            self.omron_voltage = epics_signal_r(float, f"{final_prefix}PP{number}:RAW")
            self.beckhoff_pressure = epics_signal_r(
-                float, f"{final_prefix}STATP{number}:MeanValue_RBV"
+                float, f"{final_prefix}STATP{transducer_number}:MeanValue_RBV"
            )
            self.slow_beckhoff_voltage_readout = epics_signal_r(
-                float, f"{full_different_prefix_adc}CH1"
+                float, f"{full_different_prefix_adc}CH{ethercat_channel_number}"
            )

        super().__init__(name)
@@ -284,13 +287,27 @@ class PressureJumpCell(StandardReadable):
        with self.add_children_as_readables():
            self.pressure_transducers: DeviceVector[PressureTransducer] = DeviceVector(
                {
-                    i: PressureTransducer(
+                    1: PressureTransducer(
+                        prefix=prefix,
+                        cell_prefix=cell_prefix,
+                        transducer_number=1,
+                        full_different_prefix_adc=f"{prefix}{adc_prefix}-02:",
+                        ethercat_channel_number=1,
+                    ),
+                    2: PressureTransducer(
+                        prefix=prefix,
+                        cell_prefix=cell_prefix,
+                        transducer_number=2,
+                        full_different_prefix_adc=f"{prefix}{adc_prefix}-01:",
+                        ethercat_channel_number=2,
+                    ),
+                    3: PressureTransducer(
                        prefix=prefix,
-                        number=i,
                        cell_prefix=cell_prefix,
-                        full_different_prefix_adc=f"{prefix}{adc_prefix}-0{i}:",
-                    )
-                    for i in [1, 2, 3]
+                        transducer_number=3,
+                        full_different_prefix_adc=f"{prefix}{adc_prefix}-01:",
+                        ethercat_channel_number=1,
+                    ),
                }
            )
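
Note: PressureTransducer now takes the EtherCAT ADC channel separately from the transducer number, because (per the PressureJumpCell wiring above) the two no longer coincide. A hedged construction sketch with illustrative PV prefixes:

    from dodal.devices.pressure_jump_cell import PressureTransducer

    # Transducer 3 reads the PP3/STATP3 PVs but sits on channel 1 of its ADC,
    # matching the mapping hard-coded in PressureJumpCell above
    transducer_3 = PressureTransducer(
        prefix="BL38P-EA-",                            # hypothetical beamline prefix
        cell_prefix="HPXC-01:",                        # hypothetical cell prefix
        transducer_number=3,
        ethercat_channel_number=1,
        full_different_prefix_adc="BL38P-EA-ADC-01:",  # hypothetical ADC prefix
        name="pressure_transducer_3",
    )
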
dodal/devices/robot.py CHANGED
@@ -5,6 +5,7 @@ from dataclasses import dataclass
from bluesky.protocols import Movable
from ophyd_async.core import (
    AsyncStatus,
+    Device,
    StandardReadable,
    StrictEnum,
    set_and_wait_for_value,
@@ -38,23 +39,36 @@ class PinMounted(StrictEnum):
    PIN_MOUNTED = "Pin Mounted"


+class ErrorStatus(Device):
+    def __init__(self, prefix: str) -> None:
+        self.str = epics_signal_r(str, prefix + "_ERR_MSG")
+        self.code = epics_signal_r(int, prefix + "_ERR_CODE")
+        super().__init__()
+
+    async def raise_if_error(self, raise_from: Exception):
+        error_code = await self.code.get_value()
+        if error_code:
+            error_string = await self.str.get_value()
+            raise RobotLoadFailed(int(error_code), error_string) from raise_from
+
+
class BartRobot(StandardReadable, Movable):
    """The sample changing robot."""

    # How long to wait for the robot if it is busy soaking/drying
    NOT_BUSY_TIMEOUT = 5 * 60
+
    # How long to wait for the actual load to happen
    LOAD_TIMEOUT = 60
+
+    # Error codes that we do special things on
    NO_PIN_ERROR_CODE = 25
+    LIGHT_CURTAIN_TRIPPED = 40

    # How far the gonio position can be out before loading will fail
    LOAD_TOLERANCE_MM = 0.02

-    def __init__(
-        self,
-        name: str,
-        prefix: str,
-    ) -> None:
+    def __init__(self, name: str, prefix: str) -> None:
        self.barcode = epics_signal_r(str, prefix + "BARCODE")
        self.gonio_pin_sensor = epics_signal_r(PinMounted, prefix + "PIN_MOUNTED")

@@ -69,8 +83,10 @@ class BartRobot(StandardReadable, Movable):
        self.load = epics_signal_x(prefix + "LOAD.PROC")
        self.program_running = epics_signal_r(bool, prefix + "PROGRAM_RUNNING")
        self.program_name = epics_signal_r(str, prefix + "PROGRAM_NAME")
-        self.error_str = epics_signal_r(str, prefix + "PRG_ERR_MSG")
-        self.error_code = epics_signal_r(int, prefix + "PRG_ERR_CODE")
+
+        self.prog_error = ErrorStatus(prefix + "PRG")
+        self.controller_error = ErrorStatus(prefix + "CNTL")
+
        self.reset = epics_signal_x(prefix + "RESET.PROC")
        super().__init__(name=name)

@@ -81,7 +97,7 @@ class BartRobot(StandardReadable, Movable):
        """

        async def raise_if_no_pin():
-            await wait_for_value(self.error_code, self.NO_PIN_ERROR_CODE, None)
+            await wait_for_value(self.prog_error.code, self.NO_PIN_ERROR_CODE, None)
            raise RobotLoadFailed(self.NO_PIN_ERROR_CODE, "Pin was not detected")

        async def wfv():
@@ -108,6 +124,9 @@ class BartRobot(StandardReadable, Movable):
                raise

    async def _load_pin_and_puck(self, sample_location: SampleLocation):
+        if await self.controller_error.code.get_value() == self.LIGHT_CURTAIN_TRIPPED:
+            LOGGER.info("Light curtain tripped, trying again")
+            await self.reset.trigger()
        LOGGER.info(f"Loading pin {sample_location}")
        if await self.program_running.get_value():
            LOGGER.info(
@@ -137,6 +156,6 @@ class BartRobot(StandardReadable, Movable):
            )
        except (asyncio.TimeoutError, TimeoutError) as e:
            # Will only need to catch asyncio.TimeoutError after https://github.com/bluesky/ophyd-async/issues/572
-            error_code = await self.error_code.get_value()
-            error_string = await self.error_str.get_value()
-            raise RobotLoadFailed(int(error_code), error_string) from e
+            await self.prog_error.raise_if_error(e)
+            await self.controller_error.raise_if_error(e)
+            raise RobotLoadFailed(0, "Robot timed out") from e
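
Note: the new ErrorStatus device groups each _ERR_MSG/_ERR_CODE PV pair, and a timed-out load now reports whichever of the program or controller errors is set, falling back to RobotLoadFailed(0, "Robot timed out"). A hedged sketch of what a caller sees, assuming set() still takes a SampleLocation(puck, pin) as in 1.39.0 and using an illustrative prefix:

    from dodal.devices.robot import BartRobot, RobotLoadFailed, SampleLocation

    robot = BartRobot(name="robot", prefix="BL04I-MO-ROBOT-01:")  # hypothetical prefix


    async def load_sample(puck: int, pin: int) -> None:
        try:
            # BartRobot is Movable: set() runs the load sequence with its own timeouts
            await robot.set(SampleLocation(puck, pin))
        except RobotLoadFailed as error:
            # carries the code/message from the PRG_* or CNTL_* PVs, or (0, "Robot timed out")
            print(f"Load failed: {error}")
            raise
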
dodal/devices/tetramm.py CHANGED
@@ -13,7 +13,12 @@ from ophyd_async.core import (
    set_and_wait_for_value,
    soft_signal_r_and_setter,
)
-from ophyd_async.epics.adcore import ADHDFWriter, NDFileHDFIO, stop_busy_record
+from ophyd_async.epics.adcore import (
+    ADHDFWriter,
+    NDFileHDFIO,
+    NDPluginBaseIO,
+    stop_busy_record,
+)
from ophyd_async.epics.core import (
    epics_signal_r,
    epics_signal_rw,
@@ -221,7 +226,7 @@ class TetrammDetector(StandardDetector):
        path_provider: PathProvider,
        name: str = "",
        type: str | None = None,
-        **scalar_sigs: str,
+        plugins: dict[str, NDPluginBaseIO] | None = None,
    ) -> None:
        self.drv = TetrammDriver(prefix + "DRV:")
        self.hdf = NDFileHDFIO(prefix + "HDF5:")
@@ -243,7 +248,7 @@
                path_provider,
                lambda: self.name,
                TetrammDatasetDescriber(controller),
-                **scalar_sigs,
+                plugins=plugins,
            ),
            config_signals,
            name,
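
Note: TetrammDetector now accepts extra NDPlugin blocks as an explicit plugins dict handed to the HDF writer, replacing the old **scalar_sigs strings. A hedged construction sketch; the PV prefixes and plugin suffix are illustrative:

    from pathlib import Path

    from ophyd_async.core import StaticFilenameProvider, StaticPathProvider
    from ophyd_async.epics.adcore import NDPluginBaseIO

    from dodal.devices.tetramm import TetrammDetector

    path_provider = StaticPathProvider(StaticFilenameProvider("tetramm"), Path("/tmp"))

    tetramm = TetrammDetector(
        "BL22I-EA-XBPM-01:",  # hypothetical prefix
        path_provider,
        name="tetramm",
        plugins={"stats": NDPluginBaseIO("BL22I-EA-XBPM-01:STAT:")},  # hypothetical plugin
    )
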
dodal/devices/turbo_slit.py CHANGED
@@ -1,8 +1,8 @@
-from ophyd_async.core import Device
+from ophyd_async.core import StandardReadable
from ophyd_async.epics.motor import Motor


-class TurboSlit(Device):
+class TurboSlit(StandardReadable):
    """
    This collection of motors coordinates time resolved XAS experiments.
    It selects a beam out of the polychromatic fan.
@@ -17,8 +17,9 @@ class TurboSlit(Device):
    - xfine selects the energy as part of the high frequency scan
    """

-    def __init__(self, prefix: str, name: str):
-        self.gap = Motor(prefix=prefix + "GAP")
-        self.arc = Motor(prefix=prefix + "ARC")
-        self.xfine = Motor(prefix=prefix + "XFINE")
+    def __init__(self, prefix: str, name: str = ""):
+        with self.add_children_as_readables():
+            self.gap = Motor(prefix=prefix + "GAP")
+            self.arc = Motor(prefix=prefix + "ARC")
+            self.xfine = Motor(prefix=prefix + "XFINE")
        super().__init__(name=name)
dodal/devices/zocalo/zocalo_results.py CHANGED
@@ -129,7 +129,12 @@ class ZocaloResults(StandardReadable, Triggerable):

        prefix (str): EPICS PV prefix for the device

-        use_cpu_and_gpu (bool): When True, ZocaloResults will wait for results from the CPU and the GPU, compare them, and provide a warning if the results differ. When False, ZocaloResults will only use results from the CPU
+        use_cpu_and_gpu (bool): When True, ZocaloResults will wait for results from the
+            CPU and the GPU, compare them, and provide a warning if the results differ. When
+            False, ZocaloResults will only use results from the CPU
+
+        use_gpu (bool): When True, ZocaloResults will take the first set of
+            results that it receives (which are likely the GPU results)

    """

@@ -142,6 +147,7 @@ class ZocaloResults(StandardReadable, Triggerable):
        timeout_s: float = DEFAULT_TIMEOUT,
        prefix: str = "",
        use_cpu_and_gpu: bool = False,
+        use_gpu: bool = False,
    ) -> None:
        self.zocalo_environment = zocalo_environment
        self.sort_key = SortKeys[sort_key]
@@ -151,6 +157,7 @@ class ZocaloResults(StandardReadable, Triggerable):
        self._raw_results_received: Queue = Queue()
        self.transport: CommonTransport | None = None
        self.use_cpu_and_gpu = use_cpu_and_gpu
+        self.use_gpu = use_gpu

        self.centre_of_mass, self._com_setter = soft_signal_r_and_setter(
            Array1D[np.float64], name="centre_of_mass"
@@ -213,6 +220,11 @@ class ZocaloResults(StandardReadable, Triggerable):
        clearing the queue. Plans using this device should wait on ZOCALO_STAGE_GROUP
        before triggering processing for the experiment"""

+        if self.use_cpu_and_gpu and self.use_gpu:
+            raise ValueError(
+                "Cannot compare GPU and CPU results and use GPU results at the same time."
+            )
+
        LOGGER.info("Subscribing to results queue")
        try:
            self._subscribe_to_results()
@@ -253,8 +265,13 @@ class ZocaloResults(StandardReadable, Triggerable):
            raw_results = self._raw_results_received.get(timeout=self.timeout_s)
            source_of_first_results = source_from_results(raw_results)

-            # Wait for results from CPU and GPU, warn and continue if only GPU times out. Error if CPU times out
+            if self.use_gpu and source_of_first_results == ZocaloSource.CPU:
+                LOGGER.warning(
+                    "Configured to use GPU results but CPU came first, using CPU results."
+                )
+
            if self.use_cpu_and_gpu:
+                # Wait for results from CPU and GPU, warn and continue if only GPU times out. Error if CPU times out
                if source_of_first_results == ZocaloSource.CPU:
                    LOGGER.warning("Received zocalo results from CPU before GPU")
                    raw_results_two_sources = [raw_results]
@@ -303,7 +320,7 @@ class ZocaloResults(StandardReadable, Triggerable):
                raise err

            LOGGER.info(
-                f"Zocalo results from {ZocaloSource.CPU.value} processing: found {len(raw_results['results'])} crystals."
+                f"Zocalo results from {source_from_results(raw_results)} processing: found {len(raw_results['results'])} crystals."
            )
            # Sort from strongest to weakest in case of multiple crystals
            await self._put_results(
@@ -335,7 +352,7 @@ class ZocaloResults(StandardReadable, Triggerable):

        results = message.get("results", [])

-        if self.use_cpu_and_gpu:
+        if self.use_cpu_and_gpu or self.use_gpu:
            self._raw_results_received.put(
                {"results": results, "recipe_parameters": recipe_parameters}
            )
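
Note: use_gpu and use_cpu_and_gpu are now mutually exclusive, checked when the device is staged. A hedged construction sketch; the environment name is illustrative and the other constructor keywords are assumed unchanged from 1.39.0:

    from dodal.devices.zocalo.zocalo_results import ZocaloResults

    # Take whichever result set arrives first (normally the GPU one). Comparing CPU
    # and GPU results would instead use use_cpu_and_gpu=True; setting both flags
    # raises ValueError at stage time.
    zocalo = ZocaloResults(
        name="zocalo",
        zocalo_environment="dev_zocalo",  # hypothetical environment
        use_gpu=True,
    )
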
dodal/plans/save_panda.py CHANGED
@@ -6,8 +6,10 @@ from pathlib import Path
from typing import cast

from bluesky.run_engine import RunEngine
-from ophyd_async.core import Device, save_device
-from ophyd_async.fastcs.panda import phase_sorter
+from ophyd_async.core import Device, YamlSettingsProvider
+from ophyd_async.plan_stubs import (
+    store_settings,
+)

from dodal.beamlines import module_name_for_beamline
from dodal.utils import make_device
@@ -17,20 +19,24 @@ def main(argv: list[str]):
    """CLI Utility to save the panda configuration."""
    parser = ArgumentParser(description="Save an ophyd_async panda to yaml")
    parser.add_argument(
-        "--beamline", help="beamline to save from e.g. i03. Defaults to BEAMLINE"
+        "--beamline", help="Beamline to save from e.g. i03. Defaults to BEAMLINE"
    )
    parser.add_argument(
        "--device-name",
-        help='name of the device. The default is "panda"',
+        help='Name of the device. The default is "panda"',
        default="panda",
    )
+    parser.add_argument(
+        "--output-file",
+        help="Path to output file, including filename, eg '/scratch/panda_settings'. '.yaml' is appended to the file name automatically",
+        required=True,
+    )
    parser.add_argument(
        "-f",
        "--force",
        action=argparse.BooleanOptionalAction,
        help="Force overwriting an existing file",
    )
-    parser.add_argument("output_file", help="output filename")

    # this exit()s with message/help unless args parsed successfully
    args = parser.parse_args(argv[1:])
@@ -40,6 +46,9 @@
    output_file = args.output_file
    force = args.force

+    p = Path(output_file)
+    output_directory, file_name = str(p.parent), str(p.name)
+
    if beamline:
        os.environ["BEAMLINE"] = beamline
    else:
@@ -49,36 +58,43 @@
        sys.stderr.write("BEAMLINE not set and --beamline not specified\n")
        return 1

-    if Path(output_file).exists() and not force:
+    if Path(f"{output_directory}/{file_name}").exists() and not force:
        sys.stderr.write(
-            f"Output file {output_file} already exists and --force not specified."
+            f"Output file {output_directory}/{file_name} already exists and --force not specified."
        )
        return 1

-    _save_panda(beamline, device_name, output_file)
+    _save_panda(beamline, device_name, output_directory, file_name)

    print("Done.")
    return 0


-def _save_panda(beamline, device_name, output_file):
+def _save_panda(beamline, device_name, output_directory, file_name):
    RE = RunEngine()
    print("Creating devices...")
    module_name = module_name_for_beamline(beamline)
    try:
-        devices = make_device(f"dodal.beamlines.{module_name}", device_name)
+        devices = make_device(
+            f"dodal.beamlines.{module_name}", device_name, connect_immediately=True
+        )
    except Exception as error:
        sys.stderr.write(f"Couldn't create device {device_name}: {error}\n")
        sys.exit(1)

    panda = devices[device_name]
-    print(f"Saving to {output_file} from {device_name} on {beamline}...")
-    _save_panda_to_file(RE, cast(Device, panda), output_file)
+    print(
+        f"Saving to {output_directory}/{file_name} from {device_name} on {beamline}..."
+    )
+    _save_panda_to_yaml(RE, cast(Device, panda), file_name, output_directory)


-def _save_panda_to_file(RE: RunEngine, panda: Device, path: str):
+def _save_panda_to_yaml(
+    RE: RunEngine, panda: Device, file_name: str, output_directory: str
+):
    def save_to_file():
-        yield from save_device(panda, path, sorter=phase_sorter)
+        provider = YamlSettingsProvider(output_directory)
+        yield from store_settings(provider, file_name, panda)

    RE(save_to_file())
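
Note: the output location is now a required --output-file option (the old positional output_file argument is gone) and is split into a directory for YamlSettingsProvider and a file name for store_settings, which appends '.yaml'. A hedged sketch of driving the updated CLI programmatically with an illustrative path; it needs a real beamline environment, since the panda is connected immediately:

    import sys

    from dodal.plans.save_panda import main

    # argv[0] is a placeholder: main() hands argv[1:] to argparse
    exit_code = main(
        [
            "save-panda",
            "--beamline", "i03",
            "--device-name", "panda",
            "--output-file", "/scratch/panda_settings",
        ]
    )
    sys.exit(exit_code)
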
dodal/utils.py CHANGED
@@ -102,7 +102,7 @@ class BeamlinePrefix:


T = TypeVar("T", bound=AnyDevice)
-D = TypeVar("D", bound=OphydV2Device)
+
SkipType = bool | Callable[[], bool]


@@ -119,16 +119,16 @@ def skip_device(precondition=lambda: True):
    return decorator


-class DeviceInitializationController(Generic[D]):
+class DeviceInitializationController(Generic[T]):
    def __init__(
        self,
-        factory: Callable[[], D],
+        factory: Callable[[], T],
        use_factory_name: bool,
        timeout: float,
        mock: bool,
        skip: SkipType,
    ):
-        self._factory: Callable[[], D] = functools.cache(factory)
+        self._factory: Callable[..., T] = functools.cache(factory)
        self._use_factory_name = use_factory_name
        self._timeout = timeout
        self._mock = mock
@@ -153,13 +153,15 @@ class DeviceInitializationController(Generic[D]):
        name: str | None = None,
        connection_timeout: float | None = None,
        mock: bool | None = None,
-    ) -> D:
+        **kwargs,
+    ) -> T:
        """Returns an instance of the Device the wrapped factory produces: the same
        instance will be returned if this method is called multiple times, and arguments
        may be passed to override this Controller's configuration.
        Once the device is connected, the value of mock must be consistent, or connect
        must be False.

+        Additional keyword arguments will be passed through to the wrapped factory function.

        Args:
            connect_immediately (bool, default False): whether to call connect on the
@@ -182,19 +184,36 @@
            connect is called on the Device.

        Returns:
-            D: a singleton instance of the Device class returned by the wrapped factory.
+            T: a singleton instance of the Device class returned by the wrapped factory.
+
+        Raises:
+            RuntimeError: If the device factory was invoked again with different
+                keyword arguments, without previously invoking cache_clear()
        """
-        device = self._factory()
+        is_v2_device = is_v2_device_factory(self._factory)
+        is_mock = mock if mock is not None else self._mock
+        if is_v2_device:
+            device: T = self._factory(**kwargs)
+        else:
+            device: T = self._factory(mock=is_mock, **kwargs)
+
+        if self._factory.cache_info().currsize > 1:  # type: ignore
+            raise RuntimeError(
+                f"Device factory method called multiple times with different parameters: "
+                f"{self.__name__}"  # type: ignore
+            )

        if connect_immediately:
-            call_in_bluesky_event_loop(
-                device.connect(
-                    timeout=connection_timeout
-                    if connection_timeout is not None
-                    else self._timeout,
-                    mock=mock if mock is not None else self._mock,
-                )
+            timeout = (
+                connection_timeout if connection_timeout is not None else self._timeout
            )
+            if is_v2_device:
+                call_in_bluesky_event_loop(
+                    device.connect(timeout=timeout, mock=is_mock)
+                )
+            else:
+                assert is_v1_device_type(type(device))
+                device.wait_for_connection(timeout=timeout)  # type: ignore

        if name:
            device.set_name(name)
@@ -410,7 +429,27 @@ def is_any_device_factory(func: Callable) -> TypeGuard[AnyDeviceFactory]:


def is_v2_device_type(obj: type[Any]) -> bool:
-    return inspect.isclass(obj) and isinstance(obj, OphydV2Device)
+    non_parameterized_class = None
+    if obj != inspect.Signature.empty:
+        if inspect.isclass(obj):
+            non_parameterized_class = obj
+        elif hasattr(obj, "__origin__"):
+            # typing._GenericAlias is the same as types.GenericAlias, maybe?
+            # This is all very badly documented and possibly prone to change in future versions of Python
+            non_parameterized_class = obj.__origin__
+    if non_parameterized_class:
+        try:
+            return non_parameterized_class and issubclass(
+                non_parameterized_class, OphydV2Device
+            )
+        except TypeError:
+            # Python 3.10 will return inspect.isclass(t) == True but then
+            # raise TypeError: issubclass() arg 1 must be a class
+            # when inspecting device_factory decorator function itself
+            # Later versions of Python seem not to be affected
+            pass
+
+    return False


def is_v1_device_type(obj: type[Any]) -> bool:
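
Note: DeviceInitializationController is now generic over the existing T TypeVar and forwards extra keyword arguments to the wrapped (cached) factory: repeat calls with the same arguments return the cached device, while changing them without cache_clear() raises RuntimeError. A hedged sketch with an illustrative factory; connect_immediately is left at its default so nothing connects (these controllers are normally created for you by dodal's device_factory decorator):

    from ophyd_async.epics.motor import Motor

    from dodal.utils import DeviceInitializationController


    def table_axis(suffix: str = "X") -> Motor:
        # hypothetical factory; the Motor return annotation marks it as an ophyd-async (v2) factory
        return Motor("BLXX-MO-TABLE-01:" + suffix, name=f"table_{suffix.lower()}")


    controller = DeviceInitializationController(
        table_axis, use_factory_name=False, timeout=10.0, mock=True, skip=False
    )

    axis_a = controller(suffix="X")  # builds and caches the motor
    axis_b = controller(suffix="X")  # same cached instance, cache size stays at 1
    assert axis_a is axis_b
    # controller(suffix="Y") would raise RuntimeError: the factory is already cached
    # with different parameters
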