dls-dodal 1.50.0__py3-none-any.whl → 1.52.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {dls_dodal-1.50.0.dist-info → dls_dodal-1.52.0.dist-info}/METADATA +5 -5
- {dls_dodal-1.50.0.dist-info → dls_dodal-1.52.0.dist-info}/RECORD +76 -68
- dodal/_version.py +2 -2
- dodal/beamlines/adsim.py +5 -3
- dodal/beamlines/b01_1.py +41 -5
- dodal/beamlines/b07.py +13 -2
- dodal/beamlines/b07_1.py +13 -2
- dodal/beamlines/b16.py +8 -4
- dodal/beamlines/b21.py +148 -0
- dodal/beamlines/i03.py +10 -12
- dodal/beamlines/i04.py +7 -7
- dodal/beamlines/i09.py +25 -2
- dodal/beamlines/i09_1.py +13 -2
- dodal/beamlines/i09_2.py +24 -0
- dodal/beamlines/i10.py +5 -6
- dodal/beamlines/i13_1.py +5 -5
- dodal/beamlines/i18.py +5 -6
- dodal/beamlines/i22.py +18 -1
- dodal/beamlines/i24.py +5 -5
- dodal/beamlines/p45.py +4 -3
- dodal/beamlines/p60.py +21 -2
- dodal/beamlines/p99.py +19 -5
- dodal/beamlines/training_rig.py +3 -3
- dodal/common/beamlines/beamline_utils.py +5 -2
- dodal/common/device_utils.py +45 -0
- dodal/devices/aithre_lasershaping/goniometer.py +4 -5
- dodal/devices/aperture.py +4 -7
- dodal/devices/aperturescatterguard.py +2 -2
- dodal/devices/attenuator/attenuator.py +5 -3
- dodal/devices/b07/__init__.py +3 -0
- dodal/devices/b07/enums.py +24 -0
- dodal/devices/b07_1/__init__.py +3 -0
- dodal/devices/b07_1/enums.py +18 -0
- dodal/devices/detector/detector_motion.py +19 -17
- dodal/devices/electron_analyser/abstract/__init__.py +4 -0
- dodal/devices/electron_analyser/abstract/base_driver_io.py +44 -28
- dodal/devices/electron_analyser/abstract/base_region.py +20 -7
- dodal/devices/electron_analyser/detector.py +3 -13
- dodal/devices/electron_analyser/specs/detector.py +24 -4
- dodal/devices/electron_analyser/specs/driver_io.py +20 -5
- dodal/devices/electron_analyser/specs/region.py +9 -5
- dodal/devices/electron_analyser/types.py +21 -5
- dodal/devices/electron_analyser/vgscienta/detector.py +22 -7
- dodal/devices/electron_analyser/vgscienta/driver_io.py +16 -8
- dodal/devices/electron_analyser/vgscienta/region.py +11 -6
- dodal/devices/fast_grid_scan.py +1 -2
- dodal/devices/i04/constants.py +1 -1
- dodal/devices/i09/__init__.py +4 -0
- dodal/devices/i09/dcm.py +26 -0
- dodal/devices/i09/enums.py +15 -0
- dodal/devices/i09_1/__init__.py +3 -0
- dodal/devices/i09_1/enums.py +19 -0
- dodal/devices/i10/mirrors.py +4 -6
- dodal/devices/i10/rasor/rasor_motors.py +0 -14
- dodal/devices/i19/beamstop.py +3 -7
- dodal/devices/i24/aperture.py +4 -6
- dodal/devices/i24/beamstop.py +5 -8
- dodal/devices/i24/pmac.py +4 -8
- dodal/devices/linkam3.py +25 -81
- dodal/devices/motors.py +92 -35
- dodal/devices/oav/pin_image_recognition/__init__.py +11 -14
- dodal/devices/p45.py +0 -12
- dodal/devices/p60/__init__.py +4 -0
- dodal/devices/p60/enums.py +10 -0
- dodal/devices/p60/lab_xray_source.py +21 -0
- dodal/devices/pgm.py +1 -1
- dodal/devices/robot.py +11 -7
- dodal/devices/smargon.py +8 -9
- dodal/devices/tetramm.py +134 -150
- dodal/devices/xbpm_feedback.py +6 -3
- dodal/devices/zocalo/zocalo_results.py +27 -78
- dodal/plans/configure_arm_trigger_and_disarm_detector.py +7 -5
- dodal/devices/adsim.py +0 -13
- dodal/devices/i18/table.py +0 -14
- dodal/devices/i18/thor_labs_stage.py +0 -12
- dodal/devices/i24/i24_detector_motion.py +0 -12
- dodal/devices/scatterguard.py +0 -11
- dodal/devices/training_rig/__init__.py +0 -0
- dodal/devices/training_rig/sample_stage.py +0 -10
- {dls_dodal-1.50.0.dist-info → dls_dodal-1.52.0.dist-info}/WHEEL +0 -0
- {dls_dodal-1.50.0.dist-info → dls_dodal-1.52.0.dist-info}/entry_points.txt +0 -0
- {dls_dodal-1.50.0.dist-info → dls_dodal-1.52.0.dist-info}/licenses/LICENSE +0 -0
- {dls_dodal-1.50.0.dist-info → dls_dodal-1.52.0.dist-info}/top_level.txt +0 -0
dodal/devices/tetramm.py
CHANGED

@@ -1,12 +1,16 @@
 import asyncio
+from collections.abc import Sequence
+from typing import Annotated as A
 
-from bluesky.protocols import Hints
 from ophyd_async.core import (
+    DEFAULT_TIMEOUT,
+    AsyncStatus,
     DatasetDescriber,
     DetectorController,
     DetectorTrigger,
-    Device,
     PathProvider,
+    SignalR,
+    SignalRW,
     StandardDetector,
     StrictEnum,
     TriggerInfo,
@@ -15,15 +19,12 @@ from ophyd_async.core import (
 )
 from ophyd_async.epics.adcore import (
     ADHDFWriter,
+    NDArrayBaseIO,
     NDFileHDFIO,
     NDPluginBaseIO,
     stop_busy_record,
 )
-from ophyd_async.epics.core import (
-    epics_signal_r,
-    epics_signal_rw,
-    epics_signal_rw_rbv,
-)
+from ophyd_async.epics.core import PvSuffix
 
 
 class TetrammRange(StrictEnum):
@@ -54,208 +55,191 @@ class TetrammGeometry(StrictEnum):
     SQUARE = "Square"
 
 
-class TetrammDriver(
-
-
-
-
-)
-
-
-
-
-
-
-
-
-
-        self.acquire = epics_signal_rw_rbv(bool, prefix + "Acquire")
-
-        # this PV is special, for some reason it doesn't have a _RBV suffix...
-        self.overflows = epics_signal_r(int, prefix + "RingOverflows")
-
-        self.num_channels = epics_signal_rw_rbv(TetrammChannels, prefix + "NumChannels")
-        self.resolution = epics_signal_rw_rbv(TetrammResolution, prefix + "Resolution")
-        self.trigger_mode = epics_signal_rw_rbv(TetrammTrigger, prefix + "TriggerMode")
-        self.bias = epics_signal_rw_rbv(bool, prefix + "BiasState")
-        self.bias_volts = epics_signal_rw_rbv(float, prefix + "BiasVoltage")
-        self.geometry = epics_signal_rw_rbv(TetrammGeometry, prefix + "Geometry")
-        self.nd_attributes_file = epics_signal_rw(str, prefix + "NDAttributesFile")
-
-        super().__init__(name=name)
+class TetrammDriver(NDArrayBaseIO):
+    range: A[SignalRW[TetrammRange], PvSuffix.rbv("Range")]
+    sample_time: A[SignalR[float], PvSuffix("SampleTime_RBV")]
+    values_per_reading: A[SignalRW[int], PvSuffix.rbv("ValuesPerRead")]
+    averaging_time: A[SignalRW[float], PvSuffix.rbv("AveragingTime")]
+    to_average: A[SignalR[int], PvSuffix("NumAverage_RBV")]
+    averaged: A[SignalR[int], PvSuffix("NumAveraged_RBV")]
+    overflows: A[SignalR[int], PvSuffix("RingOverflows")]
+    num_channels: A[SignalRW[TetrammChannels], PvSuffix.rbv("NumChannels")]
+    resolution: A[SignalRW[TetrammResolution], PvSuffix.rbv("Resolution")]
+    trigger_mode: A[SignalRW[TetrammTrigger], PvSuffix.rbv("TriggerMode")]
+    bias: A[SignalRW[bool], PvSuffix.rbv("BiasState")]
+    bias_volts: A[SignalRW[float], PvSuffix.rbv("BiasVoltage")]
+    geometry: A[SignalRW[TetrammGeometry], PvSuffix.rbv("Geometry")]
+    read_format: A[SignalRW[bool], PvSuffix.rbv("ReadFormat")]
 
 
 class TetrammController(DetectorController):
-    """Controller for a TetrAMM current monitor
-
-
-
-
-
-
-
-        readings_per_frame (int): Actual number of readings per frame.
-
+    """Controller for a TetrAMM current monitor"""
+
+    _supported_trigger_types = {
+        DetectorTrigger.EDGE_TRIGGER: TetrammTrigger.EXT_TRIGGER,
+        DetectorTrigger.CONSTANT_GATE: TetrammTrigger.EXT_TRIGGER,
+    }
+    """"On the TetrAMM ASCII mode requires a minimum value of ValuesPerRead of 500,
+    [...] binary mode the minimum value of ValuesPerRead is 5."
+    https://millenia.cars.aps.anl.gov/software/epics/quadEMDoc.html
     """
-
-
+    _minimal_values_per_reading = {0: 5, 1: 500}
+    """The TetrAMM always digitizes at 100 kHz"""
+    _base_sample_rate: int = 100_000
 
     def __init__(
         self,
-
-
-
-
-    ):
-        # TODO: Are any of these also fixed by hardware constraints?
-        self._drv = drv
-        self.maximum_readings_per_frame = maximum_readings_per_frame
-        self.minimum_values_per_reading = minimum_values_per_reading
-        self.readings_per_frame = readings_per_frame
+        driver: TetrammDriver,
+    ) -> None:
+        self.driver = driver
+        self._arm_status: AsyncStatus | None = None
 
     def get_deadtime(self, exposure: float | None) -> float:
         # 2 internal clock cycles. Best effort approximation
-        return 2 / self.
+        return 2 / self._base_sample_rate
 
-    async def prepare(self, trigger_info: TriggerInfo):
-
-
+    async def prepare(self, trigger_info: TriggerInfo) -> None:
+        if trigger_info.trigger not in self._supported_trigger_types:
+            raise TypeError(
+                f"{self.__class__.__name__} only supports the following trigger "
+                f"types: {[k.name for k in self._supported_trigger_types]} but was asked to "
+                f"use {trigger_info.trigger}"
+            )
+        if trigger_info.livetime is None:
+            raise ValueError(f"{self.__class__.__name__} requires that livetime is set")
 
         # trigger mode must be set first and on its own!
-        await self.
-
+        await self.driver.trigger_mode.set(
+            self._supported_trigger_types[trigger_info.trigger]
+        )
         await asyncio.gather(
-            self.
+            self.driver.averaging_time.set(trigger_info.livetime),
             self.set_exposure(trigger_info.livetime),
         )
 
     async def arm(self):
-        self._arm_status = await
-            self._drv.acquire, True, wait_for_set_completion=False
-        )
+        self._arm_status = await self.start_acquiring_driver_and_ensure_status()
 
     async def wait_for_idle(self):
         if self._arm_status and not self._arm_status.done:
             await self._arm_status
         self._arm_status = None
 
-    def _validate_trigger(self, trigger: DetectorTrigger) -> None:
-        supported_trigger_types = {
-            DetectorTrigger.EDGE_TRIGGER,
-            DetectorTrigger.CONSTANT_GATE,
-        }
-
-        if trigger not in supported_trigger_types:
-            raise ValueError(
-                f"{self.__class__.__name__} only supports the following trigger "
-                f"types: {supported_trigger_types} but was asked to "
-                f"use {trigger}"
-            )
-
     async def disarm(self):
-
+        # We can't use caput callback as we already used it in arm() and we can't have
+        # 2 or they will deadlock
+        await stop_busy_record(self.driver.acquire, False, timeout=1)
 
-    async def set_exposure(self, exposure: float):
-        """
+    async def set_exposure(self, exposure: float) -> None:
+        """Set the exposure time and acquire period.
 
         As during the exposure time, the device must collect an integer number
         of readings, in the case where the exposure is not a multiple of the base
         sample rate, it will be lowered to the prior multiple ot ensure triggers
         are not missed.
 
-
-
-
-        Raises:
-            ValueError: If exposure is too low to collect the required number
-                of readings per frame.
+        :param exposure: Desired exposure time.
+        :type exposure: How long to wait for the exposure time and acquire
+            period to be set.
         """
+        sample_time = await self.driver.sample_time.get_value()
+        minimum_samples = self._minimal_values_per_reading[
+            await self.driver.read_format.get_value()
+        ]
+        samples_per_reading = int(exposure / sample_time)
+        if samples_per_reading < minimum_samples:
+            raise ValueError(
+                "Tetramm exposure time must be at least "
+                f"{minimum_samples * sample_time}s, asked to set it to {exposure}s"
+            )
+        await self.driver.averaging_time.set(samples_per_reading * sample_time)
 
-
-
-        self._set_minimum_exposure(exposure)
-        values_per_reading: int = int(
-            exposure * self.base_sample_rate / self.readings_per_frame
-        )
-
-        await self._drv.values_per_reading.set(values_per_reading)
+    async def start_acquiring_driver_and_ensure_status(self) -> AsyncStatus:
+        """Start acquiring driver, raising ValueError if the detector is in a bad state.
 
-
-
-
-        return 1 / self.minimum_exposure
+        This sets driver.acquire to True, and waits for it to be True up to a timeout.
+        Then, it checks that the DetectorState PV is in DEFAULT_GOOD_STATES,
+        and otherwise raises a ValueError.
 
-
-
-
+        :returns AsyncStatus:
+            An AsyncStatus that can be awaited to set driver.acquire to True and perform
+            subsequent raising (if applicable) due to detector state.
+        """
+        status = await set_and_wait_for_value(
+            self.driver.acquire,
+            True,
+            timeout=DEFAULT_TIMEOUT,
+            wait_for_set_completion=False,
+        )
 
-
-
-
-
-        return self.readings_per_frame * time_per_reading
+        async def complete_acquisition() -> None:
+            # NOTE: possible race condition here between the callback from
+            # set_and_wait_for_value and the detector state updating.
+            await status
 
-
-        time_per_reading = self.minimum_values_per_reading / self.base_sample_rate
-        if exposure < time_per_reading:
-            raise ValueError(
-                "Tetramm exposure time must be at least "
-                f"{time_per_reading}s, asked to set it to {exposure}s"
-            )
-        self.readings_per_frame = int(
-            min(self.maximum_readings_per_frame, exposure / time_per_reading)
-        )
+        return AsyncStatus(complete_acquisition())
 
 
 class TetrammDatasetDescriber(DatasetDescriber):
-
-
-    def __init__(self, controller: TetrammController) -> None:
-        self.controller = controller
+    def __init__(self, driver: TetrammDriver) -> None:
+        self._driver = driver
 
     async def np_datatype(self) -> str:
         return "<f8"  # IEEE 754 double precision floating point
 
     async def shape(self) -> tuple[int, int]:
-        return (
+        return (
+            int(await self._driver.num_channels.get_value()),
+            int(
+                await self._driver.averaging_time.get_value()
+                / await self._driver.sample_time.get_value(),
+            ),
+        )
 
 
-# TODO: Support MeanValue signals https://github.com/DiamondLightSource/dodal/issues/337
 class TetrammDetector(StandardDetector):
     def __init__(
         self,
         prefix: str,
         path_provider: PathProvider,
+        drv_suffix: str = "DRV:",
+        fileio_suffix: str = "HDF5:",
         name: str = "",
-        type: str | None = None,
         plugins: dict[str, NDPluginBaseIO] | None = None,
-
-
-
-
-
-
-
-
+        config_sigs: Sequence[SignalR] = (),
+        type: str | None = None,
+    ):
+        self.driver = TetrammDriver(prefix + drv_suffix)
+        self.file_io = NDFileHDFIO(prefix + fileio_suffix)
+        controller = TetrammController(self.driver)
+
+        writer = ADHDFWriter(
+            fileio=self.file_io,
+            path_provider=path_provider,
+            dataset_describer=TetrammDatasetDescriber(self.driver),
+            plugins=plugins,
+        )
+
+        config_sigs = [
+            self.driver.values_per_reading,
+            self.driver.averaging_time,
+            self.driver.sample_time,
+            *config_sigs,
         ]
+
         if type:
             self.type, _ = soft_signal_r_and_setter(str, type)
-
+            config_sigs.append(self.type)
         else:
             self.type = None
+
+        if plugins is not None:
+            for plugin_name, plugin in plugins.items():
+                setattr(self, plugin_name, plugin)
+
         super().__init__(
-            controller,
-
-
-
-                dataset_describer=TetrammDatasetDescriber(controller),
-                plugins=plugins,
-            ),
-            config_signals,
-            name,
+            controller=controller,
+            writer=writer,
+            name=name,
+            config_sigs=config_sigs,
         )
-
-    @property
-    def hints(self) -> Hints:
-        return {"fields": [self.name]}
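For orientation, here is a minimal sketch of constructing the refactored TetrammDetector as suggested by the new __init__ signature above. The PV prefix and output directory are hypothetical placeholders, and the path-provider classes are the generic ones from ophyd_async, not anything prescribed by this diff.

# Sketch only: wiring up the refactored TetrammDetector from the diff above.
# "BL00I-EA-XBPM-01:" and /tmp/tetramm are made-up placeholders.
from pathlib import Path

from ophyd_async.core import StaticPathProvider, UUIDFilenameProvider

from dodal.devices.tetramm import TetrammDetector

path_provider = StaticPathProvider(UUIDFilenameProvider(), Path("/tmp/tetramm"))
tetramm = TetrammDetector(
    "BL00I-EA-XBPM-01:",  # hypothetical PV prefix; DRV: and HDF5: suffixes are appended
    path_provider,
    name="tetramm",
)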
dodal/devices/xbpm_feedback.py
CHANGED

@@ -2,6 +2,8 @@ from bluesky.protocols import Triggerable
 from ophyd_async.core import AsyncStatus, Device, StrictEnum, observe_value
 from ophyd_async.epics.core import epics_signal_r, epics_signal_rw
 
+from dodal.common.device_utils import periodic_reminder
+
 
 class Pause(StrictEnum):
     PAUSE = "Paused"  # 0
@@ -22,6 +24,7 @@ class XBPMFeedback(Device, Triggerable):
 
     @AsyncStatus.wrap
     async def trigger(self):
-        async
-
-
+        async with periodic_reminder("Waiting for XBPM"):
+            async for value in observe_value(self.pos_stable):
+                if value:
+                    return
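The reworked trigger() above waits on pos_stable via observe_value, wrapped in the new periodic_reminder helper from dodal/common/device_utils.py (added in this release, see the file list). As a rough illustration of the observe_value wait pattern on its own, here is a self-contained sketch using a soft signal in place of the real PV; wait_until_true and the signal name are made up for the example.

import asyncio

from ophyd_async.core import observe_value, soft_signal_rw


async def wait_until_true(signal) -> None:
    # observe_value yields the current value and then every update;
    # return as soon as the signal reads truthy.
    async for value in observe_value(signal):
        if value:
            return


async def main() -> None:
    pos_stable = soft_signal_rw(bool, initial_value=False, name="pos_stable")
    await pos_stable.connect()
    waiter = asyncio.create_task(wait_until_true(pos_stable))
    await asyncio.sleep(0)  # let the waiter subscribe before the value flips
    await pos_stable.set(True)  # simulates feedback becoming stable
    await waiter


asyncio.run(main())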
dodal/devices/zocalo/zocalo_results.py
CHANGED

@@ -10,7 +10,6 @@ import workflows.recipe
 import workflows.transport
 from bluesky.protocols import Triggerable
 from bluesky.utils import Msg
-from deepdiff.diff import DeepDiff
 from ophyd_async.core import (
     Array1D,
     AsyncStatus,
@@ -49,6 +48,9 @@ DEFAULT_SORT_KEY = SortKeys.max_count
 CLEAR_QUEUE_WAIT_S = 2.0
 ZOCALO_STAGE_GROUP = "clear zocalo queue"
 
+# Sentinel value required for inserting into the soft signal array
+_NO_SAMPLE_ID = -1
+
 
 class XrcResult(TypedDict):
     """
@@ -69,6 +71,7 @@ class XrcResult(TypedDict):
         as the volume of whole boxes as a half-open range i.e such that
         p1 = (x1, y1, z1) <= p < p2 = (x2, y2, z2) and
         p2 - p1 gives the dimensions in whole voxels.
+    sample_id: The sample id associated with the centre.
     """
 
     centre_of_mass: list[float]
@@ -77,6 +80,7 @@ class XrcResult(TypedDict):
     n_voxels: int
     total_count: int
     bounding_box: list[list[int]]
+    sample_id: int | None
 
 
 def bbox_size(result: XrcResult):
@@ -86,18 +90,6 @@ def bbox_size(result: XrcResult):
     ]
 
 
-def get_dict_differences(
-    dict1: dict, dict1_source: str, dict2: dict, dict2_source: str
-) -> str | None:
-    """Returns a string containing dict1 and dict2 if there are differences between them, greater than a
-    1e-5 tolerance. If dictionaries are identical, return None"""
-
-    diff = DeepDiff(dict1, dict2, math_epsilon=1e-5, ignore_numeric_type_changes=True)
-
-    if diff:
-        return f"Zocalo results from {dict1_source} and {dict2_source} are not identical.\n Results from {dict1_source}: {dict1}\n Results from {dict2_source}: {dict2}"
-
-
 def source_from_results(results):
     return (
         ZocaloSource.GPU.value
@@ -127,10 +119,6 @@ class ZocaloResults(StandardReadable, Triggerable):
 
         prefix (str): EPICS PV prefix for the device
 
-        use_cpu_and_gpu (bool): When True, ZocaloResults will wait for results from the
-            CPU and the GPU, compare them, and provide a warning if the results differ. When
-            False, ZocaloResults will only use results from the CPU
-
         use_gpu (bool): When True, ZocaloResults will take the first set of
             results that it receives (which are likely the GPU results)
 
@@ -144,7 +132,6 @@ class ZocaloResults(StandardReadable, Triggerable):
         sort_key: str = DEFAULT_SORT_KEY.value,
         timeout_s: float = DEFAULT_TIMEOUT,
         prefix: str = "",
-        use_cpu_and_gpu: bool = False,
         use_gpu: bool = False,
     ) -> None:
         self.zocalo_environment = zocalo_environment
@@ -154,7 +141,6 @@ class ZocaloResults(StandardReadable, Triggerable):
         self._prefix = prefix
         self._raw_results_received: Queue = Queue()
         self.transport: CommonTransport | None = None
-        self.use_cpu_and_gpu = use_cpu_and_gpu
         self.use_gpu = use_gpu
 
         self.centre_of_mass, self._com_setter = soft_signal_r_and_setter(
@@ -175,6 +161,9 @@ class ZocaloResults(StandardReadable, Triggerable):
         self.total_count, self._total_count_setter = soft_signal_r_and_setter(
             Array1D[np.uint64], name="total_count"
         )
+        self.sample_id, self._sample_id_setter = soft_signal_r_and_setter(
+            Array1D[np.int64], name="sample_id"
+        )
         self.ispyb_dcid, self._ispyb_dcid_setter = soft_signal_r_and_setter(
             int, name="ispyb_dcid"
         )
@@ -189,6 +178,7 @@ class ZocaloResults(StandardReadable, Triggerable):
                 self.total_count,
                 self.centre_of_mass,
                 self.bounding_box,
+                self.sample_id,
                 self.ispyb_dcid,
                 self.ispyb_dcgid,
             ],
@@ -197,13 +187,15 @@ class ZocaloResults(StandardReadable, Triggerable):
         super().__init__(name)
 
     async def _put_results(self, results: Sequence[XrcResult], recipe_parameters):
-
-        self._com_setter(centres_of_mass)
+        self._com_setter(np.array([r["centre_of_mass"] for r in results]))
         self._bounding_box_setter(np.array([r["bounding_box"] for r in results]))
         self._max_voxel_setter(np.array([r["max_voxel"] for r in results]))
         self._max_count_setter(np.array([r["max_count"] for r in results]))
         self._n_voxels_setter(np.array([r["n_voxels"] for r in results]))
         self._total_count_setter(np.array([r["total_count"] for r in results]))
+        self._sample_id_setter(
+            np.array([r.get("sample_id") or _NO_SAMPLE_ID for r in results])
+        )
         self._ispyb_dcid_setter(recipe_parameters["dcid"])
         self._ispyb_dcgid_setter(recipe_parameters["dcgid"])
 
@@ -218,11 +210,6 @@ class ZocaloResults(StandardReadable, Triggerable):
        clearing the queue. Plans using this device should wait on ZOCALO_STAGE_GROUP
        before triggering processing for the experiment"""
 
-        if self.use_cpu_and_gpu and self.use_gpu:
-            raise ValueError(
-                "Cannot compare GPU and CPU results and use GPU results at the same time."
-            )
-
         LOGGER.info("Subscribing to results queue")
         try:
             self._subscribe_to_results()
@@ -268,55 +255,6 @@ class ZocaloResults(StandardReadable, Triggerable):
                "Configured to use GPU results but CPU came first, using CPU results."
            )
 
-        if self.use_cpu_and_gpu:
-            # Wait for results from CPU and GPU, warn and continue if only GPU times out. Error if CPU times out
-            if source_of_first_results == ZocaloSource.CPU:
-                LOGGER.warning("Received zocalo results from CPU before GPU")
-            raw_results_two_sources = [raw_results]
-            try:
-                raw_results_two_sources.append(
-                    self._raw_results_received.get(timeout=self.timeout_s / 2)
-                )
-                source_of_second_results = source_from_results(
-                    raw_results_two_sources[1]
-                )
-                first_results = raw_results_two_sources[0]["results"]
-                second_results = raw_results_two_sources[1]["results"]
-
-                if first_results and second_results:
-                    # Compare results from both sources and warn if they aren't the same
-                    differences_str = get_dict_differences(
-                        first_results[0],
-                        source_of_first_results,
-                        second_results[0],
-                        source_of_second_results,
-                    )
-                    if differences_str:
-                        LOGGER.warning(differences_str)
-
-                # Always use CPU results
-                raw_results = (
-                    raw_results_two_sources[0]
-                    if source_of_first_results == ZocaloSource.CPU
-                    else raw_results_two_sources[1]
-                )
-
-            except Empty as err:
-                source_of_missing_results = (
-                    ZocaloSource.CPU.value
-                    if source_of_first_results == ZocaloSource.GPU.value
-                    else ZocaloSource.GPU.value
-                )
-                if source_of_missing_results == ZocaloSource.GPU.value:
-                    LOGGER.warning(
-                        f"Zocalo results from {source_of_missing_results} timed out. Using results from {source_of_first_results}"
-                    )
-                else:
-                    LOGGER.error(
-                        f"Zocalo results from {source_of_missing_results} timed out and GPU results not yet reliable"
-                    )
-                raise err
-
         LOGGER.info(
             f"Zocalo results from {source_from_results(raw_results)} processing: found {len(raw_results['results'])} crystals."
         )
@@ -350,7 +288,7 @@ class ZocaloResults(StandardReadable, Triggerable):
 
             results = message.get("results", [])
 
-            if self.
+            if self.use_gpu:
                 self._raw_results_received.put(
                     {"results": results, "recipe_parameters": recipe_parameters}
                 )
@@ -360,6 +298,8 @@ class ZocaloResults(StandardReadable, Triggerable):
                 self._raw_results_received.put(
                     {"results": results, "recipe_parameters": recipe_parameters}
                 )
+            else:
+                LOGGER.warning("Discarding results as they are from GPU")
 
         subscription = workflows.recipe.wrap_subscribe(
             self.transport,
@@ -393,6 +333,7 @@ def get_full_processing_results(
     n_voxels = yield from bps.rd(zocalo.n_voxels, default_value=[])
     total_count = yield from bps.rd(zocalo.total_count, default_value=[])
     bounding_box = yield from bps.rd(zocalo.bounding_box, default_value=[])
+    sample_id = yield from bps.rd(zocalo.sample_id, default_value=[])
     return [
         _corrected_xrc_result(
             XrcResult(
@@ -402,9 +343,17 @@ def get_full_processing_results(
                 n_voxels=int(n),
                 total_count=int(tc),
                 bounding_box=bb.tolist(),
+                sample_id=int(s_id) if s_id != _NO_SAMPLE_ID else None,
             )
         )
-        for com, mv, mc, n, tc, bb in zip(
-            com,
+        for com, mv, mc, n, tc, bb, s_id in zip(
+            com,
+            max_voxel,
+            max_count,
+            n_voxels,
+            total_count,
+            bounding_box,
+            sample_id,
+            strict=True,
         )
     ]
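Because the new sample_id soft signal holds an integer array, missing ids are written as the _NO_SAMPLE_ID sentinel (-1) and translated back to None when get_full_processing_results rebuilds each XrcResult. A standalone sketch of that round trip, with made-up ids:

import numpy as np

_NO_SAMPLE_ID = -1  # same sentinel idea as in the diff above

# Writing: results lacking a sample id are stored as the sentinel (illustrative values).
results = [{"sample_id": 12345}, {"sample_id": None}, {}]
stored = np.array([r.get("sample_id") or _NO_SAMPLE_ID for r in results])
assert stored.tolist() == [12345, -1, -1]

# Reading: the sentinel is mapped back to None.
recovered = [int(s) if s != _NO_SAMPLE_ID else None for s in stored]
assert recovered == [12345, None, None]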
dodal/plans/configure_arm_trigger_and_disarm_detector.py
CHANGED

@@ -3,8 +3,8 @@ import time
 import bluesky.plan_stubs as bps
 from bluesky import preprocessors as bpp
 from bluesky.run_engine import RunEngine
-from ophyd_async.core import DetectorTrigger
-from ophyd_async.fastcs.eiger import EigerDetector
+from ophyd_async.core import DetectorTrigger, TriggerInfo
+from ophyd_async.fastcs.eiger import EigerDetector
 
 from dodal.beamlines.i03 import fastcs_eiger
 from dodal.devices.detector import DetectorParams
@@ -15,7 +15,7 @@ from dodal.log import LOGGER, do_default_logging_setup
 def configure_arm_trigger_and_disarm_detector(
     eiger: EigerDetector,
     detector_params: DetectorParams,
-    trigger_info:
+    trigger_info: TriggerInfo,
 ):
     assert detector_params.expected_energy_ev
     start = time.time()
@@ -132,6 +132,9 @@ def set_mx_settings_pvs(
     yield from bps.abs_set(
         eiger.drv.detector.omega_increment, detector_params.omega_increment, group
     )
+    yield from bps.abs_set(
+        eiger.drv.detector.photon_energy, detector_params.expected_energy_ev, group
+    )
 
     if wait:
         yield from bps.wait(group)
@@ -157,9 +160,8 @@ if __name__ == "__main__":
            use_roi_mode=False,
            det_dist_to_beam_converter_path="/dls_sw/i03/software/daq_configuration/lookup/DetDistToBeamXYConverter.txt",
        ),
-        trigger_info=
+        trigger_info=TriggerInfo(
            number_of_events=1,
-            energy_ev=12800,
            trigger=DetectorTrigger.INTERNAL,
            deadtime=0.0001,
        ),
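The plan now takes a plain TriggerInfo: the detector-specific energy_ev field is gone, and the photon energy is instead written via eiger.drv.detector.photon_energy in set_mx_settings_pvs. A minimal sketch of building the argument, mirroring the __main__ block above (values are the example's, not a recommendation):

from ophyd_async.core import DetectorTrigger, TriggerInfo

trigger_info = TriggerInfo(
    number_of_events=1,
    trigger=DetectorTrigger.INTERNAL,
    deadtime=0.0001,
)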
dodal/devices/adsim.py
DELETED

@@ -1,13 +0,0 @@
-from ophyd_async.core import StandardReadable
-from ophyd_async.epics.motor import Motor
-
-
-class SimStage(StandardReadable):
-    """Simulated Sample Stage for use with the containerised simulated beamline
-    https://github.com/epics-containers/example-services"""
-
-    def __init__(self, prefix: str, name: str = "sim"):
-        with self.add_children_as_readables():
-            self.x = Motor(prefix + "M1")
-            self.y = Motor(prefix + "M2")
-        super().__init__(name=name)