dls-dodal 1.63.0__py3-none-any.whl → 1.65.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (47)
  1. {dls_dodal-1.63.0.dist-info → dls_dodal-1.65.0.dist-info}/METADATA +3 -3
  2. {dls_dodal-1.63.0.dist-info → dls_dodal-1.65.0.dist-info}/RECORD +47 -39
  3. dodal/_version.py +2 -2
  4. dodal/beamline_specific_utils/i05_shared.py +6 -3
  5. dodal/beamlines/b01_1.py +1 -1
  6. dodal/beamlines/b07.py +6 -3
  7. dodal/beamlines/b07_1.py +6 -3
  8. dodal/beamlines/i03.py +9 -1
  9. dodal/beamlines/i05.py +2 -2
  10. dodal/beamlines/i05_1.py +2 -2
  11. dodal/beamlines/i07.py +21 -0
  12. dodal/beamlines/i09.py +4 -4
  13. dodal/beamlines/i09_1.py +7 -1
  14. dodal/beamlines/i09_2.py +36 -3
  15. dodal/beamlines/i10_optics.py +53 -27
  16. dodal/beamlines/i17.py +21 -11
  17. dodal/beamlines/i19_2.py +22 -0
  18. dodal/beamlines/i21.py +34 -4
  19. dodal/beamlines/i22.py +0 -17
  20. dodal/beamlines/k07.py +6 -3
  21. dodal/cli.py +3 -3
  22. dodal/devices/apple2_undulator.py +19 -17
  23. dodal/devices/b07_1/ccmc.py +1 -1
  24. dodal/devices/common_dcm.py +3 -3
  25. dodal/devices/cryostream.py +21 -0
  26. dodal/devices/i03/undulator_dcm.py +1 -1
  27. dodal/devices/i07/__init__.py +0 -0
  28. dodal/devices/i07/dcm.py +33 -0
  29. dodal/devices/i09_1_shared/__init__.py +3 -0
  30. dodal/devices/i09_1_shared/hard_undulator_functions.py +111 -0
  31. dodal/devices/i10/i10_apple2.py +4 -4
  32. dodal/devices/i15/dcm.py +1 -1
  33. dodal/devices/i22/dcm.py +1 -1
  34. dodal/devices/i22/nxsas.py +5 -24
  35. dodal/devices/pgm.py +1 -1
  36. dodal/devices/scintillator.py +4 -0
  37. dodal/devices/undulator.py +29 -1
  38. dodal/devices/util/lookup_tables.py +8 -2
  39. dodal/plan_stubs/__init__.py +3 -0
  40. dodal/plans/verify_undulator_gap.py +2 -2
  41. dodal/testing/fixtures/__init__.py +0 -0
  42. dodal/testing/fixtures/run_engine.py +118 -0
  43. dodal/testing/fixtures/utils.py +57 -0
  44. {dls_dodal-1.63.0.dist-info → dls_dodal-1.65.0.dist-info}/WHEEL +0 -0
  45. {dls_dodal-1.63.0.dist-info → dls_dodal-1.65.0.dist-info}/entry_points.txt +0 -0
  46. {dls_dodal-1.63.0.dist-info → dls_dodal-1.65.0.dist-info}/licenses/LICENSE +0 -0
  47. {dls_dodal-1.63.0.dist-info → dls_dodal-1.65.0.dist-info}/top_level.txt +0 -0
@@ -69,6 +69,10 @@ class Scintillator(StandardReadable):
69
69
  async def _set_selected_position(self, position: InOut) -> None:
70
70
  match position:
71
71
  case InOut.OUT:
72
+ current_y = await self.y_mm.user_readback.get_value()
73
+ current_z = await self.z_mm.user_readback.get_value()
74
+ if self._get_selected_position(current_y, current_z) == InOut.OUT:
75
+ return
72
76
  if (
73
77
  self._aperture_scatterguard().selected_aperture.get_value()
74
78
  != ApertureValue.PARKED
@@ -1,7 +1,7 @@
1
1
  import os
2
2
 
3
3
  import numpy as np
4
- from bluesky.protocols import Movable
4
+ from bluesky.protocols import Locatable, Location, Movable
5
5
  from numpy import ndarray
6
6
  from ophyd_async.core import (
7
7
  AsyncStatus,
@@ -9,6 +9,7 @@ from ophyd_async.core import (
9
9
  StandardReadable,
10
10
  StandardReadableFormat,
11
11
  soft_signal_r_and_setter,
12
+ soft_signal_rw,
12
13
  )
13
14
  from ophyd_async.epics.core import epics_signal_r
14
15
  from ophyd_async.epics.motor import Motor
@@ -157,3 +158,30 @@ class Undulator(StandardReadable, Movable[float]):
157
158
  energy_kev * 1000,
158
159
  energy_to_distance_table,
159
160
  )
161
+
162
+
163
class UndulatorOrder(StandardReadable, Locatable[int]):
    """
    Represents the order of an undulator device. Allows setting and locating the order.
    """

    def __init__(self, name: str = "") -> None:
        """
        Args:
            name: Name for device. Defaults to ""
        """
        with self.add_children_as_readables():
            # Soft signal holding the order; defaults to harmonic 3.
            self._value = soft_signal_rw(int, initial_value=3)
        super().__init__(name=name)

    @AsyncStatus.wrap
    async def set(self, value: int) -> None:
        """Set the undulator order.

        Raises:
            ValueError: if value is not a non-negative integer.
        """
        # Check the type BEFORE comparing: evaluating `value >= 0` first (as the
        # original did) raises TypeError for non-numeric inputs instead of the
        # intended ValueError below.
        if isinstance(value, int) and value >= 0:
            await self._value.set(value)
        else:
            # Message matches the actual guard (0 is accepted).
            raise ValueError(
                f"Undulator order must be a non-negative integer. Requested value: {value}"
            )

    async def locate(self) -> Location[int]:
        """Return the current order as a Location."""
        return await self._value.locate()
@@ -13,13 +13,19 @@ from numpy import interp, loadtxt
13
13
  from dodal.log import LOGGER
14
14
 
15
15
 
16
- async def energy_distance_table(lookup_table_path: str) -> np.ndarray:
16
+ async def energy_distance_table(
17
+ lookup_table_path: str,
18
+ comments: Sequence[str] = ["#", "Units"],
19
+ skiprows: int = 0,
20
+ ) -> np.ndarray:
17
21
  """
18
22
  Returns a numpy formatted lookup table for required positions of an ID gap to
19
23
  provide emission at a given beam energy.
20
24
 
21
25
  Args:
22
26
  lookup_table_path: Path to lookup table
27
+ comments: Lines starting with any of these strings will be ignored
28
+ skiprows: Number of rows to skip at the start of the file
23
29
 
24
30
  Returns:
25
31
  ndarray: Lookup table
@@ -29,7 +35,7 @@ async def energy_distance_table(lookup_table_path: str) -> np.ndarray:
29
35
  # decodes the text
30
36
  async with aiofiles.open(lookup_table_path) as stream:
31
37
  raw_table = await stream.read()
32
- return loadtxt(StringIO(raw_table), comments=["#", "Units"])
38
+ return loadtxt(StringIO(raw_table), comments=comments, skiprows=skiprows)
33
39
 
34
40
 
35
41
  def parse_lookup_table(filename: str) -> list[Sequence]:
@@ -0,0 +1,3 @@
1
+ from .wrapped import move, move_relative, set_absolute, set_relative, sleep, wait
2
+
3
+ __all__ = ["move", "move_relative", "set_absolute", "set_relative", "sleep", "wait"]
def verify_undulator_gap(devices: CheckUndulatorDevices):
    """Verify Undulator gap is correct - it may not be after a beam dump"""

    # Read the current DCM energy and drive the undulator to match it.
    # abs_set with wait=True blocks the plan until the gap move completes.
    energy_in_keV = yield from bps.rd(devices.dcm.energy_in_keV.user_readback)  # noqa: N806
    yield from bps.abs_set(devices.undulator, energy_in_keV, wait=True)
File without changes
@@ -0,0 +1,118 @@
1
+ """
2
+ Allow external repos to reuse these fixtures so defined in single place.
3
+ """
4
+
5
+ import asyncio
6
+ import os
7
+ import threading
8
+ import time
9
+ from collections.abc import AsyncGenerator, Mapping
10
+
11
+ import pytest
12
+ import pytest_asyncio
13
+ from _pytest.fixtures import FixtureRequest
14
+ from bluesky.run_engine import RunEngine
15
+ from bluesky.simulators import RunEngineSimulator
16
+
17
+ _run_engine = RunEngine()
18
+
19
+ _ENABLE_FILEHANDLE_LEAK_CHECKS = (
20
+ os.getenv("DODAL_ENABLE_FILEHANDLE_LEAK_CHECKS", "").lower() == "true"
21
+ )
22
+
23
+
24
@pytest_asyncio.fixture(scope="session", loop_scope="session", autouse=True)
async def _ensure_running_bluesky_event_loop(_global_run_engine):
    """Wait until the session RunEngine's event loop is actually running.

    Ensures the bluesky event loop is thoroughly up before any ophyd_async
    devices that might need it are created; gives up after ~1 second.
    """
    deadline = time.monotonic() + 1
    while not _global_run_engine.loop.is_running():
        if time.monotonic() > deadline:
            raise TimeoutError("This really shouldn't happen but just in case...")
        # Yield control so the loop has a chance to start up.
        await asyncio.sleep(0)
33
+
34
+
35
@pytest.fixture()
async def run_engine(_global_run_engine: RunEngine) -> AsyncGenerator[RunEngine, None]:
    """Yield the shared session RunEngine, resetting its state after each test."""
    try:
        yield _global_run_engine
    finally:
        # Reset so state left behind by one test cannot leak into the next.
        _global_run_engine.reset()
41
+
42
+
43
@pytest_asyncio.fixture(scope="session", loop_scope="session")
async def _global_run_engine() -> AsyncGenerator[RunEngine, None]:
    """
    Obtain a run engine, with its own event loop and thread.

    On closure of the scope, the run engine is stopped and the event loop closed
    in order to release all resources it consumes.
    """
    run_engine = RunEngine({}, call_returns_result=True)
    yield run_engine
    try:
        run_engine.halt()
    except Exception as e:
        # Ignore exception thrown if the run engine is already halted.
        print(f"Got exception while halting RunEngine {e}")
    finally:

        async def get_event_loop_thread():
            """Get the thread which the run engine created for the event loop."""
            return threading.current_thread()

        # Run the coroutine on the RunEngine's own loop so it reports that
        # loop's thread rather than the pytest thread.
        fut = asyncio.run_coroutine_threadsafe(get_event_loop_thread(), run_engine.loop)
        while not fut.done():
            # It's not clear why this is necessary, given we are
            # on a completely different thread and event loop
            # but without it our future never seems to be populated with a result
            # despite the coro getting executed
            await asyncio.sleep(0)
        # Terminate the event loop so that we can join() the thread
        run_engine.loop.call_soon_threadsafe(run_engine.loop.stop)
        run_engine_thread = fut.result()
        run_engine_thread.join()
        # This closes the filehandle in the event loop.
        # This cannot be called while the event loop is running
        run_engine.loop.close()
        del run_engine
79
+
80
+
81
@pytest.fixture
def sim_run_engine() -> RunEngineSimulator:
    """Provide a fresh RunEngineSimulator for plan-introspection tests."""
    return RunEngineSimulator()
84
+
85
+
86
@pytest.fixture
def run_engine_documents(run_engine: RunEngine) -> Mapping[str, list[dict]]:
    """Capture every document emitted by the RunEngine, grouped by name.

    Returns a mapping from document name (e.g. "start", "event", "stop") to
    the documents of that type, in emission order. The mapping is populated
    lazily as the subscribed callback fires during plan execution.
    """
    docs: dict[str, list[dict]] = {}

    # Renamed from `append_and_print`, which printed nothing; setdefault +
    # append replaces the two-step membership check and list re-creation
    # (`docs[name] += [doc]`) of the original.
    def collect(name, doc):
        docs.setdefault(name, []).append(doc)

    run_engine.subscribe(collect)
    return docs
97
+
98
+
99
@pytest.fixture(autouse=_ENABLE_FILEHANDLE_LEAK_CHECKS)
def check_for_filehandle_leaks(request: FixtureRequest):
    """
    Test fixture that can be enabled in order to check for leaked filehandles
    (typically caused by a rogue RunEngine instance).

    Note that this test is not enabled by default due to imposing a significant
    overhead. When a leak is suspected, usually from seeing a
    PytestUnraisableExceptionWarning, enable this via autouse and run the full
    test suite.

    NOTE(review): counts entries in /proc/<pid>/fd, so this only works on
    Linux (procfs) — confirm the test environments always run there.
    """
    pid = os.getpid()
    # Snapshot the number of open file descriptors before the test body runs.
    _baseline_n_open_files = len(os.listdir(f"/proc/{pid}/fd"))
    try:
        yield
    finally:
        # Any increase after the test indicates a leaked descriptor.
        _n_open_files = len(os.listdir(f"/proc/{pid}/fd"))
        assert _n_open_files == _baseline_n_open_files, (
            f"Function {request.function.__name__} leaked some filehandles"
        )
@@ -0,0 +1,57 @@
1
+ import asyncio
2
+ import threading
3
+ import time
4
+ from random import random
5
+ from threading import Thread
6
+
7
+ import pytest
8
+
9
+
10
@pytest.fixture
async def event_loop_fuzzing():
    """
    This fixture can be used to try and detect / reproduce intermittent test failures
    caused by race conditions and timing issues, which are often difficult to replicate
    due to caching etc. causing timing to be different on a development machine compared
    to when the test runs in CI.

    It works by attaching a fuzzer to the current event loop which randomly schedules
    a fixed delay into the event loop thread every few milliseconds. The idea is that
    over a number of iterations, there should be sufficient timing variation introduced
    that the failure can be reproduced.

    Examples:
        Example usage:
        >>> import pytest
        >>> # repeat the test a number of times
        >>> @pytest.mark.parametrize("i", range(0, 100))
        ... async def my_unreliable_test(i, event_loop_fuzzing):
        ...     # Do some stuff in here
        ...     ...
    """
    # Tunables: on average one delay per ~20 poll periods, each stalling the
    # event loop thread for 50 ms.
    fuzz_probability = 0.05
    fuzz_delay_s = 0.05
    fuzz_period_s = 0.001
    stop_running = threading.Event()
    event_loop = asyncio.get_running_loop()

    def delay(finished_event: threading.Event):
        # Runs ON the event loop thread (scheduled via call_soon_threadsafe),
        # so this blocking sleep stalls the whole loop — which is the point.
        time.sleep(fuzz_delay_s)  # noqa: TID251
        finished_event.set()

    def fuzz():
        # Runs on the dedicated fuzzer thread until the fixture tears down.
        while not stop_running.is_set():
            if random() < fuzz_probability:
                delay_is_finished = threading.Event()
                event_loop.call_soon_threadsafe(delay, delay_is_finished)
                # Wait for the injected stall to finish before scheduling more.
                delay_is_finished.wait()

            time.sleep(fuzz_period_s)  # noqa: TID251

    fuzzer_thread = Thread(group=None, target=fuzz, name="Event loop fuzzer")
    fuzzer_thread.start()
    try:
        yield None
    finally:
        # Signal the fuzzer loop to exit, then wait for the thread to die so
        # no stray callbacks land on a torn-down event loop.
        stop_running.set()
        fuzzer_thread.join()