dls-dodal 1.64.0__py3-none-any.whl → 1.66.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (75)
  1. {dls_dodal-1.64.0.dist-info → dls_dodal-1.66.0.dist-info}/METADATA +3 -4
  2. {dls_dodal-1.64.0.dist-info → dls_dodal-1.66.0.dist-info}/RECORD +72 -66
  3. dodal/_version.py +2 -2
  4. dodal/beamline_specific_utils/i05_shared.py +6 -3
  5. dodal/beamlines/aithre.py +21 -2
  6. dodal/beamlines/b01_1.py +1 -1
  7. dodal/beamlines/b07.py +6 -3
  8. dodal/beamlines/b07_1.py +6 -3
  9. dodal/beamlines/i03.py +32 -4
  10. dodal/beamlines/i04.py +18 -3
  11. dodal/beamlines/i05.py +30 -3
  12. dodal/beamlines/i05_1.py +2 -2
  13. dodal/beamlines/i06.py +62 -0
  14. dodal/beamlines/i07.py +20 -0
  15. dodal/beamlines/i09.py +3 -3
  16. dodal/beamlines/i09_1.py +12 -1
  17. dodal/beamlines/i09_2.py +6 -3
  18. dodal/beamlines/i10_optics.py +21 -11
  19. dodal/beamlines/i17.py +3 -3
  20. dodal/beamlines/i18.py +3 -3
  21. dodal/beamlines/i19_2.py +22 -0
  22. dodal/beamlines/i21.py +3 -3
  23. dodal/beamlines/i22.py +3 -20
  24. dodal/beamlines/k07.py +6 -3
  25. dodal/beamlines/p38.py +3 -3
  26. dodal/devices/aithre_lasershaping/goniometer.py +26 -9
  27. dodal/devices/aperturescatterguard.py +3 -2
  28. dodal/devices/apple2_undulator.py +89 -44
  29. dodal/devices/areadetector/plugins/mjpg.py +10 -3
  30. dodal/devices/beamsize/__init__.py +0 -0
  31. dodal/devices/beamsize/beamsize.py +6 -0
  32. dodal/devices/cryostream.py +21 -0
  33. dodal/devices/detector/det_resolution.py +4 -2
  34. dodal/devices/fast_grid_scan.py +14 -2
  35. dodal/devices/i03/beamsize.py +35 -0
  36. dodal/devices/i03/constants.py +7 -0
  37. dodal/devices/i03/undulator_dcm.py +2 -2
  38. dodal/devices/i04/beamsize.py +45 -0
  39. dodal/devices/i04/murko_results.py +36 -26
  40. dodal/devices/i04/transfocator.py +23 -29
  41. dodal/devices/i07/id.py +38 -0
  42. dodal/devices/i09_1_shared/__init__.py +6 -2
  43. dodal/devices/i09_1_shared/hard_undulator_functions.py +85 -21
  44. dodal/devices/i10/i10_apple2.py +22 -316
  45. dodal/devices/i17/i17_apple2.py +7 -4
  46. dodal/devices/i22/nxsas.py +5 -24
  47. dodal/devices/ipin.py +20 -2
  48. dodal/devices/motors.py +19 -3
  49. dodal/devices/mx_phase1/beamstop.py +31 -12
  50. dodal/devices/oav/oav_calculations.py +9 -4
  51. dodal/devices/oav/oav_detector.py +65 -7
  52. dodal/devices/oav/oav_parameters.py +3 -1
  53. dodal/devices/oav/oav_to_redis_forwarder.py +18 -15
  54. dodal/devices/oav/pin_image_recognition/__init__.py +5 -1
  55. dodal/devices/oav/pin_image_recognition/utils.py +23 -1
  56. dodal/devices/oav/snapshots/snapshot_with_grid.py +8 -2
  57. dodal/devices/oav/utils.py +16 -6
  58. dodal/devices/pgm.py +1 -1
  59. dodal/devices/robot.py +17 -7
  60. dodal/devices/scintillator.py +40 -14
  61. dodal/devices/smargon.py +2 -3
  62. dodal/devices/thawer.py +7 -45
  63. dodal/devices/undulator.py +178 -66
  64. dodal/devices/util/lookup_tables_apple2.py +390 -0
  65. dodal/plan_stubs/__init__.py +3 -0
  66. dodal/plans/load_panda_yaml.py +9 -0
  67. dodal/plans/verify_undulator_gap.py +2 -2
  68. dodal/testing/fixtures/run_engine.py +79 -7
  69. dodal/beamline_specific_utils/i03.py +0 -17
  70. dodal/testing/__init__.py +0 -3
  71. dodal/testing/setup.py +0 -67
  72. {dls_dodal-1.64.0.dist-info → dls_dodal-1.66.0.dist-info}/WHEEL +0 -0
  73. {dls_dodal-1.64.0.dist-info → dls_dodal-1.66.0.dist-info}/entry_points.txt +0 -0
  74. {dls_dodal-1.64.0.dist-info → dls_dodal-1.66.0.dist-info}/licenses/LICENSE +0 -0
  75. {dls_dodal-1.64.0.dist-info → dls_dodal-1.66.0.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,390 @@
1
+ """Apple2 lookup table utilities and CSV converter.
2
+
3
+ This module provides helpers to read, validate and convert Apple2 insertion-device
4
+ lookup tables (energy -> gap/phase polynomials) from CSV sources into an
5
+ in-memory dictionary format used by the Apple2 controllers.
6
+
7
+ Data format produced
8
+ The lookup-table dictionary created by convert_csv_to_lookup() follows this
9
+ structure:
10
+
11
+ {
12
+ "POL_MODE": {
13
+ "energies": {
14
+ "<min_energy>": {
15
+ "low": <float>,
16
+ "high": <float>,
17
+ "poly": <numpy.poly1d>
18
+ },
19
+ ...
20
+ },
21
+ "limit": {
22
+ "minimum": <float>,
23
+ "maximum": <float>
24
+ }
25
+ },
26
+ }
27
+
28
+ """
29
+
30
+ import csv
31
+ import io
32
+ from collections.abc import Generator
33
+ from pathlib import Path
34
+
35
+ import numpy as np
36
+ from daq_config_server.client import ConfigServer
37
+ from pydantic import (
38
+ BaseModel,
39
+ ConfigDict,
40
+ Field,
41
+ RootModel,
42
+ field_serializer,
43
+ field_validator,
44
+ )
45
+
46
+ from dodal.devices.apple2_undulator import Pol
47
+ from dodal.log import LOGGER
48
+
49
# Default CSV file names for the insertion-device energy-to-motor calibration tables.
DEFAULT_GAP_FILE = "IDEnergy2GapCalibrations.csv"
DEFAULT_PHASE_FILE = "IDEnergy2PhaseCalibrations.csv"


# Expected CSV column names holding the polynomial coefficients, ordered from the
# highest-order term down to the constant term ("b"), as np.poly1d expects.
DEFAULT_POLY_DEG = [
    "7th-order",
    "6th-order",
    "5th-order",
    "4th-order",
    "3rd-order",
    "2nd-order",
    "1st-order",
    "b",
]

# Mapping from CSV polarisation-mode names to Pol values.
# NOTE(review): convert_csv_to_lookup() performs this lookup on a lower-cased
# mode string, so these upper-case keys may never match with the default
# config — confirm the intended key casing.
MODE_NAME_CONVERT = {"CR": "pc", "CL": "nc"}
65
+
66
+
67
class LookupTableConfig(BaseModel):
    """Column names and conversions used to parse a calibration CSV.

    source: optional (column_name, value) pair; when set, only rows whose
        column equals the value are converted.
    mode: name of the column holding the polarisation mode.
    min_energy / max_energy: columns holding each row's energy range.
    poly_deg: columns holding polynomial coefficients, highest order first.
    mode_name_convert: mapping applied to mode names before Pol conversion.
    """

    source: tuple[str, str] | None = None
    mode: str = "Mode"
    min_energy: str = "MinEnergy"
    max_energy: str = "MaxEnergy"
    poly_deg: list[str] = Field(default_factory=lambda: DEFAULT_POLY_DEG)
    mode_name_convert: dict[str, str] = Field(default_factory=lambda: MODE_NAME_CONVERT)
74
+
75
+
76
class EnergyMinMax(BaseModel):
    """Overall minimum/maximum energy (eV) covered by one lookup-table entry."""

    minimum: float
    maximum: float
79
+
80
+
81
class EnergyCoverageEntry(BaseModel):
    """A single calibrated energy range and its conversion polynomial.

    The range is treated as half-open [low, high) by get_poly().
    """

    model_config = ConfigDict(arbitrary_types_allowed=True)  # So np.poly1d can be used.
    low: float
    high: float
    poly: np.poly1d

    @field_validator("poly", mode="before")
    @classmethod
    def validate_and_convert_poly(cls, value):
        """If reading from serialized data, it will be using a list. Convert to np.poly1d"""
        if isinstance(value, list):
            return np.poly1d(value)
        return value

    @field_serializer("poly", mode="plain")
    def serialize_poly(self, value: np.poly1d) -> list:
        """Allow np.poly1d to work when serializing."""
        # Serialize as the plain coefficient list (highest order first), the
        # same shape the validator above accepts when deserializing.
        return value.coefficients.tolist()
99
+
100
+
101
class EnergyCoverage(RootModel[dict[float, EnergyCoverageEntry]]):
    """Mapping of range start energy (the row's min energy) -> EnergyCoverageEntry."""

    pass
103
+
104
+
105
class LookupTableEntries(BaseModel):
    """All calibrated energy ranges plus the overall limits for one polarisation."""

    energies: EnergyCoverage
    limit: EnergyMinMax
108
+
109
+
110
class LookupTable(RootModel[dict[Pol, LookupTableEntries]]):
    """Mapping of polarisation mode -> LookupTableEntries."""

    # Allow to auto specify a dict if one not provided
    def __init__(self, root: dict[Pol, LookupTableEntries] | None = None):
        super().__init__(root=root or {})
114
+
115
+
116
class GapPhaseLookupTables(BaseModel):
    """Paired gap and phase lookup tables for one Apple2 insertion device."""

    gap: LookupTable = Field(default_factory=lambda: LookupTable())
    phase: LookupTable = Field(default_factory=lambda: LookupTable())
119
+
120
+
121
def convert_csv_to_lookup(
    file_contents: str,
    lut_config: LookupTableConfig,
    skip_line_start_with: str = "#",
) -> LookupTable:
    """
    Convert CSV content into the Apple2 lookup-table dictionary.

    Parameters:
    -----------
    file_contents:
        The CSV file contents as string.
    lut_config:
        Configuration describing how to process file_contents into a LookupTable.
    skip_line_start_with
        Lines beginning with this prefix are skipped (default "#").

    Raises:
    -----------
    RuntimeError
        If no rows could be converted.

    Returns:
    -----------
    LookupTable
    """

    def add_row(row: dict, table: LookupTable) -> LookupTable:
        """Fold one CSV row into the table, widening the limits as needed."""
        raw_mode = str(row[lut_config.mode]).lower()
        # NOTE(review): raw_mode has been lower-cased, while the default
        # MODE_NAME_CONVERT keys are upper-case ("CR"/"CL"); with the default
        # config this lookup may therefore never match — confirm key casing.
        if raw_mode in lut_config.mode_name_convert:
            raw_mode = lut_config.mode_name_convert[raw_mode]
        pol = Pol(raw_mode)

        low = float(row[lut_config.min_energy])
        high = float(row[lut_config.max_energy])
        # Polynomial coefficients for the energy-to-gap/phase conversion,
        # highest-order term first.
        coefficients = [float(row[column]) for column in lut_config.poly_deg]

        if pol in table.root:
            table.root[pol].energies.root[low] = EnergyCoverageEntry(
                low=low,
                high=high,
                poly=np.poly1d(coefficients),
            )
        else:
            table.root[pol] = generate_lookup_table_entry(
                min_energy=low,
                max_energy=high,
                poly1d_param=coefficients,
            )

        # Widen the overall energy limits to cover this row.
        limit = table.root[pol].limit
        limit.minimum = min(limit.minimum, low)
        limit.maximum = max(limit.maximum, high)
        return table

    lut = LookupTable()
    for row in csv.DictReader(read_file_and_skip(file_contents, skip_line_start_with)):
        # When a source filter is configured, convert only the matching rows.
        if (
            lut_config.source is None
            or row[lut_config.source[0]] == lut_config.source[1]
        ):
            add_row(row=row, table=lut)

    # Check if our LookupTable is empty after processing, raise error if it is.
    if not lut.root:
        raise RuntimeError(
            "LookupTable content is empty, failed to convert the file contents to "
            "a LookupTable!"
        )
    return lut
197
+
198
+
199
def read_file_and_skip(file: str, skip_line_start_with: str = "#") -> Generator[str]:
    """Yield each line of *file* that does not start with the skip prefix.

    Lines keep their trailing newlines, exactly as io.StringIO splits them.
    """
    yield from (
        line
        for line in io.StringIO(file)
        if not line.startswith(skip_line_start_with)
    )
206
+
207
+
208
def get_poly(
    energy: float,
    pol: Pol,
    lookup_table: LookupTable,
) -> np.poly1d:
    """
    Return the numpy.poly1d polynomial applicable for the given energy and polarisation.

    Parameters:
    -----------
    energy:
        Energy value in the same units used to create the lookup table (eV).
    pol:
        Polarisation mode (Pol enum).
    lookup_table:
        The converted lookup table dictionary for either 'gap' or 'phase'.

    Raises:
    -----------
    ValueError
        If the energy lies outside the table limits, or falls in a gap between
        calibrated ranges.
    """
    limit = lookup_table.root[pol].limit
    if energy < limit.minimum or energy > limit.maximum:
        raise ValueError(
            "Demanding energy must lie between"
            + f" {lookup_table.root[pol].limit.minimum}"
            + f" and {lookup_table.root[pol].limit.maximum} eV!"
        )
    for energy_range in lookup_table.root[pol].energies.root.values():
        # Ranges are half-open [low, high). Additionally accept energy == high
        # on the topmost range: previously an energy exactly equal to the
        # table maximum passed the limit check above but matched no range,
        # so a valid request at the maximum raised the "gap" error below.
        if energy_range.low <= energy < energy_range.high or (
            energy == energy_range.high == limit.maximum
        ):
            return energy_range.poly

    raise ValueError(
        "Cannot find polynomial coefficients for your requested energy."
        + " There might be gap in the calibration lookup table."
    )
243
+
244
+
245
def generate_lookup_table_entry(
    min_energy: float, max_energy: float, poly1d_param: list[float]
) -> LookupTableEntries:
    """Build a single-range LookupTableEntries covering [min_energy, max_energy].

    The single EnergyCoverageEntry is keyed by min_energy and the overall
    limits are set to the same range.
    """
    coverage = EnergyCoverage(
        {
            min_energy: EnergyCoverageEntry(
                low=min_energy,
                high=max_energy,
                poly=np.poly1d(poly1d_param),
            )
        }
    )
    limits = EnergyMinMax(minimum=float(min_energy), maximum=float(max_energy))
    return LookupTableEntries(energies=coverage, limit=limits)
263
+
264
+
265
def generate_lookup_table(
    pol: Pol, min_energy: float, max_energy: float, poly1d_param: list[float]
) -> LookupTable:
    """Build a LookupTable holding one single-range entry for *pol*."""
    entry = generate_lookup_table_entry(min_energy, max_energy, poly1d_param)
    return LookupTable({pol: entry})
271
+
272
+
273
def make_phase_tables(
    pols: list[Pol],
    min_energies: list[float],
    max_energies: list[float],
    poly1d_params: list[list[float]],
) -> LookupTable:
    """Generate a dictionary containing multiple lookuptable entries
    for provided polarisations.

    Parameters:
    -----------
    pols:
        Polarisation modes, one per entry.
    min_energies / max_energies:
        Energy range of each entry, aligned with pols.
    poly1d_params:
        Polynomial coefficients (highest order first) for each entry.

    Raises:
    -----------
    ValueError
        If the four input lists are not all the same length.
    """
    lookuptable_phase = LookupTable()
    # zip(strict=True) replaces index-based iteration and raises ValueError on
    # mismatched input lengths instead of an IndexError or silent truncation.
    for pol, min_energy, max_energy, coefficients in zip(
        pols, min_energies, max_energies, poly1d_params, strict=True
    ):
        lookuptable_phase.root[pol] = generate_lookup_table_entry(
            min_energy=min_energy,
            max_energy=max_energy,
            poly1d_param=coefficients,
        )

    return lookuptable_phase
290
+
291
+
292
class EnergyMotorLookup:
    """
    Handles lookup tables for Apple2 ID, converting energy and polarisation to gap
    and phase. Fetches and parses lookup tables from a config server, supports dynamic
    updates, and validates input. If custom logic is required for lookup tables, sub
    classes should override the _update_gap_lut and _update_phase_lut methods.

    After update_lookuptable() has populated the 'gap' and 'phase' tables,
    `get_motor_from_energy()` can be used to compute (gap, phase) for a requested
    (energy, pol) pair.
    """

    def __init__(
        self,
        config_client: ConfigServer,
        lut_config: LookupTableConfig,
        gap_path: Path,
        phase_path: Path,
    ):
        """Initialise the EnergyMotorLookup class with lookup table headers provided.

        Parameters:
        -----------
        config_client:
            The config server client to fetch the look up table data.
        lut_config:
            Configuration that defines how to process file contents into a LookupTable
        gap_path:
            File path to the gap lookup table.
        phase_path:
            File path to the phase lookup table.
        """
        self.lookup_tables = GapPhaseLookupTables()
        self.config_client = config_client
        self.lut_config = lut_config
        self.gap_path = gap_path
        self.phase_path = phase_path
        # Polarisations offered by the gap table; empty until the lookup
        # tables have been loaded (see get_motor_from_energy).
        self._available_pol: list[Pol] = []

    @property
    def available_pol(self) -> list[Pol]:
        """Polarisation modes currently supported by the loaded gap table."""
        return self._available_pol

    @available_pol.setter
    def available_pol(self, value: list[Pol]) -> None:
        self._available_pol = value

    def _update_gap_lut(self) -> None:
        """Re-fetch and re-parse the gap lookup table from the config server."""
        file_contents = self.config_client.get_file_contents(
            self.gap_path, reset_cached_result=True
        )
        self.lookup_tables.gap = convert_csv_to_lookup(
            file_contents, lut_config=self.lut_config
        )
        # The gap table defines which polarisations are available.
        self.available_pol = list(self.lookup_tables.gap.root.keys())

    def _update_phase_lut(self) -> None:
        """Re-fetch and re-parse the phase lookup table from the config server."""
        file_contents = self.config_client.get_file_contents(
            self.phase_path, reset_cached_result=True
        )
        self.lookup_tables.phase = convert_csv_to_lookup(
            file_contents, lut_config=self.lut_config
        )

    def update_lookuptables(self):
        """
        Update lookup tables from files and validate their format.
        """
        LOGGER.info("Updating lookup table from file for gap.")
        self._update_gap_lut()
        LOGGER.info("Updating lookup table from file for phase.")
        self._update_phase_lut()

    def get_motor_from_energy(self, energy: float, pol: Pol) -> tuple[float, float]:
        """
        Convert energy and polarisation to gap and phase motor positions.

        Parameters:
        -----------
        energy : float
            Desired energy in eV.
        pol : Pol
            Polarisation mode.

        Returns:
        ----------
        tuple[float, float]
            (gap, phase) motor positions.
        """
        # Lazily load the tables on first use. Truthiness check replaces the
        # previous comparison against a literal empty list (`== []`).
        if not self.available_pol:
            self.update_lookuptables()

        gap_poly = get_poly(lookup_table=self.lookup_tables.gap, energy=energy, pol=pol)
        phase_poly = get_poly(
            lookup_table=self.lookup_tables.phase,
            energy=energy,
            pol=pol,
        )
        return gap_poly(energy), phase_poly(energy)
@@ -0,0 +1,3 @@
1
+ from .wrapped import move, move_relative, set_absolute, set_relative, sleep, wait
2
+
3
+ __all__ = ["move", "move_relative", "set_absolute", "set_relative", "sleep", "wait"]
@@ -0,0 +1,9 @@
1
+ from ophyd_async.core import YamlSettingsProvider
2
+ from ophyd_async.fastcs.panda import HDFPanda
3
+ from ophyd_async.plan_stubs import apply_panda_settings, retrieve_settings
4
+
5
+
6
def load_panda_from_yaml(yaml_directory: str, yaml_file_name: str, panda: HDFPanda):
    """Retrieve saved PandA settings from a YAML file and apply them to *panda*.

    This is a plan stub (generator) and must be executed by the RunEngine.
    """
    settings_provider = YamlSettingsProvider(yaml_directory)
    loaded_settings = yield from retrieve_settings(
        settings_provider, yaml_file_name, panda
    )
    yield from apply_panda_settings(loaded_settings)
@@ -3,12 +3,12 @@ from typing import Protocol, runtime_checkable
3
3
  from bluesky import plan_stubs as bps
4
4
 
5
5
  from dodal.devices.common_dcm import DoubleCrystalMonochromatorBase
6
- from dodal.devices.undulator import Undulator
6
+ from dodal.devices.undulator import UndulatorInKeV
7
7
 
8
8
 
9
9
@runtime_checkable
class CheckUndulatorDevices(Protocol):
    """Structural type for beamline composites that provide both an undulator
    (keV-based) and a DCM, as required by the undulator-gap verification plan."""

    undulator: UndulatorInKeV
    dcm: DoubleCrystalMonochromatorBase
13
13
 
14
14
 
@@ -3,29 +3,79 @@ Allow external repos to reuse these fixtures so defined in single place.
3
3
  """
4
4
 
5
5
  import asyncio
6
+ import os
7
+ import threading
6
8
  import time
7
- from collections.abc import Mapping
9
+ from collections.abc import AsyncGenerator, Mapping
8
10
 
9
11
  import pytest
12
+ import pytest_asyncio
13
+ from _pytest.fixtures import FixtureRequest
10
14
  from bluesky.run_engine import RunEngine
11
15
  from bluesky.simulators import RunEngineSimulator
12
16
 
17
# NOTE(review): this module-level RunEngine appears unused within this module's
# visible code (fixtures below use _global_run_engine instead) — confirm it is
# still required, since constructing a RunEngine at import time has side
# effects (it starts an event loop).
_run_engine = RunEngine()

# Opt-in switch for the filehandle-leak-detection fixture defined below; set
# the environment variable DODAL_ENABLE_FILEHANDLE_LEAK_CHECKS=true to enable.
_ENABLE_FILEHANDLE_LEAK_CHECKS = (
    os.getenv("DODAL_ENABLE_FILEHANDLE_LEAK_CHECKS", "").lower() == "true"
)
22
+
23
+
24
@pytest_asyncio.fixture(scope="session", loop_scope="session", autouse=True)
async def _ensure_running_bluesky_event_loop(_global_run_engine):
    """Wait (briefly) until the session RunEngine's event loop is running.

    Runs automatically once per session so that ophyd_async devices created by
    tests have a live event loop available.

    Raises:
        TimeoutError: if the loop has not started within ~1 second.
    """
    # make sure the event loop is thoroughly up and running before we try to create
    # any ophyd_async devices which might need it
    timeout = time.monotonic() + 1
    while not _global_run_engine.loop.is_running():
        await asyncio.sleep(0)
        if time.monotonic() > timeout:
            raise TimeoutError("This really shouldn't happen but just in case...")
24
33
 
25
34
 
26
35
@pytest.fixture()
async def run_engine(_global_run_engine: RunEngine) -> AsyncGenerator[RunEngine, None]:
    """Provide the shared session RunEngine to a single test.

    The engine's state is reset after the test so that state from one test
    cannot leak into the next.
    """
    try:
        yield _global_run_engine
    finally:
        _global_run_engine.reset()
41
+
42
+
43
@pytest_asyncio.fixture(scope="session", loop_scope="session")
async def _global_run_engine() -> AsyncGenerator[RunEngine, None]:
    """
    Obtain a run engine, with its own event loop and thread.

    On closure of the scope, the run engine is stopped and the event loop closed
    in order to release all resources it consumes.
    """
    run_engine = RunEngine({}, call_returns_result=True)
    yield run_engine
    # Teardown: halt the engine, then stop and close its event loop. The
    # ordering below is deliberate — the loop thread must be captured and
    # joined before the loop can be closed.
    try:
        run_engine.halt()
    except Exception as e:
        # Ignore exception thrown if the run engine is already halted.
        print(f"Got exception while halting RunEngine {e}")
    finally:

        async def get_event_loop_thread():
            """Get the thread which the run engine created for the event loop."""
            return threading.current_thread()

        # Run the capture coroutine on the RunEngine's own loop so that it
        # reports the loop's thread, not this one.
        fut = asyncio.run_coroutine_threadsafe(get_event_loop_thread(), run_engine.loop)
        while not fut.done():
            # It's not clear why this is necessary, given we are
            # on a completely different thread and event loop
            # but without it our future never seems to be populated with a result
            # despite the coro getting executed
            await asyncio.sleep(0)
        # Terminate the event loop so that we can join() the thread
        run_engine.loop.call_soon_threadsafe(run_engine.loop.stop)
        run_engine_thread = fut.result()
        run_engine_thread.join()
        # This closes the filehandle in the event loop.
        # This cannot be called while the event loop is running
        run_engine.loop.close()
        del run_engine
29
79
 
30
80
 
31
81
  @pytest.fixture
@@ -44,3 +94,25 @@ def run_engine_documents(run_engine: RunEngine) -> Mapping[str, list[dict]]:
44
94
 
45
95
  run_engine.subscribe(append_and_print)
46
96
  return docs
97
+
98
+
99
@pytest.fixture(autouse=_ENABLE_FILEHANDLE_LEAK_CHECKS)
def check_for_filehandle_leaks(request: FixtureRequest):
    """
    Test fixture that can be enabled in order to check for leaked filehandles
    (typically caused by a rogue RunEngine instance).

    Note that this test is not enabled by default due to imposing a significant
    overhead. When a leak is suspected, usually from seeing a
    PytestUnraisableExceptionWarning, enable this via autouse and run the full
    test suite.

    NOTE(review): this reads /proc/<pid>/fd and therefore only works on Linux
    — confirm test environments are Linux-only before enabling.
    """
    pid = os.getpid()
    # Count the file descriptors open before the test runs.
    _baseline_n_open_files = len(os.listdir(f"/proc/{pid}/fd"))
    try:
        yield
    finally:
        # Any extra descriptors still open after the test indicate a leak.
        _n_open_files = len(os.listdir(f"/proc/{pid}/fd"))
        assert _n_open_files == _baseline_n_open_files, (
            f"Function {request.function.__name__} leaked some filehandles"
        )
@@ -1,17 +0,0 @@
1
- from dataclasses import dataclass
2
-
3
- I03_BEAM_HEIGHT_UM = 20.0
4
- I03_BEAM_WIDTH_UM = 80.0
5
-
6
-
7
- @dataclass
8
- class BeamSize:
9
- x_um: float | None
10
- y_um: float | None
11
-
12
-
13
- def beam_size_from_aperture(aperture_size: float | None):
14
- return BeamSize(
15
- min(aperture_size, I03_BEAM_WIDTH_UM) if aperture_size else None,
16
- I03_BEAM_HEIGHT_UM if aperture_size else None,
17
- )
dodal/testing/__init__.py DELETED
@@ -1,3 +0,0 @@
1
- from .setup import patch_all_motors, patch_motor
2
-
3
- __all__ = ["patch_motor", "patch_all_motors"]
dodal/testing/setup.py DELETED
@@ -1,67 +0,0 @@
1
- from contextlib import ExitStack
2
-
3
- from ophyd_async.core import Device
4
- from ophyd_async.epics.motor import Motor
5
- from ophyd_async.testing import (
6
- callback_on_mock_put,
7
- set_mock_value,
8
- )
9
-
10
-
11
- def patch_motor(
12
- motor: Motor,
13
- initial_position: float = 0,
14
- deadband: float = 0.001,
15
- velocity: float = 3,
16
- max_velocity: float = 5,
17
- low_limit_travel: float = float("-inf"),
18
- high_limit_travel: float = float("inf"),
19
- ):
20
- """
21
- Patch a mock motor with sensible default values so that it can still be used in
22
- tests and plans without running into errors as default values are zero.
23
-
24
- Parameters:
25
- motor: The mock motor to set mock values with.
26
- initial_position: The default initial position of the motor to be set.
27
- deadband: The tolerance between readback value and demand setpoint which the
28
- motor is considered at position.
29
- velocity: Requested move speed when the mock motor moves.
30
- max_velocity: The maximum allowable velocity that can be set for the motor.
31
- low_limit_travel: The lower limit that the motor can move to.
32
- high_limit_travel: The higher limit that the motor can move to.
33
- """
34
- set_mock_value(motor.user_setpoint, initial_position)
35
- set_mock_value(motor.user_readback, initial_position)
36
- set_mock_value(motor.deadband, deadband)
37
- set_mock_value(motor.motor_done_move, 1)
38
- set_mock_value(motor.velocity, velocity)
39
- set_mock_value(motor.max_velocity, max_velocity)
40
- set_mock_value(motor.low_limit_travel, low_limit_travel)
41
- set_mock_value(motor.high_limit_travel, high_limit_travel)
42
- return callback_on_mock_put(
43
- motor.user_setpoint,
44
- lambda pos, *args, **kwargs: set_mock_value(motor.user_readback, pos),
45
- )
46
-
47
-
48
- def patch_all_motors(parent_device: Device):
49
- """
50
- Check all children of a device and patch any motors with mock values.
51
-
52
- Parameters:
53
- parent_device: The device that hold motor(s) as children.
54
- """
55
- motors = []
56
-
57
- def recursively_find_motors(device: Device):
58
- for _, child_device in device.children():
59
- if isinstance(child_device, Motor):
60
- motors.append(child_device)
61
- recursively_find_motors(child_device)
62
-
63
- recursively_find_motors(parent_device)
64
- motor_patch_stack = ExitStack()
65
- for motor in motors:
66
- motor_patch_stack.enter_context(patch_motor(motor))
67
- return motor_patch_stack