ophyd-async 0.8.0a6__py3-none-any.whl → 0.9.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (110)
  1. ophyd_async/_version.py +2 -2
  2. ophyd_async/core/__init__.py +15 -46
  3. ophyd_async/core/_detector.py +68 -44
  4. ophyd_async/core/_device.py +120 -79
  5. ophyd_async/core/_device_filler.py +17 -8
  6. ophyd_async/core/_flyer.py +2 -2
  7. ophyd_async/core/_protocol.py +0 -28
  8. ophyd_async/core/_readable.py +30 -23
  9. ophyd_async/core/_settings.py +104 -0
  10. ophyd_async/core/_signal.py +91 -151
  11. ophyd_async/core/_signal_backend.py +4 -1
  12. ophyd_async/core/_soft_signal_backend.py +2 -1
  13. ophyd_async/core/_table.py +18 -10
  14. ophyd_async/core/_utils.py +30 -5
  15. ophyd_async/core/_yaml_settings.py +64 -0
  16. ophyd_async/epics/adandor/__init__.py +9 -0
  17. ophyd_async/epics/adandor/_andor.py +45 -0
  18. ophyd_async/epics/adandor/_andor_controller.py +49 -0
  19. ophyd_async/epics/adandor/_andor_io.py +36 -0
  20. ophyd_async/epics/adaravis/__init__.py +3 -1
  21. ophyd_async/epics/adaravis/_aravis.py +23 -37
  22. ophyd_async/epics/adaravis/_aravis_controller.py +21 -30
  23. ophyd_async/epics/adaravis/_aravis_io.py +4 -4
  24. ophyd_async/epics/adcore/__init__.py +15 -8
  25. ophyd_async/epics/adcore/_core_detector.py +41 -0
  26. ophyd_async/epics/adcore/_core_io.py +56 -31
  27. ophyd_async/epics/adcore/_core_logic.py +99 -86
  28. ophyd_async/epics/adcore/_core_writer.py +219 -0
  29. ophyd_async/epics/adcore/_hdf_writer.py +33 -59
  30. ophyd_async/epics/adcore/_jpeg_writer.py +26 -0
  31. ophyd_async/epics/adcore/_single_trigger.py +5 -4
  32. ophyd_async/epics/adcore/_tiff_writer.py +26 -0
  33. ophyd_async/epics/adcore/_utils.py +37 -36
  34. ophyd_async/epics/adkinetix/_kinetix.py +29 -24
  35. ophyd_async/epics/adkinetix/_kinetix_controller.py +15 -27
  36. ophyd_async/epics/adkinetix/_kinetix_io.py +7 -7
  37. ophyd_async/epics/adpilatus/__init__.py +2 -2
  38. ophyd_async/epics/adpilatus/_pilatus.py +28 -40
  39. ophyd_async/epics/adpilatus/_pilatus_controller.py +47 -25
  40. ophyd_async/epics/adpilatus/_pilatus_io.py +5 -5
  41. ophyd_async/epics/adsimdetector/__init__.py +3 -3
  42. ophyd_async/epics/adsimdetector/_sim.py +33 -17
  43. ophyd_async/epics/advimba/_vimba.py +23 -23
  44. ophyd_async/epics/advimba/_vimba_controller.py +21 -35
  45. ophyd_async/epics/advimba/_vimba_io.py +23 -23
  46. ophyd_async/epics/core/_aioca.py +52 -21
  47. ophyd_async/epics/core/_p4p.py +59 -16
  48. ophyd_async/epics/core/_pvi_connector.py +4 -2
  49. ophyd_async/epics/core/_signal.py +9 -2
  50. ophyd_async/epics/core/_util.py +10 -1
  51. ophyd_async/epics/eiger/_eiger_controller.py +4 -4
  52. ophyd_async/epics/eiger/_eiger_io.py +3 -3
  53. ophyd_async/epics/motor.py +26 -15
  54. ophyd_async/epics/sim/_ioc.py +29 -0
  55. ophyd_async/epics/{demo → sim}/_mover.py +12 -6
  56. ophyd_async/epics/{demo → sim}/_sensor.py +2 -2
  57. ophyd_async/epics/testing/__init__.py +14 -14
  58. ophyd_async/epics/testing/_example_ioc.py +53 -67
  59. ophyd_async/epics/testing/_utils.py +17 -45
  60. ophyd_async/epics/testing/test_records.db +22 -0
  61. ophyd_async/fastcs/core.py +2 -2
  62. ophyd_async/fastcs/panda/__init__.py +0 -2
  63. ophyd_async/fastcs/panda/_block.py +9 -9
  64. ophyd_async/fastcs/panda/_control.py +9 -4
  65. ophyd_async/fastcs/panda/_hdf_panda.py +7 -2
  66. ophyd_async/fastcs/panda/_table.py +4 -1
  67. ophyd_async/fastcs/panda/_trigger.py +7 -7
  68. ophyd_async/plan_stubs/__init__.py +14 -0
  69. ophyd_async/plan_stubs/_ensure_connected.py +11 -17
  70. ophyd_async/plan_stubs/_fly.py +2 -2
  71. ophyd_async/plan_stubs/_nd_attributes.py +7 -5
  72. ophyd_async/plan_stubs/_panda.py +13 -0
  73. ophyd_async/plan_stubs/_settings.py +125 -0
  74. ophyd_async/plan_stubs/_wait_for_awaitable.py +13 -0
  75. ophyd_async/sim/__init__.py +19 -0
  76. ophyd_async/sim/{demo/_pattern_detector → _pattern_detector}/_pattern_detector_controller.py +9 -2
  77. ophyd_async/sim/{demo/_pattern_detector → _pattern_detector}/_pattern_generator.py +13 -6
  78. ophyd_async/sim/{demo/_sim_motor.py → _sim_motor.py} +34 -32
  79. ophyd_async/tango/core/_signal.py +3 -1
  80. ophyd_async/tango/core/_tango_transport.py +13 -15
  81. ophyd_async/tango/{demo → sim}/_mover.py +5 -2
  82. ophyd_async/testing/__init__.py +52 -0
  83. ophyd_async/testing/__pytest_assert_rewrite.py +4 -0
  84. ophyd_async/testing/_assert.py +176 -0
  85. ophyd_async/{core → testing}/_mock_signal_utils.py +15 -11
  86. ophyd_async/testing/_one_of_everything.py +126 -0
  87. ophyd_async/testing/_wait_for_pending.py +22 -0
  88. {ophyd_async-0.8.0a6.dist-info → ophyd_async-0.9.0.dist-info}/METADATA +4 -2
  89. ophyd_async-0.9.0.dist-info/RECORD +129 -0
  90. {ophyd_async-0.8.0a6.dist-info → ophyd_async-0.9.0.dist-info}/WHEEL +1 -1
  91. ophyd_async/core/_device_save_loader.py +0 -274
  92. ophyd_async/epics/adsimdetector/_sim_controller.py +0 -51
  93. ophyd_async/fastcs/panda/_utils.py +0 -16
  94. ophyd_async/sim/demo/__init__.py +0 -19
  95. ophyd_async/sim/testing/__init__.py +0 -0
  96. ophyd_async-0.8.0a6.dist-info/RECORD +0 -116
  97. ophyd_async-0.8.0a6.dist-info/entry_points.txt +0 -2
  98. /ophyd_async/epics/{demo → sim}/__init__.py +0 -0
  99. /ophyd_async/epics/{demo → sim}/mover.db +0 -0
  100. /ophyd_async/epics/{demo → sim}/sensor.db +0 -0
  101. /ophyd_async/sim/{demo/_pattern_detector → _pattern_detector}/__init__.py +0 -0
  102. /ophyd_async/sim/{demo/_pattern_detector → _pattern_detector}/_pattern_detector.py +0 -0
  103. /ophyd_async/sim/{demo/_pattern_detector → _pattern_detector}/_pattern_detector_writer.py +0 -0
  104. /ophyd_async/tango/{demo → sim}/__init__.py +0 -0
  105. /ophyd_async/tango/{demo → sim}/_counter.py +0 -0
  106. /ophyd_async/tango/{demo → sim}/_detector.py +0 -0
  107. /ophyd_async/tango/{demo → sim}/_tango/__init__.py +0 -0
  108. /ophyd_async/tango/{demo → sim}/_tango/_servers.py +0 -0
  109. {ophyd_async-0.8.0a6.dist-info → ophyd_async-0.9.0.dist-info}/LICENSE +0 -0
  110. {ophyd_async-0.8.0a6.dist-info → ophyd_async-0.9.0.dist-info}/top_level.txt +0 -0
@@ -2,7 +2,8 @@ from __future__ import annotations
2
2
 
3
3
  import asyncio
4
4
  import functools
5
- from collections.abc import AsyncGenerator, Awaitable, Callable, Mapping
5
+ import time
6
+ from collections.abc import AsyncGenerator, Awaitable, Callable
6
7
  from typing import Any, Generic, cast
7
8
 
8
9
  from bluesky.protocols import (
@@ -17,7 +18,6 @@ from event_model import DataKey
17
18
  from ._device import Device, DeviceConnector
18
19
  from ._mock_signal_backend import MockSignalBackend
19
20
  from ._protocol import (
20
- AsyncConfigurable,
21
21
  AsyncReadable,
22
22
  AsyncStageable,
23
23
  Reading,
@@ -97,32 +97,37 @@ class Signal(Device, Generic[SignalDatatypeT]):
97
97
 
98
98
 
99
99
  class _SignalCache(Generic[SignalDatatypeT]):
100
- def __init__(self, backend: SignalBackend[SignalDatatypeT], signal: Signal):
101
- self._signal = signal
100
+ def __init__(self, backend: SignalBackend[SignalDatatypeT], signal: Signal) -> None:
101
+ self._signal: Signal[Any] = signal
102
102
  self._staged = False
103
103
  self._listeners: dict[Callback, bool] = {}
104
104
  self._valid = asyncio.Event()
105
105
  self._reading: Reading[SignalDatatypeT] | None = None
106
- self.backend = backend
106
+ self.backend: SignalBackend[SignalDatatypeT] = backend
107
107
  signal.log.debug(f"Making subscription on source {signal.source}")
108
108
  backend.set_callback(self._callback)
109
109
 
110
- def close(self):
110
+ def close(self) -> None:
111
111
  self.backend.set_callback(None)
112
112
  self._signal.log.debug(f"Closing subscription on source {self._signal.source}")
113
113
 
114
+ def _ensure_reading(self) -> Reading[SignalDatatypeT]:
115
+ if not self._reading:
116
+ msg = "Monitor not working"
117
+ raise RuntimeError(msg)
118
+ return self._reading
119
+
114
120
  async def get_reading(self) -> Reading[SignalDatatypeT]:
115
121
  await self._valid.wait()
116
- assert self._reading is not None, "Monitor not working"
117
- return self._reading
122
+ return self._ensure_reading()
118
123
 
119
124
  async def get_value(self) -> SignalDatatypeT:
120
- reading = await self.get_reading()
125
+ reading: Reading[SignalDatatypeT] = await self.get_reading()
121
126
  return reading["value"]
122
127
 
123
- def _callback(self, reading: Reading[SignalDatatypeT]):
128
+ def _callback(self, reading: Reading[SignalDatatypeT]) -> None:
124
129
  self._signal.log.debug(
125
- f"Updated subscription: reading of source {self._signal.source} changed"
130
+ f"Updated subscription: reading of source {self._signal.source} changed "
126
131
  f"from {self._reading} to {reading}"
127
132
  )
128
133
  self._reading = reading
@@ -134,12 +139,10 @@ class _SignalCache(Generic[SignalDatatypeT]):
134
139
  self,
135
140
  function: Callback[dict[str, Reading[SignalDatatypeT]] | SignalDatatypeT],
136
141
  want_value: bool,
137
- ):
138
- assert self._reading, "Monitor not working"
139
- if want_value:
140
- function(self._reading["value"])
141
- else:
142
- function({self._signal.name: self._reading})
142
+ ) -> None:
143
+ function(self._ensure_reading()["value"]) if want_value else function(
144
+ {self._signal.name: self._ensure_reading()}
145
+ )
143
146
 
144
147
  def subscribe(self, function: Callback, want_value: bool) -> None:
145
148
  self._listeners[function] = want_value
@@ -150,7 +153,7 @@ class _SignalCache(Generic[SignalDatatypeT]):
150
153
  self._listeners.pop(function)
151
154
  return self._staged or bool(self._listeners)
152
155
 
153
- def set_staged(self, staged: bool):
156
+ def set_staged(self, staged: bool) -> bool:
154
157
  self._staged = staged
155
158
  return self._staged or bool(self._listeners)
156
159
 
@@ -167,7 +170,10 @@ class SignalR(Signal[SignalDatatypeT], AsyncReadable, AsyncStageable, Subscribab
167
170
  if cached is None:
168
171
  cached = self._cache is not None
169
172
  if cached:
170
- assert self._cache, f"{self.source} not being monitored"
173
+ if not self._cache:
174
+ msg = f"{self.source} not being monitored"
175
+ raise RuntimeError(msg)
176
+ # assert self._cache, f"{self.source} not being monitored"
171
177
  return self._cache
172
178
  else:
173
179
  return self._connector.backend
@@ -301,130 +307,11 @@ def soft_signal_r_and_setter(
301
307
  return (signal, backend.set_value)
302
308
 
303
309
 
304
- def _generate_assert_error_msg(name: str, expected_result, actual_result) -> str:
305
- WARNING = "\033[93m"
306
- FAIL = "\033[91m"
307
- ENDC = "\033[0m"
308
- return (
309
- f"Expected {WARNING}{name}{ENDC} to produce"
310
- + f"\n{FAIL}{expected_result}{ENDC}"
311
- + f"\nbut actually got \n{FAIL}{actual_result}{ENDC}"
312
- )
313
-
314
-
315
- async def assert_value(signal: SignalR[SignalDatatypeT], value: Any) -> None:
316
- """Assert a signal's value and compare it an expected signal.
317
-
318
- Parameters
319
- ----------
320
- signal:
321
- signal with get_value.
322
- value:
323
- The expected value from the signal.
324
-
325
- Notes
326
- -----
327
- Example usage::
328
- await assert_value(signal, value)
329
-
330
- """
331
- actual_value = await signal.get_value()
332
- assert actual_value == value, _generate_assert_error_msg(
333
- name=signal.name,
334
- expected_result=value,
335
- actual_result=actual_value,
336
- )
337
-
338
-
339
- async def assert_reading(
340
- readable: AsyncReadable, expected_reading: Mapping[str, Reading]
341
- ) -> None:
342
- """Assert readings from readable.
343
-
344
- Parameters
345
- ----------
346
- readable:
347
- Callable with readable.read function that generate readings.
348
-
349
- reading:
350
- The expected readings from the readable.
351
-
352
- Notes
353
- -----
354
- Example usage::
355
- await assert_reading(readable, reading)
356
-
357
- """
358
- actual_reading = await readable.read()
359
- assert expected_reading == actual_reading, _generate_assert_error_msg(
360
- name=readable.name,
361
- expected_result=expected_reading,
362
- actual_result=actual_reading,
363
- )
364
-
365
-
366
- async def assert_configuration(
367
- configurable: AsyncConfigurable,
368
- configuration: Mapping[str, Reading],
369
- ) -> None:
370
- """Assert readings from Configurable.
371
-
372
- Parameters
373
- ----------
374
- configurable:
375
- Configurable with Configurable.read function that generate readings.
376
-
377
- configuration:
378
- The expected readings from configurable.
379
-
380
- Notes
381
- -----
382
- Example usage::
383
- await assert_configuration(configurable configuration)
384
-
385
- """
386
- actual_configurable = await configurable.read_configuration()
387
- assert configuration == actual_configurable, _generate_assert_error_msg(
388
- name=configurable.name,
389
- expected_result=configuration,
390
- actual_result=actual_configurable,
391
- )
392
-
393
-
394
- def assert_emitted(docs: Mapping[str, list[dict]], **numbers: int):
395
- """Assert emitted document generated by running a Bluesky plan
396
-
397
- Parameters
398
- ----------
399
- Doc:
400
- A dictionary
401
-
402
- numbers:
403
- expected emission in kwarg from
404
-
405
- Notes
406
- -----
407
- Example usage::
408
- assert_emitted(docs, start=1, descriptor=1,
409
- resource=1, datum=1, event=1, stop=1)
410
- """
411
- assert list(docs) == list(numbers), _generate_assert_error_msg(
412
- name="documents",
413
- expected_result=list(numbers),
414
- actual_result=list(docs),
415
- )
416
- actual_numbers = {name: len(d) for name, d in docs.items()}
417
- assert actual_numbers == numbers, _generate_assert_error_msg(
418
- name="emitted",
419
- expected_result=numbers,
420
- actual_result=actual_numbers,
421
- )
422
-
423
-
424
310
  async def observe_value(
425
311
  signal: SignalR[SignalDatatypeT],
426
312
  timeout: float | None = None,
427
313
  done_status: Status | None = None,
314
+ done_timeout: float | None = None,
428
315
  ) -> AsyncGenerator[SignalDatatypeT, None]:
429
316
  """Subscribe to the value of a signal so it can be iterated from.
430
317
 
@@ -439,9 +326,17 @@ async def observe_value(
439
326
  done_status:
440
327
  If this status is complete, stop observing and make the iterator return.
441
328
  If it raises an exception then this exception will be raised by the iterator.
329
+ done_timeout:
330
+ If given, the maximum time to watch a signal, in seconds. If the loop is still
331
+ being watched after this length, raise asyncio.TimeoutError. This should be used
332
+ instead of on an 'asyncio.wait_for' timeout
442
333
 
443
334
  Notes
444
335
  -----
336
+ Due to a rare condition with busy signals, it is not recommended to use this
337
+ function with asyncio.timeout, including in an 'asyncio.wait_for' loop. Instead,
338
+ this timeout should be given to the done_timeout parameter.
339
+
445
340
  Example usage::
446
341
 
447
342
  async for value in observe_value(sig):
@@ -449,15 +344,26 @@ async def observe_value(
449
344
  """
450
345
 
451
346
  async for _, value in observe_signals_value(
452
- signal, timeout=timeout, done_status=done_status
347
+ signal,
348
+ timeout=timeout,
349
+ done_status=done_status,
350
+ done_timeout=done_timeout,
453
351
  ):
454
352
  yield value
455
353
 
456
354
 
355
+ def _get_iteration_timeout(
356
+ timeout: float | None, overall_deadline: float | None
357
+ ) -> float | None:
358
+ overall_deadline = overall_deadline - time.monotonic() if overall_deadline else None
359
+ return min([x for x in [overall_deadline, timeout] if x is not None], default=None)
360
+
361
+
457
362
  async def observe_signals_value(
458
363
  *signals: SignalR[SignalDatatypeT],
459
364
  timeout: float | None = None,
460
365
  done_status: Status | None = None,
366
+ done_timeout: float | None = None,
461
367
  ) -> AsyncGenerator[tuple[SignalR[SignalDatatypeT], SignalDatatypeT], None]:
462
368
  """Subscribe to the value of a signal so it can be iterated from.
463
369
 
@@ -472,6 +378,10 @@ async def observe_signals_value(
472
378
  done_status:
473
379
  If this status is complete, stop observing and make the iterator return.
474
380
  If it raises an exception then this exception will be raised by the iterator.
381
+ done_timeout:
382
+ If given, the maximum time to watch a signal, in seconds. If the loop is still
383
+ being watched after this length, raise asyncio.TimeoutError. This should be used
384
+ instead of on an 'asyncio.wait_for' timeout
475
385
 
476
386
  Notes
477
387
  -----
@@ -486,12 +396,6 @@ async def observe_signals_value(
486
396
  q: asyncio.Queue[tuple[SignalR[SignalDatatypeT], SignalDatatypeT] | Status] = (
487
397
  asyncio.Queue()
488
398
  )
489
- if timeout is None:
490
- get_value = q.get
491
- else:
492
-
493
- async def get_value():
494
- return await asyncio.wait_for(q.get(), timeout)
495
399
 
496
400
  cbs: dict[SignalR, Callback] = {}
497
401
  for signal in signals:
@@ -504,13 +408,17 @@ async def observe_signals_value(
504
408
 
505
409
  if done_status is not None:
506
410
  done_status.add_callback(q.put_nowait)
507
-
411
+ overall_deadline = time.monotonic() + done_timeout if done_timeout else None
508
412
  try:
509
413
  while True:
510
- # yield here in case something else is filling the queue
511
- # like in test_observe_value_times_out_with_no_external_task()
512
- await asyncio.sleep(0)
513
- item = await get_value()
414
+ if overall_deadline and time.monotonic() >= overall_deadline:
415
+ raise asyncio.TimeoutError(
416
+ f"observe_value was still observing signals "
417
+ f"{[signal.source for signal in signals]} after "
418
+ f"timeout {done_timeout}s"
419
+ )
420
+ iteration_timeout = _get_iteration_timeout(timeout, overall_deadline)
421
+ item = await asyncio.wait_for(q.get(), iteration_timeout)
514
422
  if done_status and item is done_status:
515
423
  if exc := done_status.exception():
516
424
  raise exc
@@ -696,3 +604,35 @@ async def set_and_wait_for_value(
696
604
  status_timeout,
697
605
  wait_for_set_completion,
698
606
  )
607
+
608
+
609
+ def walk_rw_signals(device: Device, path_prefix: str = "") -> dict[str, SignalRW[Any]]:
610
+ """Retrieve all SignalRWs from a device.
611
+
612
+ Stores retrieved signals with their dotted attribute paths in a dictionary. Used as
613
+ part of saving and loading a device.
614
+
615
+ Parameters
616
+ ----------
617
+ device : Device
618
+ Ophyd device to retrieve read-write signals from.
619
+
620
+ path_prefix : str
621
+ For internal use, leave blank when calling the method.
622
+
623
+ Returns
624
+ -------
625
+ SignalRWs : dict
626
+ A dictionary matching the string attribute path of a SignalRW with the
627
+ signal itself.
628
+
629
+ """
630
+ signals: dict[str, SignalRW[Any]] = {}
631
+
632
+ for attr_name, attr in device.children():
633
+ dot_path = f"{path_prefix}{attr_name}"
634
+ if type(attr) is SignalRW:
635
+ signals[dot_path] = attr
636
+ attr_signals = walk_rw_signals(attr, path_prefix=dot_path + ".")
637
+ signals.update(attr_signals)
638
+ return signals
@@ -10,7 +10,10 @@ from ._table import Table
10
10
  from ._utils import Callback, StrictEnum, T
11
11
 
12
12
  DTypeScalar_co = TypeVar("DTypeScalar_co", covariant=True, bound=np.generic)
13
- Array1D = np.ndarray[tuple[int], np.dtype[DTypeScalar_co]]
13
+ # To be a 1D array shape should really be tuple[int], but np.array()
14
+ # currently produces tuple[int, ...] even when it has 1D input args
15
+ # https://github.com/numpy/numpy/issues/28077#issuecomment-2566485178
16
+ Array1D = np.ndarray[tuple[int, ...], np.dtype[DTypeScalar_co]]
14
17
  Primitive = bool | int | float | str
15
18
  # NOTE: if you change this union then update the docs to match
16
19
  SignalDatatype = (
@@ -175,7 +175,8 @@ class SoftSignalBackend(SignalBackend[SignalDatatypeT]):
175
175
  return self.reading["value"]
176
176
 
177
177
  def set_callback(self, callback: Callback[Reading[SignalDatatypeT]] | None) -> None:
178
+ if callback and self.callback:
179
+ raise RuntimeError("Cannot set a callback when one is already set")
178
180
  if callback:
179
- assert not self.callback, "Cannot set a callback when one is already set"
180
181
  callback(self.reading)
181
182
  self.callback = callback
@@ -39,6 +39,11 @@ class Table(BaseModel):
39
39
  # so it is strictly checked against the BaseModel we are supplied.
40
40
  model_config = ConfigDict(extra="allow")
41
41
 
42
+ # Add an init method to match the above model config, otherwise the type
43
+ # checker will not think we can pass arbitrary kwargs into the base class init
44
+ def __init__(self, **kwargs):
45
+ super().__init__(**kwargs)
46
+
42
47
  @classmethod
43
48
  def __init_subclass__(cls):
44
49
  # But forbit extra in subclasses so it gets validated
@@ -78,9 +83,6 @@ class Table(BaseModel):
78
83
  }
79
84
  )
80
85
 
81
- def __eq__(self, value: object) -> bool:
82
- return super().__eq__(value)
83
-
84
86
  def numpy_dtype(self) -> np.dtype:
85
87
  dtype = []
86
88
  for k, v in self:
@@ -99,8 +101,10 @@ class Table(BaseModel):
99
101
  v = v[selection]
100
102
  if array is None:
101
103
  array = np.empty(v.shape, dtype=self.numpy_dtype())
102
- array[k] = v
103
- assert array is not None
104
+ array[k] = v # type: ignore
105
+ if array is None:
106
+ msg = "No arrays found in table"
107
+ raise ValueError(msg)
104
108
  return array
105
109
 
106
110
  @model_validator(mode="before")
@@ -123,10 +127,12 @@ class Table(BaseModel):
123
127
  # Convert to correct dtype, but only if we don't lose precision
124
128
  # as a result
125
129
  cast_value = np.array(data_value).astype(expected_dtype)
126
- assert np.array_equal(data_value, cast_value), (
127
- f"{field_name}: Cannot cast {data_value} to {expected_dtype} "
128
- "without losing precision"
129
- )
130
+ if not np.array_equal(data_value, cast_value):
131
+ msg = (
132
+ f"{field_name}: Cannot cast {data_value} to {expected_dtype} "
133
+ "without losing precision"
134
+ )
135
+ raise ValueError(msg)
130
136
  data_dict[field_name] = cast_value
131
137
  return data_dict
132
138
 
@@ -135,7 +141,9 @@ class Table(BaseModel):
135
141
  lengths: dict[int, set[str]] = {}
136
142
  for field_name, field_value in self:
137
143
  lengths.setdefault(len(field_value), set()).add(field_name)
138
- assert len(lengths) <= 1, f"Columns should be same length, got {lengths=}"
144
+ if len(lengths) > 1:
145
+ msg = f"Columns should be same length, got {lengths=}"
146
+ raise ValueError(msg)
139
147
  return self
140
148
 
141
149
  def __len__(self) -> int:
@@ -16,13 +16,38 @@ Callback = Callable[[T], None]
16
16
  DEFAULT_TIMEOUT = 10.0
17
17
  ErrorText = str | Mapping[str, Exception]
18
18
 
19
+ logger = logging.getLogger("ophyd_async")
19
20
 
20
- class StrictEnum(str, Enum):
21
+
22
+ class StrictEnumMeta(EnumMeta):
23
+ def __new__(metacls, *args, **kwargs):
24
+ ret = super().__new__(metacls, *args, **kwargs)
25
+ lowercase_names = [x.name for x in ret if not x.name.isupper()] # type: ignore
26
+ if lowercase_names:
27
+ raise TypeError(f"Names {lowercase_names} should be uppercase")
28
+ return ret
29
+
30
+
31
+ class StrictEnum(str, Enum, metaclass=StrictEnumMeta):
21
32
  """All members should exist in the Backend, and there will be no extras"""
22
33
 
23
34
 
24
- class SubsetEnumMeta(EnumMeta):
35
+ class SubsetEnumMeta(StrictEnumMeta):
25
36
  def __call__(self, value, *args, **kwargs): # type: ignore
37
+ """
38
+ Returns given value if it is a string and not a member of the enum.
39
+ If the value is not a string or is an enum member, default enum behavior
40
+ is applied. Type checking will complain if provided arbitrary string.
41
+
42
+ Returns:
43
+ Union[str, SubsetEnum]: If the value is a string and not a member of the
44
+ enum, the string is returned as is. Otherwise, the corresponding enum
45
+ member is returned.
46
+
47
+ Raises:
48
+ ValueError: If the value is not a string and cannot be converted to an enum
49
+ member.
50
+ """
26
51
  if isinstance(value, str) and not isinstance(value, self):
27
52
  return value
28
53
  return super().__call__(value, *args, **kwargs)
@@ -85,7 +110,7 @@ class NotConnected(Exception):
85
110
  def format_error_string(self, indent="") -> str:
86
111
  if not isinstance(self._errors, dict) and not isinstance(self._errors, str):
87
112
  raise RuntimeError(
88
- f"Unexpected type `{type(self._errors)}` " "expected `str` or `dict`"
113
+ f"Unexpected type `{type(self._errors)}` expected `str` or `dict`"
89
114
  )
90
115
 
91
116
  if isinstance(self._errors, str):
@@ -105,7 +130,7 @@ class NotConnected(Exception):
105
130
  ) -> NotConnected:
106
131
  for name, exception in exceptions.items():
107
132
  if not isinstance(exception, NotConnected):
108
- logging.exception(
133
+ logger.exception(
109
134
  f"device `{name}` raised unexpected exception "
110
135
  f"{type(exception).__name__}",
111
136
  exc_info=exception,
@@ -180,7 +205,7 @@ def get_enum_cls(datatype: type | None) -> type[StrictEnum] | None:
180
205
  if datatype and issubclass(datatype, Enum):
181
206
  if not issubclass(datatype, StrictEnum):
182
207
  raise TypeError(
183
- f"{datatype} should inherit from .SubsetEnum "
208
+ f"{datatype} should inherit from ophyd_async.core.SubsetEnum "
184
209
  "or ophyd_async.core.StrictEnum"
185
210
  )
186
211
  return datatype
@@ -0,0 +1,64 @@
1
+ import warnings
2
+ from enum import Enum
3
+ from pathlib import Path
4
+ from typing import Any
5
+
6
+ import numpy as np
7
+ import numpy.typing as npt
8
+ import yaml
9
+ from pydantic import BaseModel
10
+
11
+ from ._settings import SettingsProvider
12
+
13
+
14
+ def ndarray_representer(dumper: yaml.Dumper, array: npt.NDArray[Any]) -> yaml.Node:
15
+ return dumper.represent_sequence(
16
+ "tag:yaml.org,2002:seq", array.tolist(), flow_style=True
17
+ )
18
+
19
+
20
+ def pydantic_model_abstraction_representer(
21
+ dumper: yaml.Dumper, model: BaseModel
22
+ ) -> yaml.Node:
23
+ return dumper.represent_data(model.model_dump(mode="python"))
24
+
25
+
26
+ def enum_representer(dumper: yaml.Dumper, enum: Enum) -> yaml.Node:
27
+ return dumper.represent_data(enum.value)
28
+
29
+
30
+ class YamlSettingsProvider(SettingsProvider):
31
+ def __init__(self, directory: Path | str):
32
+ self._directory = Path(directory)
33
+
34
+ def _file_path(self, name: str) -> Path:
35
+ return self._directory / (name + ".yaml")
36
+
37
+ async def store(self, name: str, data: dict[str, Any]):
38
+ yaml.add_representer(np.ndarray, ndarray_representer, Dumper=yaml.Dumper)
39
+ yaml.add_multi_representer(
40
+ BaseModel,
41
+ pydantic_model_abstraction_representer,
42
+ Dumper=yaml.Dumper,
43
+ )
44
+ yaml.add_multi_representer(Enum, enum_representer, Dumper=yaml.Dumper)
45
+ with open(self._file_path(name), "w") as file:
46
+ yaml.dump(data, file)
47
+
48
+ async def retrieve(self, name: str) -> dict[str, Any]:
49
+ with open(self._file_path(name)) as file:
50
+ data = yaml.full_load(file)
51
+ if isinstance(data, list):
52
+ warnings.warn(
53
+ DeprecationWarning(
54
+ "Found old save file. Re-save your yaml settings file "
55
+ f"{self._file_path(name)} using "
56
+ "ophyd_async.plan_stubs.store_settings"
57
+ ),
58
+ stacklevel=2,
59
+ )
60
+ merge = {}
61
+ for d in data:
62
+ merge.update(d)
63
+ return merge
64
+ return data
@@ -0,0 +1,9 @@
1
+ from ._andor import Andor2Detector
2
+ from ._andor_controller import Andor2Controller
3
+ from ._andor_io import Andor2DriverIO
4
+
5
+ __all__ = [
6
+ "Andor2Detector",
7
+ "Andor2Controller",
8
+ "Andor2DriverIO",
9
+ ]
@@ -0,0 +1,45 @@
1
+ from collections.abc import Sequence
2
+
3
+ from ophyd_async.core import PathProvider
4
+ from ophyd_async.core._signal import SignalR
5
+ from ophyd_async.epics import adcore
6
+
7
+ from ._andor_controller import Andor2Controller
8
+ from ._andor_io import Andor2DriverIO
9
+
10
+
11
+ class Andor2Detector(adcore.AreaDetector[Andor2Controller]):
12
+ """
13
+ Andor 2 area detector device (CCD detector 56fps with full chip readout).
14
+ Andor model:DU897_BV.
15
+ """
16
+
17
+ def __init__(
18
+ self,
19
+ prefix: str,
20
+ path_provider: PathProvider,
21
+ drv_suffix="cam1:",
22
+ writer_cls: type[adcore.ADWriter] = adcore.ADHDFWriter,
23
+ fileio_suffix: str | None = None,
24
+ name: str = "",
25
+ config_sigs: Sequence[SignalR] = (),
26
+ plugins: dict[str, adcore.NDPluginBaseIO] | None = None,
27
+ ):
28
+ driver = Andor2DriverIO(prefix + drv_suffix)
29
+ controller = Andor2Controller(driver)
30
+
31
+ writer = writer_cls.with_io(
32
+ prefix,
33
+ path_provider,
34
+ dataset_source=driver,
35
+ fileio_suffix=fileio_suffix,
36
+ plugins=plugins,
37
+ )
38
+
39
+ super().__init__(
40
+ controller=controller,
41
+ writer=writer,
42
+ plugins=plugins,
43
+ name=name,
44
+ config_sigs=config_sigs,
45
+ )
@@ -0,0 +1,49 @@
1
+ import asyncio
2
+
3
+ from ophyd_async.core import (
4
+ DetectorTrigger,
5
+ TriggerInfo,
6
+ )
7
+ from ophyd_async.epics import adcore
8
+
9
+ from ._andor_io import Andor2DriverIO, Andor2TriggerMode
10
+
11
+ _MIN_DEAD_TIME = 0.1
12
+ _MAX_NUM_IMAGE = 999_999
13
+
14
+
15
+ class Andor2Controller(adcore.ADBaseController[Andor2DriverIO]):
16
+ def __init__(
17
+ self,
18
+ driver: Andor2DriverIO,
19
+ good_states: frozenset[adcore.DetectorState] = adcore.DEFAULT_GOOD_STATES,
20
+ ) -> None:
21
+ super().__init__(driver, good_states=good_states)
22
+
23
+ def get_deadtime(self, exposure: float | None) -> float:
24
+ return _MIN_DEAD_TIME + (exposure or 0)
25
+
26
+ async def prepare(self, trigger_info: TriggerInfo):
27
+ await self.set_exposure_time_and_acquire_period_if_supplied(
28
+ trigger_info.livetime
29
+ )
30
+ await asyncio.gather(
31
+ self.driver.trigger_mode.set(self._get_trigger_mode(trigger_info.trigger)),
32
+ self.driver.num_images.set(
33
+ trigger_info.total_number_of_triggers or _MAX_NUM_IMAGE
34
+ ),
35
+ self.driver.image_mode.set(adcore.ImageMode.MULTIPLE),
36
+ )
37
+
38
+ def _get_trigger_mode(self, trigger: DetectorTrigger) -> Andor2TriggerMode:
39
+ supported_trigger_types = {
40
+ DetectorTrigger.INTERNAL: Andor2TriggerMode.INTERNAL,
41
+ DetectorTrigger.EDGE_TRIGGER: Andor2TriggerMode.EXT_TRIGGER,
42
+ }
43
+ if trigger not in supported_trigger_types:
44
+ raise ValueError(
45
+ f"{self.__class__.__name__} only supports the following trigger "
46
+ f"types: {supported_trigger_types} but was asked to "
47
+ f"use {trigger}"
48
+ )
49
+ return supported_trigger_types[trigger]