dls-dodal 1.33.0-py3-none-any.whl → 1.35.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {dls_dodal-1.33.0.dist-info → dls_dodal-1.35.0.dist-info}/METADATA +3 -3
- dls_dodal-1.35.0.dist-info/RECORD +147 -0
- {dls_dodal-1.33.0.dist-info → dls_dodal-1.35.0.dist-info}/WHEEL +1 -1
- dodal/__init__.py +8 -0
- dodal/_version.py +2 -2
- dodal/beamline_specific_utils/i03.py +6 -2
- dodal/beamlines/__init__.py +2 -3
- dodal/beamlines/i03.py +41 -9
- dodal/beamlines/i04.py +26 -4
- dodal/beamlines/i10.py +257 -0
- dodal/beamlines/i22.py +25 -13
- dodal/beamlines/i24.py +11 -11
- dodal/beamlines/p38.py +24 -13
- dodal/common/beamlines/beamline_utils.py +1 -2
- dodal/common/crystal_metadata.py +61 -0
- dodal/common/signal_utils.py +10 -14
- dodal/common/types.py +2 -7
- dodal/devices/CTAB.py +1 -1
- dodal/devices/aperture.py +1 -1
- dodal/devices/aperturescatterguard.py +20 -8
- dodal/devices/apple2_undulator.py +603 -0
- dodal/devices/areadetector/plugins/CAM.py +29 -0
- dodal/devices/areadetector/plugins/MJPG.py +51 -106
- dodal/devices/attenuator.py +1 -1
- dodal/devices/backlight.py +11 -11
- dodal/devices/cryostream.py +3 -5
- dodal/devices/dcm.py +26 -2
- dodal/devices/detector/detector_motion.py +3 -5
- dodal/devices/diamond_filter.py +46 -0
- dodal/devices/eiger.py +6 -2
- dodal/devices/eiger_odin.py +48 -39
- dodal/devices/fast_grid_scan.py +1 -1
- dodal/devices/fluorescence_detector_motion.py +5 -7
- dodal/devices/focusing_mirror.py +26 -19
- dodal/devices/hutch_shutter.py +4 -5
- dodal/devices/i10/i10_apple2.py +399 -0
- dodal/devices/i10/i10_setting_data.py +7 -0
- dodal/devices/i22/dcm.py +50 -83
- dodal/devices/i22/fswitch.py +5 -5
- dodal/devices/i24/aperture.py +3 -5
- dodal/devices/i24/beamstop.py +3 -5
- dodal/devices/i24/dcm.py +1 -1
- dodal/devices/i24/dual_backlight.py +9 -11
- dodal/devices/i24/pmac.py +35 -46
- dodal/devices/i24/vgonio.py +16 -0
- dodal/devices/ipin.py +5 -3
- dodal/devices/linkam3.py +7 -7
- dodal/devices/oav/oav_calculations.py +22 -0
- dodal/devices/oav/oav_detector.py +118 -83
- dodal/devices/oav/oav_parameters.py +50 -104
- dodal/devices/oav/oav_to_redis_forwarder.py +77 -35
- dodal/devices/oav/pin_image_recognition/__init__.py +9 -7
- dodal/devices/oav/{grid_overlay.py → snapshots/grid_overlay.py} +16 -59
- dodal/devices/oav/snapshots/snapshot_with_beam_centre.py +64 -0
- dodal/devices/oav/snapshots/snapshot_with_grid.py +57 -0
- dodal/devices/oav/utils.py +28 -27
- dodal/devices/p99/sample_stage.py +3 -5
- dodal/devices/pgm.py +40 -0
- dodal/devices/qbpm.py +18 -0
- dodal/devices/robot.py +5 -5
- dodal/devices/smargon.py +3 -3
- dodal/devices/synchrotron.py +9 -4
- dodal/devices/tetramm.py +9 -9
- dodal/devices/thawer.py +13 -7
- dodal/devices/undulator.py +7 -6
- dodal/devices/util/adjuster_plans.py +1 -1
- dodal/devices/util/epics_util.py +1 -1
- dodal/devices/util/lookup_tables.py +4 -5
- dodal/devices/watsonmarlow323_pump.py +45 -0
- dodal/devices/webcam.py +9 -2
- dodal/devices/xbpm_feedback.py +3 -5
- dodal/devices/xspress3/xspress3.py +8 -9
- dodal/devices/xspress3/xspress3_channel.py +3 -5
- dodal/devices/zebra.py +12 -8
- dodal/devices/zebra_controlled_shutter.py +5 -6
- dodal/devices/zocalo/__init__.py +2 -2
- dodal/devices/zocalo/zocalo_constants.py +3 -0
- dodal/devices/zocalo/zocalo_interaction.py +2 -1
- dodal/devices/zocalo/zocalo_results.py +105 -89
- dodal/plans/data_session_metadata.py +2 -2
- dodal/plans/motor_util_plans.py +11 -9
- dodal/utils.py +11 -0
- dls_dodal-1.33.0.dist-info/RECORD +0 -136
- dodal/beamlines/i04_1.py +0 -140
- dodal/devices/i24/i24_vgonio.py +0 -17
- dodal/devices/oav/oav_errors.py +0 -35
- {dls_dodal-1.33.0.dist-info → dls_dodal-1.35.0.dist-info}/LICENSE +0 -0
- {dls_dodal-1.33.0.dist-info → dls_dodal-1.35.0.dist-info}/entry_points.txt +0 -0
- {dls_dodal-1.33.0.dist-info → dls_dodal-1.35.0.dist-info}/top_level.txt +0 -0
dodal/devices/oav/oav_parameters.py

@@ -1,15 +1,10 @@
 import json
 import xml.etree.ElementTree as et
 from collections import ChainMap
+from dataclasses import dataclass
 from typing import Any
 from xml.etree.ElementTree import Element
 
-from dodal.devices.oav.oav_errors import (
-    OAVError_BeamPositionNotFound,
-    OAVError_ZoomLevelNotFound,
-)
-from dodal.log import LOGGER
-
 # GDA currently assumes this aspect ratio for the OAV window size.
 # For some beamline this doesn't affect anything as the actual OAV aspect ratio
 # matches. Others need to take it into account to rescale the values stored in

@@ -109,106 +104,57 @@ class OAVParameters:
         return self.max_tip_distance / micronsPerPixel
 
 
-
-
-
-
+@dataclass
+class ZoomParams:
+    microns_per_pixel: tuple[float, float]
+    crosshair: tuple[int, int]
 
-    def __init__(
-        self,
-        zoom_params_file,
-        display_config,
-    ):
-        self.zoom_params_file: str = zoom_params_file
-        self.display_config: str = display_config
-
-    def update_on_zoom(self, value, xsize, ysize, *args, **kwargs):
-        xsize, ysize = int(xsize), int(ysize)
-        if isinstance(value, str) and value.endswith("x"):
-            value = value.strip("x")
-        zoom = float(value)
-        self.load_microns_per_pixel(zoom, xsize, ysize)
-        self.beam_centre_i, self.beam_centre_j = self.get_beam_position_from_zoom(
-            zoom, xsize, ysize
-        )
-
-    def load_microns_per_pixel(self, zoom: float, xsize: int, ysize: int) -> None:
-        """
-        Loads the microns per x pixel and y pixel for a given zoom level. These are
-        currently generated by GDA, though hyperion could generate them in future.
-        """
-        tree = et.parse(self.zoom_params_file)
-        self.micronsPerXPixel = self.micronsPerYPixel = None
-        root = tree.getroot()
-        levels = root.findall(".//zoomLevel")
-        for node in levels:
-            if _get_element_as_float(node, "level") == zoom:
-                self.micronsPerXPixel = (
-                    _get_element_as_float(node, "micronsPerXPixel")
-                    * DEFAULT_OAV_WINDOW[0]
-                    / xsize
-                )
-                self.micronsPerYPixel = (
-                    _get_element_as_float(node, "micronsPerYPixel")
-                    * DEFAULT_OAV_WINDOW[1]
-                    / ysize
-                )
-        if self.micronsPerXPixel is None or self.micronsPerYPixel is None:
-            raise OAVError_ZoomLevelNotFound(
-                f"""
-            Could not find the micronsPer[X,Y]Pixel parameters in
-            {self.zoom_params_file} for zoom level {zoom}.
-            """
-            )
 
-
-
-
-
-        Extracts the beam location in pixels `xCentre` `yCentre`, for a requested zoom \
-        level. The beam location is manually inputted by the beamline operator on GDA \
-        by clicking where on screen a scintillator lights up, and stored in the \
-        display.configuration file.
-        """
-        crosshair_x_line = None
-        crosshair_y_line = None
-        with open(self.display_config) as f:
-            file_lines = f.readlines()
-        for i in range(len(file_lines)):
-            if file_lines[i].startswith("zoomLevel = " + str(zoom)):
-                crosshair_x_line = file_lines[i + 1]
-                crosshair_y_line = file_lines[i + 2]
-                break
-
-        if crosshair_x_line is None or crosshair_y_line is None:
-            raise OAVError_BeamPositionNotFound(
-                f"Could not extract beam position at zoom level {zoom}"
-            )
+class OAVConfig:
+    """ Read the OAV config files and return a dictionary of {'zoom_level': ZoomParams}\
+    with information about microns per pixels and crosshairs.
+    """
 
-
-
-        )
-        beam_centre_j = (
-            int(crosshair_y_line.split(" = ")[1]) * ysize / DEFAULT_OAV_WINDOW[1]
-        )
-        LOGGER.info(f"Beam centre: {beam_centre_i, beam_centre_j}")
-        return int(beam_centre_i), int(beam_centre_j)
+    def __init__(self, zoom_params_file: str, display_config_file: str):
+        self.zoom_params = self._get_zoom_params(zoom_params_file)
+        self.display_config = self._get_display_config(display_config_file)
 
-    def
-
-
-
-        Calculates the distance between the beam centre and the given (horizontal, vertical).
-
-        Args:
-            horizontal_pixels (int): The x (camera coordinates) value in pixels.
-            vertical_pixels (int): The y (camera coordinates) value in pixels.
-        Returns:
-            The distance between the beam centre and the (horizontal, vertical) point in pixels as a tuple
-            (horizontal_distance, vertical_distance).
-        """
+    def _get_display_config(self, display_config_file: str):
+        with open(display_config_file) as f:
+            file_lines = f.readlines()
+        return file_lines
 
-
-
-
-        )
+    def _get_zoom_params(self, zoom_params_file: str):
+        tree = et.parse(zoom_params_file)
+        root = tree.getroot()
+        return root.findall(".//zoomLevel")
+
+    def _read_zoom_params(self) -> dict:
+        um_per_pix = {}
+        for node in self.zoom_params:
+            zoom = str(_get_element_as_float(node, "level"))
+            um_pix_x = _get_element_as_float(node, "micronsPerXPixel")
+            um_pix_y = _get_element_as_float(node, "micronsPerYPixel")
+            um_per_pix[zoom] = (um_pix_x, um_pix_y)
+        return um_per_pix
+
+    def _read_display_config(self) -> dict:
+        crosshairs = {}
+        for i in range(len(self.display_config)):
+            if self.display_config[i].startswith("zoomLevel"):
+                zoom = self.display_config[i].split(" = ")[1].strip()
+                x = int(self.display_config[i + 1].split(" = ")[1])
+                y = int(self.display_config[i + 2].split(" = ")[1])
+                crosshairs[zoom] = (x, y)
+        return crosshairs
+
+    def get_parameters(self) -> dict[str, ZoomParams]:
+        config = {}
+        um_xy = self._read_zoom_params()
+        bc_xy = self._read_display_config()
+        for zoom_key in list(bc_xy.keys()):
+            config[zoom_key] = ZoomParams(
+                microns_per_pixel=um_xy[zoom_key],
+                crosshair=bc_xy[zoom_key],
+            )
+        return config
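
For context on the oav_parameters.py change above: OAVParameters no longer parses the GDA zoom and display files itself; the new OAVConfig reads them once and returns a ZoomParams per zoom level. A minimal usage sketch follows; the file paths and the "1.0" zoom key are hypothetical, since real beamlines pass their own GDA-generated files and the keys are whatever strings appear after "zoomLevel = " in display.configuration.

```python
from dodal.devices.oav.oav_parameters import OAVConfig

# Hypothetical file paths - beamlines supply their own GDA-generated config files.
config = OAVConfig("jCameraManZoomLevels.xml", "display.configuration")
params = config.get_parameters()  # {"<zoom level>": ZoomParams, ...}

zoom = params["1.0"]  # hypothetical zoom key
um_x, um_y = zoom.microns_per_pixel
crosshair_x, crosshair_y = zoom.crosshair
print(f"{um_x} x {um_y} um/pixel, crosshair at ({crosshair_x}, {crosshair_y})")
```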
dodal/devices/oav/oav_to_redis_forwarder.py

@@ -1,21 +1,20 @@
 import asyncio
-import io
-import pickle
-import uuid
 from collections.abc import Awaitable, Callable
 from datetime import timedelta
+from enum import Enum
+from uuid import uuid4
 
-import numpy as np
 from aiohttp import ClientResponse, ClientSession
 from bluesky.protocols import Flyable, Stoppable
 from ophyd_async.core import (
     AsyncStatus,
+    DeviceVector,
     StandardReadable,
+    observe_value,
     soft_signal_r_and_setter,
     soft_signal_rw,
 )
-from ophyd_async.epics.
-from PIL import Image
+from ophyd_async.epics.core import epics_signal_r
 from redis.asyncio import StrictRedis
 
 from dodal.log import LOGGER

@@ -30,6 +29,22 @@ async def get_next_jpeg(response: ClientResponse) -> bytes:
     return line + await response.content.readuntil(JPEG_STOP_BYTE)
 
 
+class Source(Enum):
+    FULL_SCREEN = 0
+    ROI = 1
+
+
+class OAVSource(StandardReadable):
+    def __init__(
+        self,
+        prefix: str,
+        oav_name: str,
+    ):
+        self.url = epics_signal_r(str, f"{prefix}MJPG_URL_RBV")
+        self.oav_name = oav_name
+        super().__init__()
+
+
 class OAVToRedisForwarder(StandardReadable, Flyable, Stoppable):
     """Forwards OAV image data to redis. To use call:
 

@@ -41,6 +56,9 @@ class OAVToRedisForwarder(StandardReadable, Flyable, Stoppable):
 
     DATA_EXPIRY_DAYS = 7
 
+    # This timeout is the maximum time that the forwarder can be streaming for
+    TIMEOUT = 30
+
     def __init__(
         self,
         prefix: str,

@@ -59,59 +77,80 @@ class OAVToRedisForwarder(StandardReadable, Flyable, Stoppable):
             redis_db: int which redis database to connect to, defaults to 0
             name: str the name of this device
         """
-        self.
+        self.counter = epics_signal_r(int, f"{prefix}CAM:ArrayCounter_RBV")
 
-
-
+        self.sources = DeviceVector(
+            {
+                Source.ROI.value: OAVSource(f"{prefix}MJPG:", "roi"),
+                Source.FULL_SCREEN.value: OAVSource(f"{prefix}XTAL:", "fullscreen"),
+            }
+        )
+        self.selected_source = soft_signal_rw(int)
 
         self.forwarding_task = None
         self.redis_client = StrictRedis(
             host=redis_host, password=redis_password, db=redis_db
         )
 
-        self._stop_flag =
+        self._stop_flag = asyncio.Event()
 
         self.sample_id = soft_signal_rw(int, initial_value=0)
 
-
-
-
+        with self.add_children_as_readables():
+            # The uuid that images are being saved under, this should be monitored for
+            # callbacks to correlate the data
+            self.uuid, self.uuid_setter = soft_signal_r_and_setter(str)
 
         super().__init__(name=name)
 
-    async def _get_frame_and_put_to_redis(
-
-
-        """
+    async def _get_frame_and_put_to_redis(
+        self, redis_uuid: str, response: ClientResponse
+    ):
+        """Stores the raw bytes of the jpeg image in redis. Murko ultimately wants a
+        pickled numpy array of pixel values but raw byes are more space efficient. There
+        may be better ways of doing this, see https://github.com/DiamondLightSource/mx-bluesky/issues/592"""
         jpeg_bytes = await get_next_jpeg(response)
-        self.uuid_setter(
-
-
-
-        await self.redis_client.
-        await self.redis_client.expire(sample_id, timedelta(days=self.DATA_EXPIRY_DAYS))
-        LOGGER.debug(f"Sent frame to redis key {sample_id} with uuid {image_uuid}")
+        self.uuid_setter(redis_uuid)
+        sample_id = await self.sample_id.get_value()
+        redis_key = f"murko:{sample_id}:raw"
+        await self.redis_client.hset(redis_key, redis_uuid, jpeg_bytes)  # type: ignore
+        await self.redis_client.expire(redis_key, timedelta(days=self.DATA_EXPIRY_DAYS))
 
     async def _open_connection_and_do_function(
-        self, function_to_do: Callable[[ClientResponse,
+        self, function_to_do: Callable[[ClientResponse, OAVSource], Awaitable]
     ):
-
+        source_idx = await self.selected_source.get_value()
+        LOGGER.info(
+            f"Forwarding data from sample {await self.sample_id.get_value()} and OAV {source_idx}"
+        )
+        source = self.sources[source_idx]
+        stream_url = await source.url.get_value()
         async with ClientSession() as session:
             async with session.get(stream_url) as response:
-                await function_to_do(response,
+                await function_to_do(response, source)
+
+    async def _stream_to_redis(self, response: ClientResponse, source: OAVSource):
+        """Uses the update of the frame counter as a trigger to pull an image off the OAV
+        and into redis.
 
-
-
-
-
+        The frame counter is continually increasing on the timescales we store data and
+        so can be used as a uuid. If the OAV is updating too quickly we may drop frames
+        but in this case a best effort on getting as many frames as possible is sufficient.
+        """
+        done_status = AsyncStatus(
+            asyncio.wait_for(self._stop_flag.wait(), timeout=self.TIMEOUT)
+        )
+        async for frame_count in observe_value(self.counter, done_status=done_status):
+            redis_uuid = f"{source.oav_name}-{frame_count}-{uuid4()}"
+            await self._get_frame_and_put_to_redis(redis_uuid, response)
 
-    async def _confirm_mjpg_stream(self, response,
+    async def _confirm_mjpg_stream(self, response: ClientResponse, source: OAVSource):
         if response.content_type != "multipart/x-mixed-replace":
-            raise ValueError(f"{
+            raise ValueError(f"{await source.url.get_value()} is not an MJPG stream")
 
     @AsyncStatus.wrap
     async def kickoff(self):
-        self._stop_flag
+        self._stop_flag.clear()
         await self._open_connection_and_do_function(self._confirm_mjpg_stream)
         self.forwarding_task = asyncio.create_task(
            self._open_connection_and_do_function(self._stream_to_redis)

@@ -125,5 +164,8 @@ class OAVToRedisForwarder(StandardReadable, Flyable, Stoppable):
     @AsyncStatus.wrap
     async def stop(self, success=True):
         if self.forwarding_task:
-
+            LOGGER.info(
+                f"Stopping forwarding for source id {await self.selected_source.get_value()}"
+            )
+            self._stop_flag.set()
         await self.forwarding_task
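
A sketch of how the reworked forwarder might be driven from a bluesky plan, given the new selected_source signal and Source enum shown above. Construction of the device, including its redis host and password, is beamline specific and omitted here; this is a sketch, not the canonical plan.

```python
import bluesky.plan_stubs as bps

from dodal.devices.oav.oav_to_redis_forwarder import OAVToRedisForwarder, Source


def forward_roi_frames(forwarder: OAVToRedisForwarder, sample_id: int):
    """Sketch only: stream the ROI OAV feed into redis for one sample."""
    yield from bps.abs_set(forwarder.sample_id, sample_id, wait=True)
    yield from bps.abs_set(forwarder.selected_source, Source.ROI.value, wait=True)
    yield from bps.kickoff(forwarder, wait=True)
    # ... frames now land in the murko:<sample_id>:raw hash until stop is called ...
    yield from bps.stop(forwarder)
```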
dodal/devices/oav/pin_image_recognition/__init__.py

@@ -4,14 +4,15 @@ import time
 import numpy as np
 from numpy.typing import NDArray
 from ophyd_async.core import (
+    Array1D,
     AsyncStatus,
-    HintedSignal,
     StandardReadable,
+    StandardReadableFormat,
     observe_value,
     soft_signal_r_and_setter,
     soft_signal_rw,
 )
-from ophyd_async.epics.
+from ophyd_async.epics.core import epics_signal_r
 
 from dodal.devices.oav.pin_image_recognition.utils import (
     ARRAY_PROCESSING_FUNCTIONS_MAP,

@@ -22,7 +23,8 @@ from dodal.devices.oav.pin_image_recognition.utils import (
 )
 from dodal.log import LOGGER
 
-Tip
+# Tip position in x, y pixel coordinates
+Tip = Array1D[np.int32]
 
 
 class InvalidPinException(Exception):

@@ -45,7 +47,7 @@ class PinTipDetection(StandardReadable):
     no tip is found after this time it will not error but instead return {INVALID_POSITION}.
     """
 
-    INVALID_POSITION = (
+    INVALID_POSITION = np.array([np.iinfo(np.int32).min, np.iinfo(np.int32).min])
 
     def __init__(self, prefix: str, name: str = ""):
         self._prefix: str = prefix

@@ -84,16 +86,16 @@
                 self.triggered_top_edge,
                 self.triggered_bottom_edge,
             ],
-
+            format=StandardReadableFormat.HINTED_SIGNAL,
         )
 
         super().__init__(name=name)
 
     def _set_triggered_values(self, results: SampleLocation):
-
-        if tip == self.INVALID_POSITION:
+        if results.tip_x is None or results.tip_y is None:
             raise InvalidPinException
         else:
+            tip = np.array([results.tip_x, results.tip_y])
             self._tip_setter(tip)
             self._top_edge_setter(results.edge_top)
             self._bottom_edge_setter(results.edge_bottom)
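
The PinTipDetection change above makes the invalid-position sentinel a numpy int32 array, so callers should compare against it with np.array_equal rather than ==. A hedged sketch of reading the tip in a plan, assuming the device still exposes the triggered_tip signal as in previous releases:

```python
import bluesky.plan_stubs as bps
import numpy as np

from dodal.devices.oav.pin_image_recognition import PinTipDetection


def read_pin_tip(pin_tip: PinTipDetection):
    """Sketch only: trigger tip detection and reject the invalid sentinel."""
    yield from bps.trigger(pin_tip, wait=True)
    tip = yield from bps.rd(pin_tip.triggered_tip)
    # INVALID_POSITION is now an int32 array, so use an array comparison.
    if np.array_equal(tip, PinTipDetection.INVALID_POSITION):
        raise ValueError("No pin tip found within the detection timeout")
    return tip
```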
dodal/devices/oav/{grid_overlay.py → snapshots/grid_overlay.py}

@@ -1,14 +1,8 @@
-# type: ignore # OAV will soon be ophyd-async, see https://github.com/DiamondLightSource/dodal/issues/716
 from enum import Enum
 from functools import partial
-from os.path import join as path_join
 
-from ophyd import Component, Signal
 from PIL import Image, ImageDraw
 
-from dodal.devices.areadetector.plugins.MJPG import MJPG
-from dodal.log import LOGGER
-
 
 class Orientation(Enum):
     horizontal = 0

@@ -20,7 +14,7 @@ def _add_parallel_lines_to_image(
     start_x: int,
     start_y: int,
     line_length: int,
-    spacing:
+    spacing: float,
     num_lines: int,
     orientation=Orientation.horizontal,
 ):

@@ -38,7 +32,7 @@ def _add_parallel_lines_to_image(
         start_x (int): The x coordinate (in pixels) of the start of the initial line.
         start_y (int): The y coordinate (in pixels) of the start of the initial line.
         line_length (int): The length of each of the parallel lines in pixels.
-        spacing (
+        spacing (float): The spacing, in pixels, between each parallel line. Strictly, \
            there are spacing-1 pixels between each line
         num_lines (int): The total number of parallel lines to draw.
         orientation (Orientation): The orientation (horizontal or vertical) of the \

@@ -46,13 +40,13 @@ def _add_parallel_lines_to_image(
     lines = [
         (
             (
-                (start_x, start_y + i * spacing),
-                (start_x + line_length, start_y + i * spacing),
+                (start_x, start_y + int(i * spacing)),
+                (start_x + line_length, start_y + int(i * spacing)),
             )
             if orientation == Orientation.horizontal
             else (
-                (start_x + i * spacing, start_y),
-                (start_x + i * spacing, start_y + line_length),
+                (start_x + int(i * spacing), start_y),
+                (start_x + int(i * spacing), start_y + line_length),
             )
         )
         for i in range(num_lines)

@@ -76,7 +70,7 @@ def add_grid_border_overlay_to_image(
     image: Image.Image,
     top_left_x: int,
     top_left_y: int,
-    box_width:
+    box_width: float,
     num_boxes_x: int,
     num_boxes_y: int,
 ):

@@ -84,16 +78,16 @@
         image,
         start_x=top_left_x,
         start_y=top_left_y,
-        line_length=num_boxes_y * box_width,
-        spacing=num_boxes_x * box_width,
+        line_length=int(num_boxes_y * box_width),
+        spacing=int(num_boxes_x * box_width),
         num_lines=2,
     )
     _add_horizontal_parallel_lines_to_image(
         image,
         start_x=top_left_x,
         start_y=top_left_y,
-        line_length=num_boxes_x * box_width,
-        spacing=num_boxes_y * box_width,
+        line_length=int(num_boxes_x * box_width),
+        spacing=int(num_boxes_y * box_width),
         num_lines=2,
     )
 

@@ -102,60 +96,23 @@ def add_grid_overlay_to_image(
     image: Image.Image,
     top_left_x: int,
     top_left_y: int,
-    box_width:
+    box_width: float,
     num_boxes_x: int,
     num_boxes_y: int,
 ):
     _add_vertical_parallel_lines_to_image(
         image,
-        start_x=top_left_x + box_width,
+        start_x=int(top_left_x + box_width),
         start_y=top_left_y,
-        line_length=num_boxes_y * box_width,
+        line_length=int(num_boxes_y * box_width),
         spacing=box_width,
         num_lines=num_boxes_x - 1,
     )
     _add_horizontal_parallel_lines_to_image(
         image,
         start_x=top_left_x,
-        start_y=top_left_y + box_width,
-        line_length=num_boxes_x * box_width,
+        start_y=int(top_left_y + box_width),
+        line_length=int(num_boxes_x * box_width),
         spacing=box_width,
         num_lines=num_boxes_y - 1,
     )
-
-
-class SnapshotWithGrid(MJPG):
-    top_left_x = Component(Signal)
-    top_left_y = Component(Signal)
-    box_width = Component(Signal)
-    num_boxes_x = Component(Signal)
-    num_boxes_y = Component(Signal)
-
-    last_path_outer = Component(Signal)
-    last_path_full_overlay = Component(Signal)
-
-    def post_processing(self, image: Image.Image):
-        # Save an unmodified image with no suffix
-        self._save_image(image)
-
-        top_left_x = self.top_left_x.get()
-        top_left_y = self.top_left_y.get()
-        box_width = self.box_width.get()
-        num_boxes_x = self.num_boxes_x.get()
-        num_boxes_y = self.num_boxes_y.get()
-        assert isinstance(filename_str := self.filename.get(), str)
-        assert isinstance(directory_str := self.directory.get(), str)
-        add_grid_border_overlay_to_image(
-            image, top_left_x, top_left_y, box_width, num_boxes_x, num_boxes_y
-        )
-        path = path_join(directory_str, f"{filename_str}_outer_overlay.png")
-        self.last_path_outer.put(path)
-        LOGGER.info(f"Saving grid outer edge at {path}")
-        image.save(path)
-        add_grid_overlay_to_image(
-            image, top_left_x, top_left_y, box_width, num_boxes_x, num_boxes_y
-        )
-        path = path_join(directory_str, f"{filename_str}_grid_overlay.png")
-        self.last_path_full_overlay.put(path)
-        LOGGER.info(f"Saving full grid overlay at {path}")
-        image.save(path)
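
The overlay helpers above now accept a float box_width and truncate to whole pixels internally. A standalone sketch of drawing a grid onto a PIL image; the dimensions and grid values are illustrative only.

```python
from PIL import Image

from dodal.devices.oav.snapshots.grid_overlay import (
    add_grid_border_overlay_to_image,
    add_grid_overlay_to_image,
)

# Stand-in for an OAV snapshot; a real image would come from the MJPG stream.
image = Image.new("RGB", (1024, 768), "grey")

# Border around the whole grid, then the internal grid lines.
add_grid_border_overlay_to_image(
    image, top_left_x=100, top_left_y=100, box_width=20.5, num_boxes_x=15, num_boxes_y=10
)
add_grid_overlay_to_image(
    image, top_left_x=100, top_left_y=100, box_width=20.5, num_boxes_x=15, num_boxes_y=10
)
image.save("grid_preview.png")
```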
dodal/devices/oav/snapshots/snapshot_with_beam_centre.py (new file)

@@ -0,0 +1,64 @@
+from ophyd_async.core import Reference, SignalR
+from PIL import Image, ImageDraw
+
+from dodal.devices.areadetector.plugins.MJPG import MJPG
+
+CROSSHAIR_LENGTH_PX = 20
+CROSSHAIR_OUTLINE_COLOUR = "Black"
+CROSSHAIR_FILL_COLOUR = "White"
+
+
+def draw_crosshair(image: Image.Image, beam_x: int, beam_y: int):
+    draw = ImageDraw.Draw(image)
+    OUTLINE_WIDTH = 1
+    HALF_LEN = CROSSHAIR_LENGTH_PX / 2
+    draw.rectangle(
+        [
+            beam_x - OUTLINE_WIDTH,
+            beam_y - HALF_LEN - OUTLINE_WIDTH,
+            beam_x + OUTLINE_WIDTH,
+            beam_y + HALF_LEN + OUTLINE_WIDTH,
+        ],
+        fill=CROSSHAIR_OUTLINE_COLOUR,
+    )
+    draw.rectangle(
+        [
+            beam_x - HALF_LEN - OUTLINE_WIDTH,
+            beam_y - OUTLINE_WIDTH,
+            beam_x + HALF_LEN + OUTLINE_WIDTH,
+            beam_y + OUTLINE_WIDTH,
+        ],
+        fill=CROSSHAIR_OUTLINE_COLOUR,
+    )
+    draw.line(
+        ((beam_x, beam_y - HALF_LEN), (beam_x, beam_y + HALF_LEN)),
+        fill=CROSSHAIR_FILL_COLOUR,
+    )
+    draw.line(
+        ((beam_x - HALF_LEN, beam_y), (beam_x + HALF_LEN, beam_y)),
+        fill=CROSSHAIR_FILL_COLOUR,
+    )
+
+
+class SnapshotWithBeamCentre(MJPG):
+    """A child of MJPG which, when triggered, draws an outlined crosshair at the beam
+    centre in the image and saves the image to disk."""
+
+    def __init__(
+        self,
+        prefix: str,
+        beam_x_signal: SignalR,
+        beam_y_signal: SignalR,
+        name: str = "",
+    ) -> None:
+        with self.add_children_as_readables():
+            self._beam_centre_i_ref = Reference(beam_x_signal)
+            self._beam_centre_j_ref = Reference(beam_y_signal)
+        super().__init__(prefix, name)
+
+    async def post_processing(self, image: Image.Image):
+        beam_x = await self._beam_centre_i_ref().get_value()
+        beam_y = await self._beam_centre_j_ref().get_value()
+        draw_crosshair(image, beam_x, beam_y)
+
+        await self._save_image(image)
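
draw_crosshair from the new snapshot_with_beam_centre.py can also be exercised on its own; a small sketch with an illustrative beam centre:

```python
from PIL import Image

from dodal.devices.oav.snapshots.snapshot_with_beam_centre import draw_crosshair

image = Image.new("RGB", (1024, 768), "grey")  # stand-in for an OAV snapshot
draw_crosshair(image, beam_x=512, beam_y=384)  # illustrative beam centre in pixels
image.save("beam_centre_preview.png")
```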
dodal/devices/oav/snapshots/snapshot_with_grid.py (new file)

@@ -0,0 +1,57 @@
+from os.path import join as path_join
+
+from ophyd_async.core import soft_signal_rw
+from PIL.Image import Image
+
+from dodal.devices.areadetector.plugins.MJPG import IMG_FORMAT, MJPG, asyncio_save_image
+from dodal.devices.oav.snapshots.grid_overlay import (
+    add_grid_border_overlay_to_image,
+    add_grid_overlay_to_image,
+)
+from dodal.log import LOGGER
+
+
+class SnapshotWithGrid(MJPG):
+    def __init__(self, prefix: str, name: str = "") -> None:
+        with self.add_children_as_readables():
+            self.top_left_x = soft_signal_rw(float)
+            self.top_left_y = soft_signal_rw(float)
+            self.box_width = soft_signal_rw(float)
+            self.num_boxes_x = soft_signal_rw(int)
+            self.num_boxes_y = soft_signal_rw(int)
+
+            self.last_path_outer = soft_signal_rw(str)
+            self.last_path_full_overlay = soft_signal_rw(str)
+
+        super().__init__(prefix, name)
+
+    async def post_processing(self, image: Image):
+        # Save an unmodified image with no suffix
+        await self._save_image(image)
+
+        top_left_x = await self.top_left_x.get_value()
+        top_left_y = await self.top_left_y.get_value()
+        box_width = await self.box_width.get_value()
+        num_boxes_x = await self.num_boxes_x.get_value()
+        num_boxes_y = await self.num_boxes_y.get_value()
+
+        assert isinstance(filename_str := await self.filename.get_value(), str)
+        assert isinstance(directory_str := await self.directory.get_value(), str)
+
+        add_grid_border_overlay_to_image(
+            image, int(top_left_x), int(top_left_y), box_width, num_boxes_x, num_boxes_y
+        )
+
+        path = path_join(directory_str, f"{filename_str}_outer_overlay.{IMG_FORMAT}")
+        await self.last_path_outer.set(path, wait=True)
+        LOGGER.info(f"Saving grid outer edge at {path}")
+        await asyncio_save_image(image, path)
+
+        add_grid_overlay_to_image(
+            image, int(top_left_x), int(top_left_y), box_width, num_boxes_x, num_boxes_y
+        )
+
+        path = path_join(directory_str, f"{filename_str}_grid_overlay.{IMG_FORMAT}")
+        await self.last_path_full_overlay.set(path, wait=True)
+        LOGGER.info(f"Saving full grid overlay at {path}")
+        await asyncio_save_image(image, path)