ewoksid02 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- ewoksid02/__init__.py +0 -0
- ewoksid02/ocl/__init__.py +0 -0
- ewoksid02/resources/__init__.py +8 -0
- ewoksid02/resources/saxs_loop.json +96 -0
- ewoksid02/resources/template_saxs.yaml +37 -0
- ewoksid02/scripts/__init__.py +0 -0
- ewoksid02/scripts/__main__.py +70 -0
- ewoksid02/scripts/parsers.py +224 -0
- ewoksid02/scripts/saxs/__init__.py +0 -0
- ewoksid02/scripts/saxs/main.py +255 -0
- ewoksid02/scripts/saxs/slurm_python_post_script.py +3 -0
- ewoksid02/scripts/saxs/slurm_python_pre_script.py +5 -0
- ewoksid02/scripts/utils.py +21 -0
- ewoksid02/scripts/xpcs/__init__.py +0 -0
- ewoksid02/scripts/xpcs/__main__.py +3 -0
- ewoksid02/tasks/__init__.py +7 -0
- ewoksid02/tasks/averagetask.py +179 -0
- ewoksid02/tasks/azimuthaltask.py +272 -0
- ewoksid02/tasks/cavingtask.py +170 -0
- ewoksid02/tasks/dahuprocessingtask.py +71 -0
- ewoksid02/tasks/end.py +35 -0
- ewoksid02/tasks/id02processingtask.py +2582 -0
- ewoksid02/tasks/looptask.py +672 -0
- ewoksid02/tasks/metadatatask.py +879 -0
- ewoksid02/tasks/normalizationtask.py +204 -0
- ewoksid02/tasks/scalerstask.py +46 -0
- ewoksid02/tasks/secondaryscatteringtask.py +159 -0
- ewoksid02/tasks/sumtask.py +45 -0
- ewoksid02/tests/__init__.py +3 -0
- ewoksid02/tests/conftest.py +639 -0
- ewoksid02/tests/debug.py +64 -0
- ewoksid02/tests/test_2scat_node.py +119 -0
- ewoksid02/tests/test_ave_node.py +106 -0
- ewoksid02/tests/test_azim_node.py +89 -0
- ewoksid02/tests/test_cave_node.py +118 -0
- ewoksid02/tests/test_norm_node.py +190 -0
- ewoksid02/tests/test_saxs.py +69 -0
- ewoksid02/tests/test_sumtask.py +10 -0
- ewoksid02/tests/utils.py +514 -0
- ewoksid02/utils/__init__.py +22 -0
- ewoksid02/utils/average.py +158 -0
- ewoksid02/utils/blissdata.py +1157 -0
- ewoksid02/utils/caving.py +851 -0
- ewoksid02/utils/cupyutils.py +42 -0
- ewoksid02/utils/io.py +722 -0
- ewoksid02/utils/normalization.py +804 -0
- ewoksid02/utils/pyfai.py +424 -0
- ewoksid02/utils/secondaryscattering.py +597 -0
- ewoksid02-0.1.0.dist-info/METADATA +76 -0
- ewoksid02-0.1.0.dist-info/RECORD +54 -0
- ewoksid02-0.1.0.dist-info/WHEEL +5 -0
- ewoksid02-0.1.0.dist-info/entry_points.txt +5 -0
- ewoksid02-0.1.0.dist-info/licenses/LICENSE.md +20 -0
- ewoksid02-0.1.0.dist-info/top_level.txt +1 -0
@@ -0,0 +1,1157 @@

import logging
import os
import time
from importlib.metadata import version
from packaging.version import Version
from typing import Any, Dict, Optional, Tuple

import h5py
import numpy
import numpy as np
from blissdata.beacon.data import BeaconData
from blissdata.h5api import dynamic_hdf5

if Version(version("blissdata")) >= Version("2.3.0"):
    from blissdata.exceptions import (
        IndexNoMoreThereError,
        IndexNotYetThereError,
        IndexWontBeThereError,
    )
else:
    from blissdata.redis_engine.exceptions import (
        IndexNoMoreThereError,
        IndexNotYetThereError,
        IndexWontBeThereError,
    )
from blissdata.redis_engine.scan import Scan
from blissdata.redis_engine.store import DataStore
from silx.io.h5py_utils import open_item as open_item_silx

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)

HEADERS_KEY_MONITOR = "HSI1"
HEADERS_KEY_EXPOSURE_TIME = "HSTime"

LIMA_URL_TEMPLATE_ID02 = (
    "{dirname}/{images_prefix}{{file_index}}.h5::/entry_0000/measurement/data"
)
IMAGE_PREFIX_TEMPLATE_ID02 = "{collection_name}_{img_acq_device}_{scan_number}_"
FILE_SCAN_FORMAT_ID02 = "{COLLECTION_DATASET}_{SCAN:05}_{DETECTOR}"
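
As a concrete note on these naming templates (an editorial sketch, not part of the packaged module; the values are invented): they expand with str.format, and {SCAN:05} zero-pads the scan number.

from ewoksid02.utils.blissdata import FILE_SCAN_FORMAT_ID02

name = FILE_SCAN_FORMAT_ID02.format(
    COLLECTION_DATASET="sample_dataset", SCAN=12, DETECTOR="eiger2"
)
# name == "sample_dataset_00012_eiger2"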

def get_datastore(beacon_host: str = None) -> Optional[DataStore]:
    """Return the DataStore object from blissdata.

    Inputs:
    - beacon_host (str): hostname and beacon port
    """
    try:
        # Only override the environment when a host is actually provided
        if beacon_host:
            os.environ["BEACON_HOST"] = beacon_host
        return DataStore(url=BeaconData().get_redis_data_db())
    except Exception:
        return None
def load_scan(
    scan_memory_url: str, wait_until_start: bool = True, beacon_host: str = None
) -> Optional[Scan]:
    """
    Load a scan from the data store using the provided scan memory URL.

    Inputs:
    - scan_memory_url (str): The URL of the scan memory to load.
    - wait_until_start (bool, optional): Whether to wait until the scan starts. Defaults to True.
    - beacon_host (str): hostname and beacon port
    Outputs:
    - Scan: The loaded scan object, or None if the datastore is unreachable.
    """
    datastore = get_datastore(beacon_host=beacon_host)
    if not datastore:
        return None
    if Version(version("blissdata")) >= Version("2.0.0"):
        scan = datastore.load_scan(key=scan_memory_url)
    else:
        scan = datastore.load_scan(key=scan_memory_url, scan_cls=Scan)
    if wait_until_start:
        while scan.state < 2:
            scan.update(block=False)
    return scan
def get_stream(
    stream_name: str = None,
    detector_name: str = None,
    scan: Scan = None,
    scan_memory_url: str = None,
    beacon_host: str = None,
):
    """
    Retrieve the (Lima) stream of a specific detector from the scan memory.
    It can also be another stream (e.g. for scalers processing).

    Inputs:
    - stream_name (str): Name of the stream to slice
    - detector_name (str): The name of the detector.
    - scan (Scan): blissdata.redis_engine.scan.Scan object
    - scan_memory_url (str): The URL of the scan memory.
    - beacon_host (str): hostname and beacon port
    Outputs:
    - LimaStream: The Lima stream object for the specified detector.
    """
    if not stream_name and detector_name:
        stream_name = f"{detector_name}:image"

    if not scan:
        scan = load_scan(
            scan_memory_url=scan_memory_url,
            wait_until_start=True,
            beacon_host=beacon_host,
        )

    if stream_name.split(":")[-1] == "image":
        if Version(version("blissdata")) >= Version("2.0.0"):
            stream = scan.streams.get(stream_name)
        else:
            from blissdata.stream import LimaStream

            stream_ref = scan.streams.get(stream_name)
            if stream_ref is None:
                return None
            stream = LimaStream(stream=stream_ref)
    else:
        stream = scan.streams.get(stream_name)
    return stream
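
A minimal usage sketch of the two entry points above (editorial illustration, not part of the wheel; the Beacon host, scan key and detector name are invented placeholders):

from ewoksid02.utils.blissdata import load_scan, get_stream

scan = load_scan(
    scan_memory_url="esrf:scan:0001-ABCD",  # placeholder scan key
    beacon_host="id02:25000",               # placeholder host:port
)
if scan is not None:
    stream = get_stream(detector_name="eiger2", scan=scan)  # "eiger2" is illustrative
    print(len(stream))  # number of frames currently available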
def get_lima_url_template_args_id02(
    scan_number: int,
    detector_name: str,
    collection_name: str = None,
    scan_number_format: str = "%05d",
    image_prefix_template: str = IMAGE_PREFIX_TEMPLATE_ID02,
) -> Optional[Dict[str, str]]:
    lima_url_template_args = {
        "images_prefix": image_prefix_template.format(
            collection_name=collection_name,
            img_acq_device=detector_name,
            scan_number=scan_number_format % scan_number,
        )
    }
    return lima_url_template_args
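
For reference, a sketch of what the ID02 templates expand to (editorial; the directory and names are invented). Note that the Lima URL is only partially formatted: {file_index} stays as a placeholder for blissdata's dynamic_hdf5 to fill in.

from ewoksid02.utils.blissdata import (
    LIMA_URL_TEMPLATE_ID02,
    get_lima_url_template_args_id02,
)

args = get_lima_url_template_args_id02(
    scan_number=12, detector_name="eiger2", collection_name="sample"
)
# args == {"images_prefix": "sample_eiger2_00012_"}

url = LIMA_URL_TEMPLATE_ID02.format(dirname="/data/visitor", **args)
# "/data/visitor/sample_eiger2_00012_{file_index}.h5::/entry_0000/measurement/data"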
def get_length_dataset_dynamic_file(
    filename_data: str,
    scan_nb: int,
    detector_name: str,
    lima_url_template="",
    lima_url_template_args={},
    subscan=1,
):
    params_dynamic_file = {
        "file": filename_data,
        "lima_names": [detector_name],
        "lima_url_template": lima_url_template,
        "lima_url_template_args": lima_url_template_args,
    }

    with dynamic_hdf5.File(**params_dynamic_file) as root:
        lima_dataset = root[f"{scan_nb}.{subscan}/instrument/{detector_name}/data"]
        length_dataset = len(lima_dataset)
    return length_dataset
def get_length_dataset_static_file(
    filename_data: str,
    data_path: str,
):
    with h5py.File(filename_data, "r") as f:
        if data_path in f:
            return len(f[data_path])
        logger.error(f"{data_path} not found in {filename_data}")
        return None
def track_length_dataset_dynamic_file(
    lima_name,
    scan_memory_url,
    beacon_host=None,
    lima_url_template="",
    lima_url_template_args={},
    subscan=1,
    **kwargs,
):
    scan = load_scan(scan_memory_url=scan_memory_url, beacon_host=beacon_host)
    while scan.state < 2:
        scan.update(block=False)

    master_filename = scan.info["filename"]
    scan_nb = scan.info["scan_nb"]
    nb_points = scan.info["npoints"]

    params_dynamic_file = {
        "file": master_filename,
        "lima_names": [lima_name],
        "lima_url_template": lima_url_template,
        "lima_url_template_args": lima_url_template_args,
        **kwargs,
    }

    wait = True
    while wait:
        with dynamic_hdf5.File(**params_dynamic_file) as root:
            lima_dataset = root[f"{scan_nb}.{subscan}/instrument/{lima_name}/data"]

            length = len(lima_dataset)
            if length == nb_points:
                wait = False
            elif scan.state == 4:
                length = len(lima_dataset)
                wait = False
            else:
                scan.update(block=False)
                time.sleep(1)
            yield length

    # Re-check the final length with the arguments the helper actually accepts
    final_length = get_length_dataset_dynamic_file(
        filename_data=master_filename,
        scan_nb=scan_nb,
        detector_name=lima_name,
        lima_url_template=lima_url_template,
        lima_url_template_args=lima_url_template_args,
        subscan=subscan,
    )
    if final_length != length:
        return final_length
def get_length_lima_stream(
    detector_name,
    scan: Scan = None,
    scan_memory_url: str = None,
    beacon_host: str = None,
    lima_url_template="",
    lima_url_template_args={},
    subscan=1,
):
    limastream = get_stream(
        scan=scan,
        scan_memory_url=scan_memory_url,
        detector_name=detector_name,
        beacon_host=beacon_host,
    )

    # Work around a Lima bug with memory when mask processing is active:
    # taking the length of a LimaStream can crash, in which case the length
    # is read from the (dynamic) HDF5 file instead.
    try:
        last_index_available = len(limastream)
    except Exception:
        logger.warning("Data is no longer available from Lima memory")
        if not scan:
            scan = load_scan(scan_memory_url=scan_memory_url, beacon_host=beacon_host)
        last_index_available = get_length_dataset_dynamic_file(
            filename_data=scan.info["filename"],
            scan_nb=scan.info["scan_nb"],
            detector_name=detector_name,
            lima_url_template=lima_url_template,
            lima_url_template_args=lima_url_template_args,
            subscan=subscan,
        )
    return last_index_available
def track_length_dataset(
    lima_name,
    scan_memory_url,
    beacon_host=None,
    lima_url_template="",
    lima_url_template_args={},
    subscan=1,
    **kwargs,
):
    scan = load_scan(scan_memory_url=scan_memory_url, beacon_host=beacon_host)
    nb_points = scan.info["npoints"]

    limastream_params = {
        "scan_memory_url": scan_memory_url,
        "detector_name": lima_name,
    }

    wait = True
    memory_available = True
    last_index_available = 0

    while wait:
        if memory_available:
            limastream = get_stream(**limastream_params)
            try:
                _ = limastream[0]
                last_index_available = len(limastream)
                logger.info("Data is available from Lima memory")
            except IndexNotYetThereError:
                pass
            except IndexNoMoreThereError:
                logger.warning("Data is no longer available from Lima memory")
                memory_available = False
            except RuntimeError:
                pass

            if last_index_available == nb_points:
                wait = False
            elif scan.state == 4:
                last_index_available = len(limastream)
                wait = False
            else:
                scan.update(block=False)
                time.sleep(1)
        else:
            logger.info("Data is only available in the files")
            params_dynamic_file = {
                "file": scan.info["filename"],
                "lima_names": [lima_name],
                "lima_url_template": lima_url_template,
                "lima_url_template_args": lima_url_template_args,
                **kwargs,
            }

            with dynamic_hdf5.File(**params_dynamic_file) as root:
                scan_nb = scan.info["scan_nb"]
                lima_dataset = root[f"{scan_nb}.{subscan}/instrument/{lima_name}/data"]

                last_index_available = len(lima_dataset)
                if last_index_available == nb_points:
                    wait = False
                elif scan.state == 4:
                    last_index_available = len(lima_dataset)
                    wait = False
                else:
                    scan.update(block=False)
                    time.sleep(1)

        yield last_index_available
def track_dataset(
    lima_name,
    scan_memory_url,
    beacon_host=None,
    lima_url_template="",
    lima_url_template_args={},
    subscan=1,
    max_slice_size=10,
    start_from_memory=True,
    **kwargs,
):
    scan = load_scan(scan_memory_url=scan_memory_url, beacon_host=beacon_host)
    nb_points = scan.info["npoints"]

    limastream_params = {
        "scan_memory_url": scan_memory_url,
        "detector_name": lima_name,
    }

    wait = True
    memory_available = start_from_memory
    last_index_read = 0
    while wait:
        dataset = None

        if memory_available:
            try:
                scan.update(block=False)
                limastream = get_stream(**limastream_params)
            except Exception:
                wait = False
                continue

            try:
                _ = limastream[0]
                last_index_available = len(limastream)
                slice_end = min(last_index_read + max_slice_size, last_index_available)
                dataset = limastream[last_index_read:slice_end]
                # Explicit None check: truth-testing a numpy array raises ValueError
                if dataset is not None and len(dataset) == 0:
                    continue

                logger.info(
                    f"Data retrieved from Lima memory: {slice_end - last_index_read} frames"
                )
                last_index_read = slice_end
            except IndexNotYetThereError:
                continue
            except IndexNoMoreThereError:
                logger.warning(
                    "Data is no longer available from Lima memory. Switching to h5api..."
                )
                memory_available = False
                continue
            except RuntimeError:
                continue

            if last_index_read == nb_points:
                wait = False
            elif scan.state == 4:
                limastream = get_stream(**limastream_params)
                try:
                    length = len(limastream)
                    if length == last_index_read:
                        wait = False
                except Exception:
                    memory_available = False
                    continue

        if not memory_available:
            params_dynamic_file = {
                "file": scan.info["filename"],
                "lima_names": [lima_name],
                "lima_url_template": lima_url_template,
                "lima_url_template_args": lima_url_template_args,
                **kwargs,
            }

            with dynamic_hdf5.File(**params_dynamic_file) as root:
                scan_nb = scan.info["scan_nb"]
                lima_dataset = root[f"{scan_nb}.{subscan}/instrument/{lima_name}/data"]

                length = len(lima_dataset)
                if length > last_index_read:
                    slice_end = min(last_index_read + max_slice_size, length)
                    dataset = lima_dataset[last_index_read:slice_end]
                    logger.info(f"Data retrieved from hdf5 file: {len(dataset)} frames")
                    last_index_read = slice_end

                if last_index_read == nb_points:
                    wait = False
                elif scan.state == 4 and last_index_read == len(lima_dataset):
                    wait = False
                else:
                    scan.update(block=False)

        yield dataset
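
A sketch of how such a tracking generator might be consumed (editorial illustration, not part of the wheel; the scan key and detector name are placeholders):

from ewoksid02.utils.blissdata import track_dataset

for chunk in track_dataset(
    lima_name="eiger2",                     # placeholder detector
    scan_memory_url="esrf:scan:0001-ABCD",  # placeholder scan key
    max_slice_size=10,
):
    if chunk is None:
        continue  # nothing new this iteration
    print(f"got {len(chunk)} frames of shape {chunk.shape[1:]}")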
def get_available_dataset(
    lima_name: str,
    scan_memory_url: str = "",
    lima_url_template: str = "",
    lima_url_template_args: Dict[str, Any] = {},
    scan_nb: int = None,
    subscan: int = 1,
    last_index_read: int = 0,
    max_slice_size: int = 10,
    start_from_memory: bool = True,
    range_index_read: Optional[Tuple[int, int]] = None,
    data_filename: str = None,
) -> Optional[dict]:
    """
    Retrieve the available dataset from either Lima memory or an HDF5 file.

    Args:
        lima_name (str): Name of the detector.
        scan_memory_url (str): URL to the scan memory.
        lima_url_template (str, optional): Template for Lima file URLs. Defaults to "".
        lima_url_template_args (dict, optional): Arguments for the Lima URL template. Defaults to {}.
        scan_nb (int, optional): Scan number, needed when reading offline.
        subscan (int, optional): Subscan number. Defaults to 1.
        last_index_read (int, optional): Last index read from the dataset. Defaults to 0.
        max_slice_size (int, optional): Maximum number of frames to read in one slice. Defaults to 10.
        start_from_memory (bool, optional): Whether to start reading from memory. Defaults to True.
        range_index_read (tuple, optional): Explicit (start, end) index range to read.
        data_filename (str, optional): HDF5 file to read from when no scan memory URL is given.

    Returns:
        Optional[dict]: {"dataset": ndarray, "index_range": (start, end)}, or None if no data is available.
    """
    # The slicing helpers take an explicit index range; derive one from the
    # incremental-read arguments when no explicit range was requested.
    if range_index_read is not None:
        index_range = range_index_read
    else:
        index_range = (last_index_read, last_index_read + max_slice_size)

    if scan_memory_url:
        return _slice_dataset_online(
            scan_memory_url=scan_memory_url,
            detector_name=lima_name,
            lima_url_template=lima_url_template,
            lima_url_template_args=lima_url_template_args,
            subscan=subscan,
            index_range=index_range,
            start_from_memory=start_from_memory,
        )
    # Same HDF5 path convention as used elsewhere in this module
    return _slice_dataset_offline(
        filename_data=data_filename,
        h5path_to_data=f"{scan_nb}.{subscan}/instrument/{lima_name}/data",
        index_range=index_range,
    )
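
An illustrative polling loop built on get_available_dataset (editorial sketch; the file name, scan number and detector are placeholders):

from ewoksid02.utils.blissdata import get_available_dataset

last_index = 0
while True:
    result = get_available_dataset(
        lima_name="eiger2",               # placeholder detector
        data_filename="/data/sample.h5",  # placeholder file, offline reading
        scan_nb=12,
        last_index_read=last_index,
        max_slice_size=50,
    )
    if result is None:
        break
    frames = result["dataset"]
    last_index = result["index_range"][1]
    print(f"processed a batch of {len(frames)} frames, up to index {last_index}")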
def _get_nb_frames_available_from_stream(
    detector_name: str = None,
    stream_name: str = None,
    scan: Scan = None,
    scan_memory_url: str = None,
    beacon_host: str = None,
    index_range_last: tuple = None,
):
    if not scan:
        scan = load_scan(scan_memory_url=scan_memory_url, beacon_host=beacon_host)
    if index_range_last is None:
        last_index = 0
    else:
        last_index = index_range_last[-1]

    # Initialize so the function returns None if the scan dies immediately
    nb_available_frames = None
    wait_for_data = True
    while wait_for_data:
        try:
            scan.update(block=False)
        except Exception as e:
            # Handle canceled or non-existent scans
            logger.warning(f"Scan canceled or does not exist. Exiting. {e}")
            wait_for_data = False
            continue

        stream = get_stream(
            stream_name=stream_name,
            detector_name=detector_name,
            scan=scan,
            scan_memory_url=scan_memory_url,
            beacon_host=beacon_host,
        )
        nb_available_frames = len(stream)

        if last_index >= nb_available_frames:
            continue

        if nb_available_frames > 0:
            wait_for_data = False

    return nb_available_frames
def _get_nb_frames_available_from_file(
    filename_data: str,
    h5path: str,
):
    params = {
        "filename": filename_data,
        "name": "/",
    }
    with open_item_silx(**params) as root:
        if h5path not in root:
            logger.error(
                f"Attempt to read data offline but {h5path} not in {filename_data}"
            )
            return None

        h5item = root[h5path]
        if isinstance(h5item, h5py.Dataset):
            # Normally this is a Lima dataset
            nb_frames_available = len(h5item)
        elif isinstance(h5item, h5py.Group):
            # For scalers, take the shortest counter dataset in the group
            nb_frames_available = min(
                len(h5item[i]) for i in h5item if isinstance(h5item[i], h5py.Dataset)
            )
        else:
            logger.error(f"{h5item} is not valid to read data from")
            return None
    return nb_frames_available
def _get_new_slice_limits(
    detector_name: str = None,
    stream_name: str = None,
    scan: Scan = None,
    scan_memory_url: str = None,
    beacon_host: str = None,
    filename_data: str = None,
    h5path: str = None,
    index_range=None,
    index_range_last=None,
    max_slice_size: int = 50,
):
    # Get the first index to read
    if index_range_last is None:
        # Nothing has been read yet
        if index_range is None:
            # No specific range requested
            slice_init = 0
        else:
            # There is a requested range
            slice_init = index_range[0]
    else:
        # Continue from the previous last index
        slice_init = index_range_last[-1]
    slice_end = slice_init + max_slice_size

    if scan_memory_url or scan:
        if not scan:
            scan = load_scan(scan_memory_url=scan_memory_url, beacon_host=beacon_host)

        stream_params = {
            "stream_name": stream_name,
            "detector_name": detector_name,
            "scan": scan,
            "scan_memory_url": scan_memory_url,
            "beacon_host": beacon_host,
        }

        wait_for_data = True
        logger.info("Waiting for data...")
        while wait_for_data:
            try:
                scan.update(block=False)
            except Exception as e:
                # Handle canceled or non-existent scans
                logger.warning(f"Scan canceled or does not exist. Exiting. {e}")
                return None

            if scan.state == 4:
                stream = get_stream(**stream_params)
                if not stream:
                    logger.error(f"No detected stream for {stream_name}. Aborting.")
                    return None
                nb_available_frames = len(stream)
                # The scan is over: take the final length and leave the loop
                wait_for_data = False
            else:
                # The scan is not over; keep waiting until new frames appear
                stream = get_stream(**stream_params)
                if not stream:
                    continue
                nb_available_frames = len(stream)
                if nb_available_frames > slice_init:
                    # There are new frames: leave the loop
                    wait_for_data = False

    else:
        params = {
            "filename": filename_data,
            "name": "/",
        }
        with open_item_silx(**params) as root:
            if h5path not in root:
                logger.error(
                    f"Attempt to read data offline but {h5path} not in {filename_data}"
                )
                return None

            h5item = root[h5path]
            if isinstance(h5item, h5py.Dataset):
                # Normally this is a Lima dataset
                nb_available_frames = len(h5item)
            elif isinstance(h5item, h5py.Group):
                # For scalers, take the shortest counter dataset in the group
                nb_available_frames = min(
                    len(h5item[i])
                    for i in h5item
                    if isinstance(h5item[i], h5py.Dataset)
                )
            else:
                logger.error(f"{h5item} is not valid to read data from")
                return None

    slice_end = min(slice_end, nb_available_frames)
    if index_range is not None:
        slice_end = min(slice_end, index_range[-1])
    if slice_init >= slice_end:
        return None
    return [slice_init, slice_end]
def track_slice_limits(
    detector_name: str = None,
    scan: Scan = None,
    scan_memory_url: str = None,
    beacon_host: str = None,
    filename_data: str = None,
    h5path: str = None,
    index_range=None,
    max_slice_size: int = 50,
):
    new_slice_limits = None
    while True:
        new_slice_limits = _get_new_slice_limits(
            detector_name=detector_name,
            scan=scan,
            scan_memory_url=scan_memory_url,
            beacon_host=beacon_host,
            filename_data=filename_data,
            h5path=h5path,
            max_slice_size=max_slice_size,
            index_range=index_range,
            index_range_last=new_slice_limits,
        )
        if new_slice_limits is None:
            break
        yield new_slice_limits
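
A sketch of consuming the slice-limit generator (editorial illustration; the file and dataset path are placeholders). It yields [start, end] pairs until no new frames appear:

from ewoksid02.utils.blissdata import track_slice_limits

for start, end in track_slice_limits(
    filename_data="/data/sample.h5",          # placeholder file
    h5path="12.1/instrument/eiger2/data",     # placeholder path
    max_slice_size=50,
):
    print(f"new frames available in range {start} -> {end}")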
def _slice_dataset_online(
    stream_name: str = None,
    detector_name: str = None,
    scan: Scan = None,
    scan_memory_url: str = None,
    beacon_host: str = None,
    lima_url_template: str = "",
    lima_url_template_args: Dict[str, Any] = {},
    subscan: int = 1,
    index_range: tuple = None,
    start_from_memory: bool = True,
) -> Optional[dict]:
    """
    Read a dataset from an online source, either from Lima memory or through a dynamic HDF5 file.

    This method attempts to retrieve data from Lima memory first. If the data is no longer
    available in memory, it falls back to reading from the HDF5 file.

    Args:
        stream_name (str, optional): Name of the stream; derived from detector_name if omitted.
        detector_name (str, optional): Name of the detector.
        scan (Scan, optional): Already-loaded scan object; loaded from scan_memory_url if omitted.
        scan_memory_url (str, optional): URL to the scan memory.
        beacon_host (str, optional): hostname and beacon port.
        lima_url_template (str, optional): Template for Lima file URLs. Defaults to "".
        lima_url_template_args (Dict[str, Any], optional): Arguments for the Lima URL template.
        subscan (int, optional): Subscan number. Defaults to 1.
        index_range (tuple, optional): (start, end) indices to read. If None, reads all available data.
        start_from_memory (bool, optional): Whether to start reading from Lima memory. Defaults to True.

    Returns:
        Optional[dict]: {"dataset": ndarray, "index_range": (start, end)}.
    """
    if not stream_name and detector_name:
        stream_name = f"{detector_name}:image"

    if not scan:
        scan = load_scan(scan_memory_url=scan_memory_url, beacon_host=beacon_host)

    memory_available = start_from_memory
    dataset = None
    slice_init = slice_end = None
    wait_for_data = True

    while wait_for_data:
        if memory_available:
            try:
                scan.update(block=False)
                stream = get_stream(
                    stream_name=stream_name,
                    scan=scan,
                    scan_memory_url=scan_memory_url,
                    beacon_host=beacon_host,
                )
            except Exception as e:
                # Handle canceled or non-existent scans
                logger.warning(f"Scan canceled or does not exist. Exiting. {e}")
                wait_for_data = False
                continue

            try:
                # Check if the requested frames are available in memory
                if index_range is None:
                    slice_init = 0
                    slice_end = len(stream)
                else:
                    slice_init, slice_end = index_range
                dataset = stream[slice_init:slice_end]
                if dataset is not None:
                    if len(dataset) == 0:
                        dataset = None
                        continue
                    wait_for_data = False
                    logger.info(
                        f"Data retrieved from {detector_name} Lima memory: "
                        f"{len(dataset)} frames between {slice_init} -> {slice_end}"
                    )
                elif scan.state == 4:
                    wait_for_data = False
                    continue
            except IndexNotYetThereError:
                # Frame not yet available
                continue
            except IndexNoMoreThereError:
                logger.warning(
                    f"Data is no longer available from {detector_name} Lima memory. "
                    "Switching to h5api..."
                )
                memory_available = False
                continue
            except IndexWontBeThereError:
                logger.warning(f"No more data can be retrieved from {detector_name}")
                wait_for_data = False
                continue
            except Exception as e:
                logger.warning(
                    f"Exception! Looks like {detector_name} Lima memory is not reachable. "
                    f"Switching to file... {e}"
                )
                memory_available = False
                continue

        if not memory_available:
            params_dynamic_file = {
                "file": scan.info["filename"],
                "lima_names": [detector_name],
                "lima_url_template": lima_url_template,
                "lima_url_template_args": lima_url_template_args,
            }

            with dynamic_hdf5.File(**params_dynamic_file) as root:
                scan_nb = scan.info["scan_nb"]
                dset_data = root[f"{scan_nb}.{subscan}/instrument/{detector_name}/data"]

                if index_range is None:
                    slice_init = 0
                    slice_end = len(dset_data)
                else:
                    slice_init, slice_end = index_range

                dataset = dset_data[slice_init:slice_end, :, :]
                if dataset is not None:
                    if len(dataset) == 0:
                        dataset = None
                        continue
                    wait_for_data = False
                    logger.info(
                        f"Data retrieved from hdf5 file: {len(dataset)} frames "
                        f"between {slice_init} -> {slice_end}"
                    )
    return {"dataset": dataset, "index_range": (slice_init, slice_end)}
def _slice_dataset_offline(
    filename_data: str,
    h5path_to_data: str,
    index_range: tuple = None,
) -> Optional[dict]:
    """
    Read a dataset slice from an HDF5 file, handling different possible structures.

    Args:
        filename_data (str): Path to the HDF5 file.
        h5path_to_data (str): Path to the HDF5 dataset with the data.
        index_range (tuple): Will attempt to slice between these two limits.

    Returns:
        Optional[dict]: {"dataset": ndarray, "index_range": (start, end)}, or None if no data is found.
    """
    params = {
        "filename": filename_data,
        "name": "/",
    }
    with open_item_silx(**params) as root:
        if h5path_to_data not in root:
            return None
        dset = root[h5path_to_data]
        nb_frames_available = len(dset)
        if index_range is None:
            slice_init = 0
            slice_end = nb_frames_available
        else:
            slice_init = min(index_range[0], nb_frames_available)
            slice_end = min(index_range[-1], nb_frames_available)
        if slice_init >= slice_end:
            logger.error(
                f"{filename_data}:{h5path_to_data} cannot be sliced between "
                f"{slice_init} -> {slice_end}"
            )
            return None
        logger.info(
            f"Data retrieved from hdf5 file: {slice_end - slice_init} frames "
            f"between {slice_init} -> {slice_end}"
        )
        return {
            "dataset": dset[slice_init:slice_end],
            "index_range": (slice_init, slice_end),
        }
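
A short sketch of calling this private helper directly (editorial illustration; the file and dataset path are placeholders):

from ewoksid02.utils.blissdata import _slice_dataset_offline

result = _slice_dataset_offline(
    filename_data="/data/sample.h5",               # placeholder file
    h5path_to_data="12.1/instrument/eiger2/data",  # placeholder path
    index_range=(0, 50),
)
if result is not None:
    frames, (start, end) = result["dataset"], result["index_range"]
    print(f"read {len(frames)} frames covering indices {start} -> {end}")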
def read_dataset_offline(
    filename_data: str,
    detector_name: str,
    scan_nb: int,
    last_index_read: int,
    max_slice_size: int = 100,
    range_index_read: Optional[Tuple[int, int]] = None,
) -> Optional[np.ndarray]:
    with h5py.File(filename_data, "r") as f:
        path_to_data_signal = f"{scan_nb}.1/instrument/{detector_name}/data"
        if path_to_data_signal not in f:
            logger.error(f"Dataset {path_to_data_signal} not found in {filename_data}")
            return None

        dataset = f[path_to_data_signal]
        length = len(dataset)
        slice_init = last_index_read
        if range_index_read is None:
            slice_end = min(last_index_read + max_slice_size, length)
        else:
            slice_end = min(range_index_read[-1], length)
        data_signal = dataset[slice_init:slice_end]
    return data_signal
def copy_group_excluding_dataset(src_group, dest_group, exclude_dataset):
    """Recursively copy an HDF5 group, skipping datasets named `exclude_dataset`."""
    for attr_name, attr_value in src_group.attrs.items():
        dest_group.attrs[attr_name] = attr_value

    for name, item in src_group.items():
        if isinstance(item, h5py.Group):
            # Recursively copy subgroups
            new_subgroup = dest_group.create_group(name)
            copy_group_excluding_dataset(item, new_subgroup, exclude_dataset)
        elif isinstance(item, h5py.Dataset):
            if name != exclude_dataset:  # Skip the excluded dataset
                src_group.copy(name, dest_group, name=name)
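
A small self-contained sketch of the recursive copy helper (editorial illustration using throwaway files):

import h5py

from ewoksid02.utils.blissdata import copy_group_excluding_dataset

with h5py.File("src.h5", "w") as src:
    grp = src.create_group("entry")
    grp.attrs["title"] = "demo"
    grp.create_dataset("data", data=[1, 2, 3])
    grp.create_dataset("mask", data=[0, 1, 0])

with h5py.File("src.h5", "r") as src, h5py.File("dst.h5", "w") as dst:
    # Copies "entry" (attributes included) but skips every dataset named "mask"
    copy_group_excluding_dataset(src, dst, exclude_dataset="mask")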
def do_continue_pipeline(
    detector_name: str = None,
    scan_memory_url=None,
    beacon_host: str = None,
    last_index_read=0,
    lima_url_template="",
    lima_url_template_args={},
    subscan=1,
    filename_data=None,
    path_to_data_signal: str = None,  # To be used for static files
) -> bool:
    """
    Check whether there are still frames to be read from a running/complete scan or from a file.
    """
    logger.info(
        f"Checking if there are still frames to read. Last index read: {last_index_read}"
    )
    if scan_memory_url:
        return continue_pipeline_bliss(
            scan_memory_url=scan_memory_url,
            beacon_host=beacon_host,
            detector_name=detector_name,
            last_index_read=last_index_read,
            subscan=subscan,
            lima_url_template=lima_url_template,
            lima_url_template_args=lima_url_template_args,
        )
    if filename_data:
        return continue_pipeline_offline(
            filename_data=filename_data,
            last_index_read=last_index_read,
            path_to_data_signal=path_to_data_signal,
        )
    # Neither a scan memory URL nor a data file was provided
    return False
def continue_pipeline_bliss(
    detector_name: str,
    scan: Scan = None,
    scan_memory_url: str = None,
    beacon_host: str = None,
    last_index_read: int = 0,
    subscan: int = 1,
    lima_url_template: str = None,
    lima_url_template_args: dict = None,
):
    try:
        if not scan:
            scan = load_scan(
                scan_memory_url=scan_memory_url,
                beacon_host=beacon_host,
                wait_until_start=False,
            )
        state = scan.state
    except Exception as e:
        logger.error(f"scan {scan_memory_url} could not be loaded!: {e}")
        return False

    if state < 2:
        logger.info("Scan started but acquisition did not. Wait, data is coming")
        return True
    if state in (2, 3):
        logger.info("Scan is running. Wait for data")
        return True
    if state == 4:
        logger.info("Scan is complete")
        if not scan.streams:
            logger.warning(
                "The scan is complete and does not contain any streams. End of the workflow."
            )
            return False

    if f"{detector_name}:image" not in scan.streams:
        logger.error(f"There is no stream {detector_name}:image in the scan")
        return False

    current_length = get_length_lima_stream(
        scan=scan,
        scan_memory_url=scan_memory_url,
        beacon_host=beacon_host,
        detector_name=detector_name,
        lima_url_template=lima_url_template,
        lima_url_template_args=lima_url_template_args,
        subscan=subscan,
    )
    if current_length is None:
        return False

    logger.info(f"Current length of the dataset: {current_length}")
    if current_length == last_index_read:
        logger.info("No more frames to read. End of the workflow")
        return False
    if current_length > last_index_read:
        logger.info("There are still frames to read. Continue")
        return True
    logger.error("More frames were read than stored. Something went wrong!")
    return False
def continue_pipeline_offline(
    filename_data: str,
    last_index_read: int = 0,
    path_to_data_signal: str = None,
):
    current_length = get_length_dataset_static_file(
        filename_data=filename_data,
        data_path=path_to_data_signal,
    )
    if current_length is None:
        return False

    logger.info(
        f"Current length of the dataset: {current_length}. Last index read: {last_index_read}"
    )
    if current_length == last_index_read:
        logger.info("No more frames to read. End of the workflow")
        return False
    if current_length > last_index_read:
        logger.info("There are still frames to read. Continue")
        return True
    # Mirror the online variant: an inconsistent state ends the workflow
    logger.error("More frames were read than stored. Something went wrong!")
    return False
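
An illustrative driver loop for the continue-pipeline helpers (editorial sketch; all values are placeholders):

import time

from ewoksid02.utils.blissdata import do_continue_pipeline, read_dataset_offline

last_index = 0
while do_continue_pipeline(
    detector_name="eiger2",                             # placeholder
    filename_data="/data/sample.h5",                    # placeholder
    path_to_data_signal="12.1/instrument/eiger2/data",  # placeholder
    last_index_read=last_index,
):
    frames = read_dataset_offline(
        filename_data="/data/sample.h5",
        detector_name="eiger2",
        scan_nb=12,
        last_index_read=last_index,
    )
    if frames is not None:
        last_index += len(frames)
    time.sleep(1)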
def read_blissdata_stream(stream, range_to_read: list) -> Optional[numpy.ndarray]:
    """
    Centralized method to slice a blissdata stream.
    """
    if range_to_read is None:
        range_to_read = [None, None]
    else:
        attempts = 0
        while len(stream) < range_to_read[1] and attempts < 10:
            time.sleep(0.1)
            attempts += 1

        if len(stream) < range_to_read[1]:
            logger.error(
                f"Requested range {range_to_read} is larger than the stream length "
                f"{len(stream)}. Adjusting range."
            )

    try:
        return stream[range_to_read[0] : range_to_read[1]]
    except Exception as e:
        logger.error(f"Error reading stream {stream} in {range_to_read=}: {e}")
        return None
def does_scan_contain_subscan2(scan: Scan) -> Optional[bool]:
    nb_acq_chains = len(scan.info["acquisition_chain"])
    if nb_acq_chains == 1:
        # Standard scan
        return False
    if nb_acq_chains == 2:
        # Subscan2
        return True
    # Unknown scenario
    return None
def get_streams_subscan1(scan: Scan) -> list:
    acquisition_chains = scan.info.get("acquisition_chain")
    if not acquisition_chains:
        logger.error(f"There are no acquisition chains in {scan}")
        return []

    if does_scan_contain_subscan2(scan=scan):
        acquisition_chain1 = acquisition_chains.get("mcs")
        if not acquisition_chain1:
            logger.error(f"No mcs acq. chain in {scan}")
            return []
    else:
        acquisition_chain1 = next(iter(acquisition_chains.values()))
    streams = [
        stream
        for stream in _get_stream_names_from_acquisition_chain(
            acq_chain=acquisition_chain1
        )
        if stream in scan.streams
    ]
    return streams
def get_streams_subscan2(scan: Scan) -> list:
    acquisition_chains = scan.info.get("acquisition_chain")
    if not acquisition_chains:
        logger.error(f"There are no acquisition chains in {scan}")
        return []

    if not does_scan_contain_subscan2(scan=scan):
        logger.error(f"There is no subscan2 in {scan}")
        return []

    acquisition_chain2 = acquisition_chains.get("sampling_timer")
    if not acquisition_chain2:
        logger.error(f"No sampling_timer acq. chain in {scan}")
        return []
    streams = [
        stream
        for stream in _get_stream_names_from_acquisition_chain(
            acq_chain=acquisition_chain2
        )
        if stream in scan.streams
    ]
    return streams
def _get_stream_names_from_acquisition_chain(
    acq_chain: dict, include_images: bool = False
) -> list:
    master = acq_chain.get("master", {})
    if not master:
        return []
    stream_names = []
    for acq_chain_type in (acq_chain, master):
        for stream_type in ("scalars", "spectra"):
            # Default to an empty list so missing keys do not raise
            stream_names += acq_chain_type.get(stream_type, [])
        if include_images:
            stream_names += acq_chain_type.get("images", [])
    return stream_names