ewoksid02-0.1.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (54)
  1. ewoksid02/__init__.py +0 -0
  2. ewoksid02/ocl/__init__.py +0 -0
  3. ewoksid02/resources/__init__.py +8 -0
  4. ewoksid02/resources/saxs_loop.json +96 -0
  5. ewoksid02/resources/template_saxs.yaml +37 -0
  6. ewoksid02/scripts/__init__.py +0 -0
  7. ewoksid02/scripts/__main__.py +70 -0
  8. ewoksid02/scripts/parsers.py +224 -0
  9. ewoksid02/scripts/saxs/__init__.py +0 -0
  10. ewoksid02/scripts/saxs/main.py +255 -0
  11. ewoksid02/scripts/saxs/slurm_python_post_script.py +3 -0
  12. ewoksid02/scripts/saxs/slurm_python_pre_script.py +5 -0
  13. ewoksid02/scripts/utils.py +21 -0
  14. ewoksid02/scripts/xpcs/__init__.py +0 -0
  15. ewoksid02/scripts/xpcs/__main__.py +3 -0
  16. ewoksid02/tasks/__init__.py +7 -0
  17. ewoksid02/tasks/averagetask.py +179 -0
  18. ewoksid02/tasks/azimuthaltask.py +272 -0
  19. ewoksid02/tasks/cavingtask.py +170 -0
  20. ewoksid02/tasks/dahuprocessingtask.py +71 -0
  21. ewoksid02/tasks/end.py +35 -0
  22. ewoksid02/tasks/id02processingtask.py +2582 -0
  23. ewoksid02/tasks/looptask.py +672 -0
  24. ewoksid02/tasks/metadatatask.py +879 -0
  25. ewoksid02/tasks/normalizationtask.py +204 -0
  26. ewoksid02/tasks/scalerstask.py +46 -0
  27. ewoksid02/tasks/secondaryscatteringtask.py +159 -0
  28. ewoksid02/tasks/sumtask.py +45 -0
  29. ewoksid02/tests/__init__.py +3 -0
  30. ewoksid02/tests/conftest.py +639 -0
  31. ewoksid02/tests/debug.py +64 -0
  32. ewoksid02/tests/test_2scat_node.py +119 -0
  33. ewoksid02/tests/test_ave_node.py +106 -0
  34. ewoksid02/tests/test_azim_node.py +89 -0
  35. ewoksid02/tests/test_cave_node.py +118 -0
  36. ewoksid02/tests/test_norm_node.py +190 -0
  37. ewoksid02/tests/test_saxs.py +69 -0
  38. ewoksid02/tests/test_sumtask.py +10 -0
  39. ewoksid02/tests/utils.py +514 -0
  40. ewoksid02/utils/__init__.py +22 -0
  41. ewoksid02/utils/average.py +158 -0
  42. ewoksid02/utils/blissdata.py +1157 -0
  43. ewoksid02/utils/caving.py +851 -0
  44. ewoksid02/utils/cupyutils.py +42 -0
  45. ewoksid02/utils/io.py +722 -0
  46. ewoksid02/utils/normalization.py +804 -0
  47. ewoksid02/utils/pyfai.py +424 -0
  48. ewoksid02/utils/secondaryscattering.py +597 -0
  49. ewoksid02-0.1.0.dist-info/METADATA +76 -0
  50. ewoksid02-0.1.0.dist-info/RECORD +54 -0
  51. ewoksid02-0.1.0.dist-info/WHEEL +5 -0
  52. ewoksid02-0.1.0.dist-info/entry_points.txt +5 -0
  53. ewoksid02-0.1.0.dist-info/licenses/LICENSE.md +20 -0
  54. ewoksid02-0.1.0.dist-info/top_level.txt +1 -0
ewoksid02/tests/test_sumtask.py
@@ -0,0 +1,10 @@
+ import pytest
+
+ from ewoksid02.tasks.sumtask import SumTask, SumTask1, SumTask2
+
+
+ @pytest.mark.parametrize("Task", [SumTask, SumTask1, SumTask2])
+ def test_sum_task(Task):
+     task = Task(inputs={"a": 1, "b": 2})
+     task.run()
+     assert task.outputs.result == 3
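The test treats each task as a black box with the standard ewokscore contract: inputs are passed as a dict at construction, run() executes, and results appear under task.outputs. As a minimal sketch consistent with this test (the packaged implementation in ewoksid02/tasks/sumtask.py may differ), such a task could look like:

    from ewokscore import Task


    class SumTask(Task, input_names=["a", "b"], output_names=["result"]):
        def run(self):
            # Publish the sum through the task's output namespace,
            # which the test reads back as task.outputs.result.
            self.outputs.result = self.inputs.a + self.inputs.b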
ewoksid02/tests/utils.py
@@ -0,0 +1,514 @@
+ import h5py
+ import numpy
+ from ewoks import execute_graph
+ from silx.io.url import DataUrl
+ import logging
+
+ logger = logging.getLogger(__name__)
+
+
+ def check_h5groups_equivalent(
+     url_reference,
+     url_test,
+ ):
+     dataurl_reference = DataUrl(url_reference)
+     dataurl_test = DataUrl(url_test)
+
+     with h5py.File(dataurl_reference.file_path(), "r") as file_reference:
+         with h5py.File(dataurl_test.file_path(), "r") as file_test:
+             reference_group = file_reference[dataurl_reference.data_path()]
+             test_group = file_test[dataurl_test.data_path()]
+
+             def check_equivalence(name_reference_item, h5item_reference):
+                 if "TitleExtension" in name_reference_item:
+                     # Only the ewoks files before ~October25 do not contain TitleExtension
+                     # After that, the header includes TitleExtensionTemplate #TODO add the option TitleExtensionTemplate
+                     return
+
+                 if "interpreted" in h5item_reference.name:
+                     if name_reference_item == "epoch":
+                         # ewoksid02 does not generate epoch group, it's artificial
+                         return
+
+                     # When it's an offline processing, the counter names in interpreted group cannot be rebuilt fully because this information is not in the RAW_DATA file (only in blissdata)
+                     if name_reference_item not in test_group:
+                         logger.warning(
+                             f"{name_reference_item} not in {dataurl_test.path()}, but could have a different name"
+                         )
+                         return
+
+                 assert (
+                     name_reference_item in test_group
+                 ), f"{name_reference_item} not in {dataurl_test.path()}"
+                 logger.debug(f"{name_reference_item} found in {test_group}")
+
+                 if isinstance(h5item_reference, h5py.Dataset):
+                     check_equivalence_datasets(
+                         dataset_test=test_group[name_reference_item],
+                         dataset_reference=h5item_reference,
+                     )
+
+             def check_equivalence_datasets(
+                 dataset_test: h5py.Dataset, dataset_reference: h5py.Dataset
+             ):
+
+                 data_reference = dataset_reference[()]
+                 data_test = dataset_test[()]
+
+                 if isinstance(data_reference, numpy.ndarray):
+                     if dataurl_reference.data_slice():
+                         data_reference = data_reference[
+                             dataurl_reference.data_slice()[
+                                 0
+                             ] : dataurl_reference.data_slice()[1]
+                         ]
+                     if dataurl_test.data_slice():
+                         data_test = data_test[
+                             dataurl_test.data_slice()[0] : dataurl_test.data_slice()[1]
+                         ]
+
+                     if any(_ in dataset_test.name for _ in ("HS32C", "HS32V")):
+                         # Because in ewoksid02, we save also aux1, aux2 counters
+                         data_test = numpy.concatenate(
+                             (data_test[:, 0:10], data_test[:, 12:]), axis=1
+                         )
+                         data_reference = numpy.concatenate(
+                             (data_reference[:, 0:10], data_reference[:, 12:]), axis=1
+                         )
+
+                     if "HS32N" in dataset_test.name:
+                         # HS32N is the only array (so far) made of string elements
+                         data_test = numpy.array([_.decode() for _ in data_test])
+                         data_reference = numpy.array(
+                             [_.decode() for _ in data_reference]
+                         )
+                         for i, j in zip(data_test, data_reference):
+                             assert i == j
+                         return
+
+                     assert numpy.allclose(
+                         data_test, data_reference, equal_nan=True
+                     ), f"{dataset_reference.name} is not equivalent"
+                 elif isinstance(data_reference, bytes):
+                     assert (
+                         data_reference.decode() == data_test.decode()
+                     ), f"{dataset_reference.name} is not equivalent"
+                 else:
+                     assert (
+                         data_reference == data_test
+                     ), f"{dataset_reference.name} is not equivalent"
+                 logger.debug(f"{dataset_reference.name} are equivalent")
+
+             if isinstance(reference_group, h5py.Dataset) and isinstance(
+                 test_group, h5py.Dataset
+             ):
+                 check_equivalence_datasets(
+                     dataset_test=test_group,
+                     dataset_reference=reference_group,
+                 )
+             elif isinstance(reference_group, h5py.Group):
+                 reference_group.visititems(func=check_equivalence)
+
+
+ def check_h5groups_common(
+     filename_reference: str,
+     filename_test: str,
+     detector_name: str = "eiger2",
+ ):
+
+     check_h5groups_equivalent(
+         url_reference=f"silx://{filename_reference}?path=/entry_0000/PyFAI/TFG/&slice=0,2",
+         url_test=f"silx://{filename_test}?path=/entry_0000/PyFAI/TFG/",
+     )
+     check_h5groups_equivalent(
+         url_reference=f"silx://{filename_reference}?path=/entry_0000/PyFAI/parameters/&slice=0,2",
+         url_test=f"silx://{filename_test}?path=/entry_0000/PyFAI/parameters/",
+     )
+     check_h5groups_equivalent(
+         url_reference=f"silx://{filename_reference}?path=/entry_0000/PyFAI/{detector_name}",
+         url_test=f"silx://{filename_test}?path=/entry_0000/PyFAI/{detector_name}",
+     )
+     check_h5groups_equivalent(
+         url_reference=f"silx://{filename_reference}?path=/entry_0000/PyFAI/MCS/ExposureTime/&slice=0,2",
+         url_test=f"silx://{filename_test}?path=/entry_0000/PyFAI/MCS/ExposureTime",
+     )
+     check_h5groups_equivalent(
+         url_reference=f"silx://{filename_reference}?path=/entry_0000/PyFAI/MCS/HS32C/&slice=0,2",
+         url_test=f"silx://{filename_test}?path=/entry_0000/PyFAI/MCS/HS32C",
+     )
+     check_h5groups_equivalent(
+         url_reference=f"silx://{filename_reference}?path=/entry_0000/PyFAI/MCS/HS32F",
+         url_test=f"silx://{filename_test}?path=/entry_0000/PyFAI/MCS/HS32F",
+     )
+     check_h5groups_equivalent(
+         url_reference=f"silx://{filename_reference}?path=/entry_0000/PyFAI/MCS/HS32N",
+         url_test=f"silx://{filename_test}?path=/entry_0000/PyFAI/MCS/HS32N",
+     )
+     check_h5groups_equivalent(
+         url_reference=f"silx://{filename_reference}?path=/entry_0000/PyFAI/MCS/HS32V/&slice=0,2",
+         url_test=f"silx://{filename_test}?path=/entry_0000/PyFAI/MCS/HS32V",
+     )
+     check_h5groups_equivalent(
+         url_reference=f"silx://{filename_reference}?path=/entry_0000/PyFAI/MCS/HS32Z",
+         url_test=f"silx://{filename_test}?path=/entry_0000/PyFAI/MCS/HS32Z",
+     )
+     check_h5groups_equivalent(
+         url_reference=f"silx://{filename_reference}?path=/entry_0000/PyFAI/MCS/Intensity0ShutCor/&slice=0,2",
+         url_test=f"silx://{filename_test}?path=/entry_0000/PyFAI/MCS/Intensity0ShutCor",
+     )
+     check_h5groups_equivalent(
+         url_reference=f"silx://{filename_reference}?path=/entry_0000/PyFAI/MCS/Intensity0UnCor/&slice=0,2",
+         url_test=f"silx://{filename_test}?path=/entry_0000/PyFAI/MCS/Intensity0UnCor",
+     )
+     check_h5groups_equivalent(
+         url_reference=f"silx://{filename_reference}?path=/entry_0000/PyFAI/MCS/Intensity1ShutCor/&slice=0,2",
+         url_test=f"silx://{filename_test}?path=/entry_0000/PyFAI/MCS/Intensity1ShutCor",
+     )
+     check_h5groups_equivalent(
+         url_reference=f"silx://{filename_reference}?path=/entry_0000/PyFAI/MCS/Intensity1UnCor/&slice=0,2",
+         url_test=f"silx://{filename_test}?path=/entry_0000/PyFAI/MCS/Intensity1UnCor",
+     )
+     check_h5groups_equivalent(
+         url_reference=f"silx://{filename_reference}?path=/entry_0000/PyFAI/MCS/raw/&slice=0,2",
+         url_test=f"silx://{filename_test}?path=/entry_0000/PyFAI/MCS/raw/&slice=0,2",
+     )
+     check_h5groups_equivalent(
+         url_reference=f"silx://{filename_reference}?path=/entry_0000/PyFAI/MCS/interpreted/&slice=0,2",
+         url_test=f"silx://{filename_test}?path=/entry_0000/PyFAI/MCS/interpreted",
+     )
+     for grp in (
+         "HSI0",
+         "HSI1",
+         "HSI0Factor",
+         "HSI1Factor",
+         "HSTime",
+         "ShutterClosingTime",
+         "ShutterOpeningTime",
+     ):
+         check_h5groups_equivalent(
+             url_reference=f"silx://{filename_reference}?path=/entry_0000/PyFAI/MCS/{grp}",
+             url_test=f"silx://{filename_test}?path=/entry_0000/PyFAI/MCS/{grp}",
+         )
+
+
+ def check_allclose(
+     url_reference,
+     url_test,
+ ):
+     dataurl_reference = DataUrl(url_reference)
+     dataurl_test = DataUrl(url_test)
+     with h5py.File(dataurl_reference.file_path(), "r") as file_reference:
+         with h5py.File(dataurl_test.file_path(), "r") as file_tested:
+             data_reference = file_reference[dataurl_reference.data_path()][()]
+             data_test = file_tested[dataurl_test.data_path()][()]
+             if dataurl_reference.data_slice():
+                 data_reference = data_reference[
+                     dataurl_reference.data_slice()[0] : dataurl_reference.data_slice()[
+                         1
+                     ]
+                 ]
+             if dataurl_test.data_slice():
+                 data_test = data_test[
+                     dataurl_test.data_slice()[0] : dataurl_test.data_slice()[1]
+                 ]
+
+             assert numpy.allclose(data_reference, data_test, equal_nan=True)
+
+
+ # def allclose_data_sigma(
+ #     filename_processed_reference,
+ #     filename_processed_test,
+ #     range_index_read,
+ #     processing_type,
+ #     # atol=1e-6,
+ # ):
+ #     path_to_data_signal_processed = f"entry_0000/PyFAI/result_{processing_type}/data"
+ #     path_to_data_sigma_processed = (
+ #         f"entry_0000/PyFAI/result_{processing_type}/data_errors"
+ #     )
+ #     with h5py.File(filename_processed_reference, "r") as file_reference:
+ #         with h5py.File(filename_processed_test, "r") as file_tested:
+ #             data_reference = file_reference[path_to_data_signal_processed][
+ #                 range_index_read[0] : range_index_read[1]
+ #             ]
+ #             data_errors_reference = file_reference[path_to_data_sigma_processed][
+ #                 range_index_read[0] : range_index_read[1]
+ #             ]
+
+ #             data_processed = file_tested[path_to_data_signal_processed][:]
+ #             data_errors_processed = file_tested[path_to_data_sigma_processed][:]
+
+ #             assert numpy.allclose(data_reference, data_processed, equal_nan=True)
+ #             assert numpy.allclose(
+ #                 data_errors_reference, data_errors_processed, equal_nan=True
+ #             )
+
+
+ # def h5py_equivalence(
+ #     filename_processed_reference,
+ #     filename_processed_test,
+ #     path_to_parameters="/entry_0000/PyFAI/parameters",
+ #     path_to_mcs="/entry_0000/PyFAI/MCS",
+ #     path_to_tfg="/entry_0000/PyFAI/TFG",
+ #     nb_frames=None,
+ # ):
+ #     """
+ #     Compare two HDF5 files for equivalence.
+ #     """
+ #     # if task is not None:
+ #     #     file_reference = task.filename_metadata
+ #     #     file_output = task.processing_filename
+ #     #     path_to_parameters = task.path_to_parameters
+ #     #     path_to_mcs = task.path_to_mcs
+ #     #     path_to_tfg = task.path_to_tfg
+ #     #     nb_frames = task.range_index_read[1] - task.range_index_read[0]
+
+ #     def check_existence(name):
+ #         SKIP_GROUPS = [
+ #             "entry_0000/PyFAI/result_",
+ #             "entry_0000/PyFAI/MCS/raw",
+ #             "entry_0000/PyFAI/MCS/interpreted",
+ #             # "entry_0000/PyFAI/MCS/HS32C",
+ #             "TitleExtension",
+ #             "entry_0000/ewoks",
+ #             "entry_0000/pyFAI/benchmark",
+ #         ]
+ #         for gr in SKIP_GROUPS:
+ #             if gr in name:
+ #                 return
+ #         if "ave_limits" in name:
+ #             return
+ #         if name not in fproc:
+ #             assert False, f"Object {name} not found in processed file"
+
+ #     with h5py.File(filename_processed_reference, "r") as fref:
+ #         with h5py.File(filename_processed_test, "r") as fproc:
+ #             # Traverse all objects (groups and datasets) in the reference file
+ #             fref.visit(check_existence)
+
+ #     parameters_group_equivalence(
+ #         file_reference=filename_processed_reference,
+ #         file_output=filename_processed_test,
+ #         path_to_parameters=path_to_parameters,
+ #     )
+
+ #     MCS_group_equivalence(
+ #         file_reference=filename_processed_reference,
+ #         file_output=filename_processed_test,
+ #         path_to_mcs=path_to_mcs,
+ #         nb_frames=nb_frames,
+ #     )
+
+ #     interpreted_group_equivalence(
+ #         file_reference=filename_processed_reference,
+ #         file_output=filename_processed_test,
+ #         path_to_interpreted=f"{path_to_mcs}/interpreted",
+ #     )
+
+ #     raw_group_equivalence(
+ #         file_reference=filename_processed_reference,
+ #         file_output=filename_processed_test,
+ #         path_to_raw=f"{path_to_mcs}/raw",
+ #     )
+
+ #     subscan1_group_equivalence(
+ #         file_reference=filename_processed_reference,
+ #         file_output=filename_processed_test,
+ #         path_to_subscan1=f"{path_to_mcs}/raw/subscan_1",
+ #     )
+
+ #     tfg_group_equivalence(
+ #         file_reference=filename_processed_reference,
+ #         file_output=filename_processed_test,
+ #         path_to_tfg=path_to_tfg,
+ #     )
+
+
+ # def parameters_group_equivalence(
+ #     file_reference,
+ #     file_output,
+ #     path_to_parameters,
+ # ):
+ #     """
+ #     Compare two HDF5 groups for equivalence.
+ #     """
+ #     with h5py.File(file_reference, "r") as fref:
+ #         with h5py.File(file_output, "r") as fproc:
+ #             for key in fref[path_to_parameters]:
+ #                 if key == "TitleExtension":
+ #                     continue
+ #                 assert key in fproc[path_to_parameters].keys()
+ #                 value_reference = fref[path_to_parameters][key][()]
+ #                 value_processed = fproc[path_to_parameters][key][()]
+ #                 assert value_reference == value_processed
+
+
+ # def MCS_group_equivalence(
+ #     file_reference,
+ #     file_output,
+ #     path_to_mcs,
+ #     nb_frames,
+ # ):
+ #     """
+ #     Compare all datasets in the MCS group of two HDF5 files for equivalence.
+ #     """
+ #     with h5py.File(file_reference, "r") as fref:
+ #         with h5py.File(file_output, "r") as fproc:
+ #             for key in fref[path_to_mcs]:
+ #                 if isinstance(fref[path_to_mcs][key], h5py.Group):
+ #                     continue
+ #                 assert key in fproc[path_to_mcs].keys()
+
+ #                 value_processed = fproc[path_to_mcs][key][()]
+
+ #                 if isinstance(value_processed, bytes):
+ #                     assert value_processed == fref[path_to_mcs][key][()]
+ #                     continue
+
+ #                 if value_processed.ndim > 0 and len(value_processed) == nb_frames:
+ #                     if fref[path_to_mcs][key].ndim == 2:
+ #                         value_reference = fref[path_to_mcs][key][
+ #                             0 : len(value_processed), :
+ #                         ]
+ #                     elif fref[path_to_mcs][key].ndim == 1:
+ #                         value_reference = fref[path_to_mcs][key][
+ #                             0 : len(value_processed)
+ #                         ]
+ #                 else:
+ #                     value_reference = numpy.array(fref[path_to_mcs][key][()])
+
+ #                 if value_processed.dtype == "O":
+ #                     assert numpy.array_equal(
+ #                         value_processed, value_reference
+ #                     ), f"Dataset {key} is not equal in the two files"
+ #                 else:
+ #                     assert numpy.array_equal(
+ #                         value_processed, value_reference, equal_nan=True
+ #                     ), f"Dataset {key} is not equal in the two files"
+
+
+ # def interpreted_group_equivalence(
+ #     file_reference,
+ #     file_output,
+ #     path_to_interpreted,
+ # ):
+ #     """
+ #     Compare all datasets in the interpreted group of two HDF5 files for equivalence.
+ #     """
+ #     with h5py.File(file_reference, "r") as fref:
+ #         with h5py.File(file_output, "r") as fproc:
+ #             for key in fproc[path_to_interpreted]:
+ #                 value_processed = fproc[path_to_interpreted][key][()]
+ #                 value_reference = fref[path_to_interpreted][key][
+ #                     0 : len(value_processed)
+ #                 ]
+ #                 assert numpy.array_equal(
+ #                     value_processed, value_reference, equal_nan=True
+ #                 ), f"Dataset {key} is not equal in the two files"
+
+
+ # def raw_group_equivalence(
+ #     file_reference,
+ #     file_output,
+ #     path_to_raw,
+ # ):
+ #     """
+ #     Compare all datasets in the raw group of two HDF5 files for equivalence.
+ #     """
+ #     with h5py.File(file_reference, "r") as fref:
+ #         with h5py.File(file_output, "r") as fproc:
+ #             for key in fref[path_to_raw]:
+ #                 if isinstance(fref[path_to_raw][key], h5py.Group):
+ #                     continue
+
+ #                 value_processed = fproc[path_to_raw][key][()]
+ #                 value_reference = fref[path_to_raw][key][0 : len(value_processed)]
+ #                 assert numpy.array_equal(
+ #                     value_processed, value_reference, equal_nan=True
+ #                 ), f"Dataset {key} is not equal in the two files"
+
+
+ # def subscan1_group_equivalence(
+ #     file_reference,
+ #     file_output,
+ #     path_to_subscan1,
+ # ):
+ #     """
+ #     Compare all datasets in the subscan1 group of two HDF5 files for equivalence.
+ #     """
+ #     with h5py.File(file_reference, "r") as fref:
+ #         with h5py.File(file_output, "r") as fproc:
+ #             for key in fref[path_to_subscan1]:
+ #                 value_processed = fproc[path_to_subscan1][key][()]
+ #                 value_reference = fref[path_to_subscan1][key][0 : len(value_processed)]
+ #                 assert numpy.array_equal(
+ #                     value_processed, value_reference, equal_nan=True
+ #                 ), f"Dataset {key} is not equal in the two files"
+
+
+ # def tfg_group_equivalence(
+ #     file_reference,
+ #     file_output,
+ #     path_to_tfg,
+ # ):
+ #     """
+ #     Compare all datasets in the tfg group of two HDF5 files for equivalence.
+ #     """
+ #     with h5py.File(file_reference, "r") as fref:
+ #         with h5py.File(file_output, "r") as fproc:
+ #             for key in fref[path_to_tfg]:
+ #                 value_processed = fproc[path_to_tfg][key][()]
+ #                 if isinstance(value_processed, bytes):
+ #                     assert value_processed == fref[path_to_tfg][key][()]
+ #                     continue
+
+ #                 value_reference = fref[path_to_tfg][key][0 : len(value_processed)]
+ #                 assert numpy.array_equal(
+ #                     value_processed, value_reference, equal_nan=True
+ #                 ), f"Dataset {key} is not equal in the two files"
+
+
+ # def check_result(
+ #     reference_filename,
+ #     output_filename,
+ #     range_index_read,
+ #     processing_type,
+ #     **kwargs,
+ # ):
+ #     path_to_data_signal = f"entry_0000/PyFAI/result_{processing_type}/data"
+ #     with h5py.File(reference_filename, "r") as file_reference:
+ #         with h5py.File(output_filename, "r") as file_processed:
+ #             dset_signal_reference = file_reference[path_to_data_signal]
+
+ #             if dset_signal_reference.ndim == 3:
+ #                 data_reference = dset_signal_reference[
+ #                     range_index_read[0] : range_index_read[1], :, :
+ #                 ]
+
+ #             elif dset_signal_reference.ndim == 2:
+ #                 data_reference = dset_signal_reference[
+ #                     range_index_read[0] : range_index_read[1], :
+ #                 ]
+
+ #             data_processed = file_processed[path_to_data_signal][:]
+ #             assert numpy.allclose(
+ #                 data_reference, data_processed, equal_nan=True
+ #             ), f"Data signal is not equal: {data_reference.mean()} != {data_processed.mean()}"
+
+
+ def execute_ewoks(
+     graph,
+     inputs,
+     engine="ppf",
+     pool_type="thread",
+ ):
+     """
+     Execute a graph with the given inputs and return the result.
+     """
+     result = execute_graph(
+         graph=graph,
+         inputs=inputs,
+         engine=engine,
+         pool_type=pool_type,
+     )
+     return result
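These helpers are driven by silx DataUrl strings, which bundle the file path, the internal HDF5 path, and an optional frame slice into one URL; check_h5groups_equivalent then walks the two groups item by item. A hedged usage sketch (the file names are hypothetical), comparing the first two frames of a reference MCS group against a freshly processed file:

    from ewoksid02.tests.utils import check_h5groups_equivalent

    # slice=0,2 restricts the reference datasets to frames 0 and 1,
    # matching a test output that only processed two frames.
    check_h5groups_equivalent(
        url_reference="silx://reference.h5?path=/entry_0000/PyFAI/MCS/&slice=0,2",
        url_test="silx://processed.h5?path=/entry_0000/PyFAI/MCS/",
    )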
ewoksid02/utils/__init__.py
@@ -0,0 +1,22 @@
+ #
+ from pathlib import Path
+
+ from ..resources import TEMPLATE_SAXS
+
+ AVAILABLE_TEMPLATES = {
+     "saxs": {
+         "path": str(TEMPLATE_SAXS),
+         "usage": "Loop SAXS integration",
+         "directory": Path.home() / "ewoksid02_templates",
+         "future_path": Path.home()
+         / "ewoksid02_templates"
+         / "ewoksid02_template_saxs.yaml",
+     },
+ }
+
+
+ TEMPLATE_MESSAGE = "Available templates:\n"
+ for key, value in AVAILABLE_TEMPLATES.items():
+     TEMPLATE_MESSAGE += (
+         f"\t* {key}: {value['usage']}\n\t Copied to: {value['future_path']}\n"
+     )
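AVAILABLE_TEMPLATES maps each template name to its packaged source (path) and the destination it should be copied to under the user's home directory (future_path). A hedged sketch of how a script could consume these entries; copy_template is hypothetical and not part of the package, and the module path in the import is assumed from this diff:

    import shutil

    from ewoksid02.utils import AVAILABLE_TEMPLATES  # module path assumed


    def copy_template(name: str) -> None:
        entry = AVAILABLE_TEMPLATES[name]
        # Ensure ~/ewoksid02_templates exists, then copy the packaged template.
        entry["directory"].mkdir(parents=True, exist_ok=True)
        shutil.copyfile(entry["path"], entry["future_path"])


    copy_template("saxs")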