ewoksid02 0.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (54) hide show
  1. ewoksid02/__init__.py +0 -0
  2. ewoksid02/ocl/__init__.py +0 -0
  3. ewoksid02/resources/__init__.py +8 -0
  4. ewoksid02/resources/saxs_loop.json +96 -0
  5. ewoksid02/resources/template_saxs.yaml +37 -0
  6. ewoksid02/scripts/__init__.py +0 -0
  7. ewoksid02/scripts/__main__.py +70 -0
  8. ewoksid02/scripts/parsers.py +224 -0
  9. ewoksid02/scripts/saxs/__init__.py +0 -0
  10. ewoksid02/scripts/saxs/main.py +255 -0
  11. ewoksid02/scripts/saxs/slurm_python_post_script.py +3 -0
  12. ewoksid02/scripts/saxs/slurm_python_pre_script.py +5 -0
  13. ewoksid02/scripts/utils.py +21 -0
  14. ewoksid02/scripts/xpcs/__init__.py +0 -0
  15. ewoksid02/scripts/xpcs/__main__.py +3 -0
  16. ewoksid02/tasks/__init__.py +7 -0
  17. ewoksid02/tasks/averagetask.py +179 -0
  18. ewoksid02/tasks/azimuthaltask.py +272 -0
  19. ewoksid02/tasks/cavingtask.py +170 -0
  20. ewoksid02/tasks/dahuprocessingtask.py +71 -0
  21. ewoksid02/tasks/end.py +35 -0
  22. ewoksid02/tasks/id02processingtask.py +2582 -0
  23. ewoksid02/tasks/looptask.py +672 -0
  24. ewoksid02/tasks/metadatatask.py +879 -0
  25. ewoksid02/tasks/normalizationtask.py +204 -0
  26. ewoksid02/tasks/scalerstask.py +46 -0
  27. ewoksid02/tasks/secondaryscatteringtask.py +159 -0
  28. ewoksid02/tasks/sumtask.py +45 -0
  29. ewoksid02/tests/__init__.py +3 -0
  30. ewoksid02/tests/conftest.py +639 -0
  31. ewoksid02/tests/debug.py +64 -0
  32. ewoksid02/tests/test_2scat_node.py +119 -0
  33. ewoksid02/tests/test_ave_node.py +106 -0
  34. ewoksid02/tests/test_azim_node.py +89 -0
  35. ewoksid02/tests/test_cave_node.py +118 -0
  36. ewoksid02/tests/test_norm_node.py +190 -0
  37. ewoksid02/tests/test_saxs.py +69 -0
  38. ewoksid02/tests/test_sumtask.py +10 -0
  39. ewoksid02/tests/utils.py +514 -0
  40. ewoksid02/utils/__init__.py +22 -0
  41. ewoksid02/utils/average.py +158 -0
  42. ewoksid02/utils/blissdata.py +1157 -0
  43. ewoksid02/utils/caving.py +851 -0
  44. ewoksid02/utils/cupyutils.py +42 -0
  45. ewoksid02/utils/io.py +722 -0
  46. ewoksid02/utils/normalization.py +804 -0
  47. ewoksid02/utils/pyfai.py +424 -0
  48. ewoksid02/utils/secondaryscattering.py +597 -0
  49. ewoksid02-0.1.0.dist-info/METADATA +76 -0
  50. ewoksid02-0.1.0.dist-info/RECORD +54 -0
  51. ewoksid02-0.1.0.dist-info/WHEEL +5 -0
  52. ewoksid02-0.1.0.dist-info/entry_points.txt +5 -0
  53. ewoksid02-0.1.0.dist-info/licenses/LICENSE.md +20 -0
  54. ewoksid02-0.1.0.dist-info/top_level.txt +1 -0
@@ -0,0 +1,597 @@
1
+ import time
2
+ from typing import Optional, Tuple
3
+
4
+ import numexpr
5
+ import numpy
6
+ from scipy.signal import convolve as oaconvolve_numpy
7
+
8
+ from .io import get_persistent_array_window_wagon, get_persistent_array_mask
9
+
10
+ try:
11
+ import cupy
12
+ from cupyx.scipy.signal import oaconvolve as oaconvolve_cupy
13
+ from .cupyutils import log_allocated_gpu_memory
14
+ except ImportError:
15
+ CUPY_AVAILABLE = False
16
+ CUPY_MEM_POOL = None
17
+ cupy = numpy
18
+ else:
19
+ CUPY_AVAILABLE = True
20
+ CUPY_MEM_POOL = cupy.get_default_memory_pool()
21
+ import logging
22
+
23
+ from .caving import (
24
+ _process_data_caving_cupy,
25
+ process_data_caving,
26
+ _mask_caving,
27
+ )
28
+
29
+ logger = logging.getLogger(__name__)
30
+ logger.setLevel(logging.INFO)
31
+
32
+
33
def shift_window(
    array_window: numpy.ndarray,
    cx: int,
    cy: int,
) -> Tuple[numpy.ndarray, numpy.ndarray]:
    """
    Shift the window pattern so its brightest pixel lands on the beam center.

    Inputs:
        - array_window (numpy.ndarray): 2-D window (wagon) pattern to shift.
        - cx (int): X-coordinate of the direct beam.
        - cy (int): Y-coordinate of the direct beam.
    Outputs:
        Tuple[numpy.ndarray, numpy.ndarray]: The shifted window and a boolean
        mask flagging the pixels that fell outside the frame during the shift.
    """
    n_rows, n_cols = array_window.shape
    rows, cols = numpy.meshgrid(
        numpy.arange(n_rows), numpy.arange(n_cols), indexing="ij"
    )

    # The brightest pixel is taken as the current center of the window pattern.
    peak_row, peak_col = numpy.unravel_index(
        numpy.argmax(array_window), shape=array_window.shape
    )
    shift_cols = int(cx) - peak_col
    shift_rows = int(cy) - peak_row

    # Sample the window at the back-shifted coordinates; out-of-range source
    # coordinates are clamped (upper bound size-5 kept from the original).
    src_cols = numpy.clip(cols - shift_cols, 0, n_cols - 5)
    src_rows = numpy.clip(rows - shift_rows, 0, n_rows - 5)
    window_shifted = array_window[src_rows, src_cols]

    # A pixel is valid only when its source coordinate stayed strictly inside
    # the open interval (0, max) along both axes.
    inside_cols = (0.0 < cols - shift_cols) & ((cols - shift_cols) < cols.max())
    inside_rows = (0.0 < rows - shift_rows) & ((rows - shift_rows) < rows.max())
    invalid = ~(inside_cols & inside_rows)

    # Shifting and clipping mask are ok (in 4 shift directions)
    return window_shifted, invalid
70
+
71
+
72
def _process_dataset_2scat_numpy(
    dataset_signal: numpy.ndarray,  # 3dim
    array_window: numpy.ndarray,  # Original, non-shifted
    Center_1: int,
    Center_2: int,
    WindowRoiSize: int = 120,
    Dummy: Optional[int] = -10,
    dataset_variance: Optional[numpy.ndarray] = None,
    clip_data: bool = True,
    use_numexpr: bool = False,
    pre_caving: bool = False,
    log: bool = False,
    **kwargs: Optional[dict],
) -> Tuple[
    Optional[numpy.ndarray],
    Optional[numpy.ndarray],
    Optional[numpy.ndarray],
    numpy.ndarray,
]:
    """
    CPU implementation of the secondary scattering correction.

    Inputs:
        - dataset_signal (numpy.ndarray): 3-dimensional stack of frames to correct.
        - array_window (numpy.ndarray): Window (wagon) pattern, original (non-shifted).
        - Center_1 (int): X-coordinate of the direct beam.
        - Center_2 (int): Y-coordinate of the direct beam.
        - WindowRoiSize (int): Half-size of the ROI sliced around the beam center,
          used as the convolution kernel.
        - Dummy (Optional[int]): Value flagging invalid/masked pixels.
        - dataset_variance (Optional[numpy.ndarray]): Variance stack matching
          dataset_signal; corrected alongside the signal when provided.
        - clip_data (bool): Replace pixels that could not be corrected with Dummy.
        - use_numexpr (bool): Evaluate the correction arithmetic with numexpr.
        - pre_caving (bool): Run the caving correction on the signal (and variance)
          before the secondary scattering correction.
        - log (bool): Emit per-step timings through the module logger.
    Outputs:
        Tuple of (corrected signal, corrected variance or None,
        corrected sigma or None, secondary scattering term).
    """
    if log:
        logger.info("Using numpy for secondary scattering correction")
    t0_ = time.perf_counter()
    Center_1 = int(Center_1)
    Center_2 = int(Center_2)

    if pre_caving:
        params_caving_numpy = {
            "Center_1": Center_1,
            "Center_2": Center_2,
            "Dummy": Dummy,
            "algorithm": "numpy",
            "return_mask": False,
            "log": log,
            **kwargs,
        }
        dataset_signal = process_data_caving(
            data=dataset_signal,
            **params_caving_numpy,
        )
        if dataset_variance is not None:
            dataset_variance = process_data_caving(
                data=dataset_variance,
                **params_caving_numpy,
            )

    # 1) Slice the original data around the center, this will be the convolution kernel
    WindowRoiSize = int(WindowRoiSize)
    t0 = time.perf_counter()
    subdataset_signal = dataset_signal[
        :,
        Center_2 - WindowRoiSize : Center_2 + WindowRoiSize,
        Center_1 - WindowRoiSize : Center_1 + WindowRoiSize,
    ].copy()
    t1 = time.perf_counter()

    # 2) Cover the dummy values in the subdataset (dummy values jeopardize the convolution)
    numpy.copyto(subdataset_signal, 0.0, where=subdataset_signal == Dummy)
    t2 = time.perf_counter()

    # 3) Shift the window
    array_window_shifted, mask_clip = shift_window(
        array_window=array_window,
        cx=Center_1,
        cy=Center_2,
    )
    t3 = time.perf_counter()

    # 4) Perform the convolution all across the dataset (3-dimensional)
    signal_2scat = numpy.array(
        [
            oaconvolve_numpy(array_window_shifted, subdata, mode="same")
            for subdata in subdataset_signal
        ]
    )
    t4 = time.perf_counter()

    # 5) Calculate the corrected signal, variance and sigma.
    # BUGFIX: the numexpr expressions previously used the raw arrays as the
    # where() condition ("where(dataset_signal, Dummy, ...)"), i.e. truthiness
    # instead of a comparison against Dummy, so the numexpr branch computed
    # something entirely different from the numpy branch below. The conditions
    # now mirror the numpy branch exactly.
    if use_numexpr:
        dataset_signal_corrected = numexpr.evaluate(
            "where(dataset_signal == Dummy, Dummy, dataset_signal - signal_2scat)"
        )
        if dataset_variance is not None:
            dataset_variance_corrected = numexpr.evaluate(
                "where(dataset_variance == Dummy, Dummy, dataset_variance + signal_2scat + 0.0)"
            )
            dataset_sigma_corrected = numexpr.evaluate(
                "where(dataset_variance == Dummy, Dummy, sqrt(dataset_variance_corrected))"
            )
        else:
            dataset_variance_corrected = None
            dataset_sigma_corrected = None
    else:
        dataset_signal_corrected = numpy.where(
            dataset_signal == Dummy, dataset_signal, dataset_signal - signal_2scat
        )

        if dataset_variance is not None:
            dataset_variance_corrected = numpy.where(
                dataset_variance == Dummy,
                dataset_variance,
                dataset_variance + signal_2scat + 0.0,
            )
            dataset_sigma_corrected = numpy.where(
                dataset_variance == Dummy,
                dataset_variance,
                numpy.sqrt(dataset_variance_corrected),
            )
        else:
            dataset_variance_corrected = None
            dataset_sigma_corrected = None
    t5 = time.perf_counter()

    # 6) Clip the data that could not be corrected
    if clip_data:
        if use_numexpr:
            dataset_signal_corrected = numexpr.evaluate(
                "where(mask_clip, Dummy, dataset_signal_corrected)"
            )

            if dataset_variance_corrected is not None:
                dataset_variance_corrected = numexpr.evaluate(
                    "where(mask_clip, Dummy, dataset_variance_corrected)"
                )
            if dataset_sigma_corrected is not None:
                dataset_sigma_corrected = numexpr.evaluate(
                    "where(mask_clip, Dummy, dataset_sigma_corrected)"
                )
        else:
            numpy.copyto(dataset_signal_corrected, Dummy, where=mask_clip)
            if dataset_variance_corrected is not None:
                numpy.copyto(dataset_variance_corrected, Dummy, where=mask_clip)
            if dataset_sigma_corrected is not None:
                numpy.copyto(dataset_sigma_corrected, Dummy, where=mask_clip)
    t6 = time.perf_counter()

    if log:
        nb_frames = len(dataset_signal)
        logger.info(
            f" 1) Subdata slicing per frame: {(t1 - t0) / nb_frames * 1000:.4f} ms"
        )
        logger.info(
            f" 2) Mask subdata per frame shifting: {(t2 - t1) / nb_frames * 1000:.4f} ms"
        )
        logger.info(f" 3) Window shifting: {(t3 - t2) * 1000:.4f} ms")
        logger.info(
            f" 4) Convolution per frame: {(t4 - t3) / nb_frames * 1000:.4f} ms"
        )
        logger.info(
            f" 5) Correction calculation per frame: {(t5 - t4) / nb_frames * 1000:.4f} ms"
        )
        logger.info(
            f" 6) Data clipping per frame: {(t6 - t5) / nb_frames * 1000:.4f} ms"
        )
        logger.info(
            f" 7) Total 2scat per frame: {(t6 - t0) / nb_frames * 1000:.4f} ms"
        )
        logger.info(
            f"Total time 2scat+cave per frame: {(t6 - t0_) / nb_frames*1000:.4f} ms"
        )

    return (
        dataset_signal_corrected,
        dataset_variance_corrected,
        dataset_sigma_corrected,
        signal_2scat,
    )
234
+
235
+
236
def _process_dataset_2scat_cupy(
    dataset_signal: numpy.ndarray,  # 3dim
    array_window: numpy.ndarray,  # Original, non-shifted
    Center_1: int,
    Center_2: int,
    WindowRoiSize: int = 120,
    Dummy: Optional[int] = -10,
    dataset_variance: Optional[numpy.ndarray] = None,
    clip_data: bool = True,
    pre_caving: bool = False,
    filename_mask_static: Optional[str] = None,
    filename_mask_reference: Optional[str] = None,
    flip_caving: bool = False,
    flip_horizontally_preference: bool = True,
    **kwargs: Optional[dict],
) -> Tuple[
    numpy.ndarray,
    Optional[numpy.ndarray],
    Optional[numpy.ndarray],
    numpy.ndarray,
]:
    """
    GPU (CuPy) implementation of the secondary scattering correction.

    Frames are processed one at a time: each frame (and its variance, when
    provided) is uploaded to the GPU, optionally caved first, then corrected by
    :func:`_process_data_2scat_cupy`, and the results are copied back to the
    pre-allocated host arrays.

    Inputs:
        - dataset_signal (numpy.ndarray): 3-dimensional stack of frames to correct.
        - array_window (numpy.ndarray): Window (wagon) pattern, original (non-shifted).
        - Center_1 (int): X-coordinate of the direct beam.
        - Center_2 (int): Y-coordinate of the direct beam.
        - WindowRoiSize (int): Half-size of the ROI used as the convolution kernel.
        - Dummy (Optional[int]): Value flagging invalid/masked pixels.
        - dataset_variance (Optional[numpy.ndarray]): Variance stack matching
          dataset_signal; corrected alongside the signal when provided.
        - clip_data (bool): Replace pixels that could not be corrected with Dummy.
        - pre_caving (bool): Run the caving correction on each frame first.
        - filename_mask_static (Optional[str]): Static mask file for caving.
        - filename_mask_reference (Optional[str]): Reference mask file for caving.
        - flip_caving (bool): Enable the single-axis (flip) caving masks.
        - flip_horizontally_preference (bool): Preferred flip axis for caving.
    Outputs:
        Tuple of (corrected signal, corrected variance or None,
        corrected sigma or None, secondary scattering term), all host arrays.
    """
    log_allocated_gpu_memory()
    Center_1 = int(Center_1)
    Center_2 = int(Center_2)
    WindowRoiSize = int(WindowRoiSize)
    if pre_caving:
        # Everything in this branch is frame-independent, so it is computed
        # once and reused for every frame of the stack inside the loop below.
        data_shape = dataset_signal.shape[1:]
        binning = kwargs.get("binning")

        y_vector, x_vector = numpy.meshgrid(
            numpy.arange(data_shape[0]),
            numpy.arange(data_shape[1]),
            indexing="ij",
            sparse=True,
        )
        # Centro-symmetric (point-reflected) coordinates about the beam center.
        # NOTE(review): the +1 offset presumably compensates pixel-grid parity
        # of the reflection — confirm against the caving implementation.
        x_shifted = 2 * int(Center_1) - x_vector + 1
        y_shifted = 2 * int(Center_2) - y_vector + 1

        x_shifted_cupy = cupy.asarray(x_shifted)
        y_shifted_cupy = cupy.asarray(y_shifted)
        x_vector_cupy = cupy.asarray(x_vector)
        y_vector_cupy = cupy.asarray(y_vector)

        mask_static_cupy = None
        if filename_mask_static:
            mask_static_cupy = get_persistent_array_mask(
                filename_mask=filename_mask_static,
                data_signal_shape=data_shape,
                binning=binning,
                use_cupy=True,
            )

        mask_reference = None
        if filename_mask_reference:
            mask_reference = get_persistent_array_mask(
                filename_mask=filename_mask_reference,
                data_signal_shape=data_shape,
                binning=binning,
                use_cupy=False,  # We don't use Cupy here
            )

        # Pixels recoverable through the point-symmetric reflection.
        mask_pixels_available_centrosymmetric_cupy = _mask_caving(
            data_shape,
            Center_1,
            Center_2,
            mask_reference,
            vertical_symmetry=True,
            horizontal_symmetry=True,
            use_cupy=True,
        )

        if flip_caving:
            # Single-axis masks, only needed when flip caving is enabled.
            mask_pixels_available_horizontal_cupy = _mask_caving(
                data_shape,
                Center_1,
                Center_2,
                mask_reference,
                vertical_symmetry=False,
                horizontal_symmetry=True,
                use_cupy=True,
            )
            mask_pixels_available_vertical_cupy = _mask_caving(
                data_shape,
                Center_1,
                Center_2,
                mask_reference,
                vertical_symmetry=True,
                horizontal_symmetry=False,
                use_cupy=True,
            )
        else:
            mask_pixels_available_horizontal_cupy = None
            mask_pixels_available_vertical_cupy = None

        # Per-frame keyword arguments for _process_data_caving_cupy.
        params_caving_cupy = {
            "mask_pixels_available_centrosymmetric_cupy": mask_pixels_available_centrosymmetric_cupy,
            "x_shifted_cupy": x_shifted_cupy,
            "y_shifted_cupy": y_shifted_cupy,
            "Dummy": Dummy,
            "mask_static_cupy": mask_static_cupy,
            "flip_caving": flip_caving,
            "flip_horizontally_preference": flip_horizontally_preference,
            "mask_pixels_available_horizontal_cupy": mask_pixels_available_horizontal_cupy,
            "mask_pixels_available_vertical_cupy": mask_pixels_available_vertical_cupy,
            "x_vector_cupy": x_vector_cupy,
            "y_vector_cupy": y_vector_cupy,
        }

    # Align the window pattern with the beam center (CPU), then upload it once.
    array_window_shifted, mask_clip = shift_window(
        array_window=array_window,
        cx=Center_1,
        cy=Center_2,
    )
    # Per-frame keyword arguments for _process_data_2scat_cupy.
    params_2scat_cupy = {
        "array_window_cupy": cupy.asarray(array_window_shifted),
        "Center_1": Center_1,
        "Center_2": Center_2,
        "WindowRoiSize": WindowRoiSize,
        "Dummy": Dummy,
        "clip_data": clip_data,
        "mask_clip_cupy": cupy.asarray(mask_clip),
    }

    # Host-side output buffers, filled frame by frame.
    dataset_signal_corrected = numpy.zeros_like(dataset_signal)
    dataset_signal_2scat = numpy.zeros_like(dataset_signal)
    dataset_variance_corrected = None
    dataset_sigma_corrected = None
    if dataset_variance is not None:
        dataset_variance_corrected = numpy.zeros_like(dataset_variance)
        dataset_sigma_corrected = numpy.zeros_like(dataset_variance)

    for index_frame, data_signal in enumerate(dataset_signal):
        # Upload the current frame (and its variance) to the GPU.
        data_signal_cupy = cupy.asarray(data_signal)
        if dataset_variance is not None:
            data_variance_cupy = cupy.asarray(dataset_variance[index_frame])
        else:
            data_variance_cupy = None

        if pre_caving:
            data_signal_cupy = _process_data_caving_cupy(
                data_cupy=data_signal_cupy,
                **params_caving_cupy,
            )
            if data_variance_cupy is not None:
                data_variance_cupy = _process_data_caving_cupy(
                    data_cupy=data_variance_cupy,
                    **params_caving_cupy,
                )

        (
            data_signal_corrected_cupy,
            data_variance_corrected_cupy,
            data_sigma_corrected_cupy,
            signal_2scat_cupy,
        ) = _process_data_2scat_cupy(
            data_signal=data_signal_cupy,
            data_variance=data_variance_cupy,
            **params_2scat_cupy,
        )

        # Copy the GPU results back into the host buffers.
        dataset_signal_corrected[index_frame] = data_signal_corrected_cupy.get()
        dataset_signal_2scat[index_frame] = signal_2scat_cupy.get()
        if data_variance_corrected_cupy is not None:
            dataset_variance_corrected[index_frame] = data_variance_corrected_cupy.get()
        if data_sigma_corrected_cupy is not None:
            dataset_sigma_corrected[index_frame] = data_sigma_corrected_cupy.get()
    return (
        dataset_signal_corrected,
        dataset_variance_corrected,
        dataset_sigma_corrected,
        dataset_signal_2scat,
    )
403
+
404
+
405
def _process_data_2scat_cupy(
    data_signal: numpy.ndarray,
    array_window_cupy: cupy.ndarray,  # already shifted window
    Center_1: int,
    Center_2: int,
    WindowRoiSize: int = 120,
    Dummy: Optional[int] = -10,
    data_variance: Optional[numpy.ndarray] = None,
    clip_data: bool = True,
    mask_clip_cupy: Optional[cupy.ndarray] = None,
    **kwargs: Optional[dict],
) -> Tuple[Optional[numpy.ndarray], Optional[numpy.ndarray]]:
    """
    Apply the secondary scattering correction to a single frame on the GPU.

    Inputs:
        - data_signal: 2-D frame (numpy or cupy array; numpy input is uploaded).
        - array_window_cupy: window pattern already shifted onto the beam center.
        - Center_1 / Center_2: X / Y coordinates of the direct beam.
        - WindowRoiSize: half-size of the ROI used as the convolution kernel.
        - Dummy: value flagging invalid/masked pixels.
        - data_variance: optional variance frame, corrected alongside the signal.
        - clip_data: replace pixels flagged by mask_clip_cupy with Dummy.
        - mask_clip_cupy: boolean mask of pixels that could not be corrected.
    Outputs:
        Tuple of (corrected signal, corrected variance or None,
        corrected sigma or None, secondary scattering term), device arrays.
    """

    def _to_device(array):
        # Pass device arrays through; upload host arrays; anything else -> None.
        if isinstance(array, cupy.ndarray):
            return array
        if isinstance(array, numpy.ndarray):
            return cupy.asarray(array)
        return None

    signal_gpu = _to_device(data_signal)
    variance_gpu = _to_device(data_variance) if data_variance is not None else None

    # 1) Slice the original data around the center, this will be the convolution kernel
    row_lo, row_hi = Center_2 - WindowRoiSize, Center_2 + WindowRoiSize
    col_lo, col_hi = Center_1 - WindowRoiSize, Center_1 + WindowRoiSize
    kernel_gpu = signal_gpu[row_lo:row_hi, col_lo:col_hi].copy()

    # 2) Cover the dummy values in the subdataset (dummy values jeopardize the convolution)
    cupy.copyto(kernel_gpu, 0.0, where=kernel_gpu == Dummy)

    # 3) Perform the convolution
    scattering_gpu = oaconvolve_cupy(array_window_cupy, kernel_gpu, mode="same")

    # 4) Calculate the corrected signal, variance and sigma
    valid_signal = signal_gpu != Dummy
    signal_corrected_gpu = cupy.where(
        valid_signal, signal_gpu - scattering_gpu, signal_gpu
    )
    if variance_gpu is None:
        variance_corrected_gpu = None
        sigma_corrected_gpu = None
    else:
        valid_variance = variance_gpu != Dummy
        variance_corrected_gpu = cupy.where(
            valid_variance, variance_gpu + scattering_gpu + 0.0, variance_gpu
        )
        sigma_corrected_gpu = cupy.where(
            valid_variance, cupy.sqrt(variance_corrected_gpu), variance_gpu
        )

    # 5) Clip the data that could not be corrected
    if clip_data:
        for corrected_gpu in (
            signal_corrected_gpu,
            variance_corrected_gpu,
            sigma_corrected_gpu,
        ):
            if corrected_gpu is not None:
                cupy.copyto(corrected_gpu, Dummy, where=mask_clip_cupy)

    return (
        signal_corrected_gpu,
        variance_corrected_gpu,
        sigma_corrected_gpu,
        scattering_gpu,
    )
480
+
481
+
482
def process_dataset_2scat(
    dataset_signal: numpy.ndarray,
    filename_window_wagon: str,
    Center_1: float,
    Center_2: float,
    WindowRoiSize: int = 120,
    Dummy: Optional[int] = -10,
    dataset_variance: Optional[numpy.ndarray] = None,
    algorithm: str = "numpy",
    clip_data: bool = True,
    pre_caving: bool = True,
    filename_mask_static: Optional[str] = None,
    filename_mask_reference: Optional[str] = None,
    flip_caving: bool = False,
    flip_horizontally_preference: bool = True,
    **kwargs,
) -> Tuple[
    Optional[numpy.ndarray],
    Optional[numpy.ndarray],
    Optional[numpy.ndarray],
    Optional[numpy.ndarray],
]:
    """
    Calculate the secondary scattering correction for the given dataset.

    Parameters:
        dataset_signal (numpy.ndarray): 2- or 3-dimensional stack of frames to
            correct. A 2-dimensional frame is treated as a single-frame stack.
        filename_window_wagon (str): Path to the window (wagon) pattern file.
        Center_1 (float): X-coordinate of the direct beam.
        Center_2 (float): Y-coordinate of the direct beam.
        WindowRoiSize (int): Half-size of the ROI extracted around the center
            and used as the convolution kernel. Defaults to 120.
        Dummy (Optional[int]): Value flagging invalid/masked pixels. Defaults to -10.
        dataset_variance (Optional[numpy.ndarray]): Variance stack corrected
            alongside the signal when provided.
        algorithm (str): "numpy" or "cupy"; falls back to DEFAULT_ALGORITHM when
            unknown or when CuPy is not available.
        clip_data (bool): Replace pixels that could not be corrected with Dummy.
        pre_caving (bool): Run the caving correction before the 2scat correction.
        filename_mask_static (Optional[str]): Static mask file for caving.
        filename_mask_reference (Optional[str]): Reference mask file for caving.
        flip_caving (bool): Enable single-axis (flip) caving.
        flip_horizontally_preference (bool): Preferred flip axis for caving.

    Returns:
        Tuple[Optional[numpy.ndarray], ...]: (corrected signal, corrected
        variance or None, corrected sigma or None, secondary scattering term);
        a tuple of four None on error.
    """
    results = (None, None, None, None)

    if dataset_signal is None:
        logger.error("Dataset is None. Sec. scattering correction cannot be performed")
        return results

    if dataset_signal.ndim not in (2, 3):
        logger.error(
            f"Dataset with shape {dataset_signal.shape} must be 2 or 3-dimensional"
        )
        return results

    # BUGFIX: the correction algorithms require a 3-dimensional stack, but 2-D
    # input was accepted and then crashed downstream with an indexing error.
    # Promote a single frame to a one-frame stack instead.
    if dataset_signal.ndim == 2:
        dataset_signal = dataset_signal[numpy.newaxis]
        if dataset_variance is not None and dataset_variance.ndim == 2:
            dataset_variance = dataset_variance[numpy.newaxis]

    # Load the additional data
    if filename_window_wagon is None:
        logger.error(
            "Window pattern data is None. Cannot perform secondary scattering correction"
        )
        return results

    binning = kwargs.get("binning")
    data_signal_shape = dataset_signal[0].shape

    array_window_wagon = get_persistent_array_window_wagon(
        filename_window_wagon=filename_window_wagon,
        data_signal_shape=data_signal_shape,
        datatype=dataset_signal.dtype,
        binning=binning,
        use_cupy=False,  # shift_window is a numpy method
    )

    if array_window_wagon is None:
        logger.error(
            f"{filename_window_wagon} could not be loaded. Cannot perform secondary scattering correction"
        )
        return results

    if algorithm not in ALGORITHMS_AVAILABLE:
        logger.warning(
            f"Algorithm '{algorithm}' is not available. Using '{DEFAULT_ALGORITHM}' instead."
        )
        algorithm = DEFAULT_ALGORITHM
    elif algorithm == "cupy" and not CUPY_AVAILABLE:
        logger.warning(f"CuPy is not available. Using {DEFAULT_ALGORITHM} instead.")
        algorithm = DEFAULT_ALGORITHM
    # Resolved once, after the fallback above (the original also computed this
    # before validation, which was dead code).
    use_cupy = algorithm == "cupy"

    params_2scat = {
        "dataset_signal": dataset_signal,
        "dataset_variance": dataset_variance,
        "Center_1": Center_1,
        "Center_2": Center_2,
        "array_window": array_window_wagon,
        "WindowRoiSize": WindowRoiSize,
        "Dummy": Dummy,
        "clip_data": clip_data,
        "pre_caving": pre_caving,
        "filename_mask_static": filename_mask_static,
        "filename_mask_reference": filename_mask_reference,
        "flip_caving": flip_caving,
        "flip_horizontally_preference": flip_horizontally_preference,
        "use_cupy": use_cupy,
        **kwargs,
    }

    results = ALGORITHMS_AVAILABLE[algorithm]["algorithm"](**params_2scat)
    return results
591
+
592
+
593
# Registry of available secondary-scattering implementations, keyed by the
# ``algorithm`` argument of :func:`process_dataset_2scat`. Each entry maps to
# the dataset-level implementation and whether it runs on the GPU.
ALGORITHMS_AVAILABLE = {
    "numpy": {"algorithm": _process_dataset_2scat_numpy, "use_cupy": False},
    "cupy": {"algorithm": _process_dataset_2scat_cupy, "use_cupy": True},
}
# Fallback used when the requested algorithm is unknown or CuPy is missing.
DEFAULT_ALGORITHM = "numpy"
@@ -0,0 +1,76 @@
1
+ Metadata-Version: 2.4
2
+ Name: ewoksid02
3
+ Version: 0.1.0
4
+ Summary: Data processing SAXS and XPCS workflows for ID02
5
+ Author-email: ESRF <edgar.gutierrez-fernandez@esrf.fr>
6
+ License: # MIT License
7
+
8
+ **Copyright (c) 2024 European Synchrotron Radiation Facility**
9
+
10
+ Permission is hereby granted, free of charge, to any person obtaining a copy of
11
+ this software and associated documentation files (the "Software"), to deal in
12
+ the Software without restriction, including without limitation the rights to
13
+ use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
14
+ the Software, and to permit persons to whom the Software is furnished to do so,
15
+ subject to the following conditions:
16
+
17
+ The above copyright notice and this permission notice shall be included in all
18
+ copies or substantial portions of the Software.
19
+
20
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
21
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
22
+ FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
23
+ COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
24
+ IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
25
+ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26
+
27
+ Project-URL: Homepage, https://gitlab.esrf.fr/workflow/ewoksapps/ewoksid02/
28
+ Project-URL: Documentation, https://ewoksid02.readthedocs.io/
29
+ Project-URL: Repository, https://gitlab.esrf.fr/workflow/ewoksapps/ewoksid02/
30
+ Project-URL: Issues, https://gitlab.esrf.fr/workflow/ewoksapps/ewoksid02/issues
31
+ Project-URL: Changelog, https://gitlab.esrf.fr/workflow/ewoksapps/ewoksid02/-/blob/main/CHANGELOG.md
32
+ Keywords: ewoks,ID02,SAXS,XPCS
33
+ Classifier: Intended Audience :: Science/Research
34
+ Classifier: License :: OSI Approved :: MIT License
35
+ Classifier: Programming Language :: Python :: 3
36
+ Requires-Python: >=3.8
37
+ Description-Content-Type: text/markdown
38
+ License-File: LICENSE.md
39
+ Requires-Dist: ewoks
40
+ Requires-Dist: ewoksjob[blissworker]
41
+ Requires-Dist: ewoksjob[slurm]
42
+ Requires-Dist: ewoksppf
43
+ Requires-Dist: h5py
44
+ Requires-Dist: numpy<2
45
+ Requires-Dist: scipy
46
+ Requires-Dist: silx
47
+ Requires-Dist: pyfai
48
+ Requires-Dist: blissdata
49
+ Requires-Dist: numexpr!=2.8.6
50
+ Requires-Dist: psutil
51
+ Requires-Dist: pyyaml
52
+ Provides-Extra: test
53
+ Requires-Dist: pytest>=7; extra == "test"
54
+ Provides-Extra: dev
55
+ Requires-Dist: ewoksid02[test]; extra == "dev"
56
+ Requires-Dist: black>=25; extra == "dev"
57
+ Requires-Dist: flake8>=4; extra == "dev"
58
+ Provides-Extra: doc
59
+ Requires-Dist: ewoksid02[test]; extra == "doc"
60
+ Requires-Dist: sphinx>=4.5; extra == "doc"
61
+ Requires-Dist: sphinx-autodoc-typehints>=1.16; extra == "doc"
62
+ Requires-Dist: pydata-sphinx-theme; extra == "doc"
63
+ Requires-Dist: nbsphinx; extra == "doc"
64
+ Requires-Dist: ipython; extra == "doc"
65
+ Requires-Dist: ewokssphinx; extra == "doc"
66
+ Provides-Extra: cupy
67
+ Requires-Dist: cupy; extra == "cupy"
68
+ Dynamic: license-file
69
+
70
+ # ewoksid02
71
+
72
+ Data processing SAXS and XPCS workflows for ID02
73
+
74
+ ## Documentation
75
+
76
+ https://ewoksid02.readthedocs.io/