ewoksid02 0.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (54) hide show
  1. ewoksid02/__init__.py +0 -0
  2. ewoksid02/ocl/__init__.py +0 -0
  3. ewoksid02/resources/__init__.py +8 -0
  4. ewoksid02/resources/saxs_loop.json +96 -0
  5. ewoksid02/resources/template_saxs.yaml +37 -0
  6. ewoksid02/scripts/__init__.py +0 -0
  7. ewoksid02/scripts/__main__.py +70 -0
  8. ewoksid02/scripts/parsers.py +224 -0
  9. ewoksid02/scripts/saxs/__init__.py +0 -0
  10. ewoksid02/scripts/saxs/main.py +255 -0
  11. ewoksid02/scripts/saxs/slurm_python_post_script.py +3 -0
  12. ewoksid02/scripts/saxs/slurm_python_pre_script.py +5 -0
  13. ewoksid02/scripts/utils.py +21 -0
  14. ewoksid02/scripts/xpcs/__init__.py +0 -0
  15. ewoksid02/scripts/xpcs/__main__.py +3 -0
  16. ewoksid02/tasks/__init__.py +7 -0
  17. ewoksid02/tasks/averagetask.py +179 -0
  18. ewoksid02/tasks/azimuthaltask.py +272 -0
  19. ewoksid02/tasks/cavingtask.py +170 -0
  20. ewoksid02/tasks/dahuprocessingtask.py +71 -0
  21. ewoksid02/tasks/end.py +35 -0
  22. ewoksid02/tasks/id02processingtask.py +2582 -0
  23. ewoksid02/tasks/looptask.py +672 -0
  24. ewoksid02/tasks/metadatatask.py +879 -0
  25. ewoksid02/tasks/normalizationtask.py +204 -0
  26. ewoksid02/tasks/scalerstask.py +46 -0
  27. ewoksid02/tasks/secondaryscatteringtask.py +159 -0
  28. ewoksid02/tasks/sumtask.py +45 -0
  29. ewoksid02/tests/__init__.py +3 -0
  30. ewoksid02/tests/conftest.py +639 -0
  31. ewoksid02/tests/debug.py +64 -0
  32. ewoksid02/tests/test_2scat_node.py +119 -0
  33. ewoksid02/tests/test_ave_node.py +106 -0
  34. ewoksid02/tests/test_azim_node.py +89 -0
  35. ewoksid02/tests/test_cave_node.py +118 -0
  36. ewoksid02/tests/test_norm_node.py +190 -0
  37. ewoksid02/tests/test_saxs.py +69 -0
  38. ewoksid02/tests/test_sumtask.py +10 -0
  39. ewoksid02/tests/utils.py +514 -0
  40. ewoksid02/utils/__init__.py +22 -0
  41. ewoksid02/utils/average.py +158 -0
  42. ewoksid02/utils/blissdata.py +1157 -0
  43. ewoksid02/utils/caving.py +851 -0
  44. ewoksid02/utils/cupyutils.py +42 -0
  45. ewoksid02/utils/io.py +722 -0
  46. ewoksid02/utils/normalization.py +804 -0
  47. ewoksid02/utils/pyfai.py +424 -0
  48. ewoksid02/utils/secondaryscattering.py +597 -0
  49. ewoksid02-0.1.0.dist-info/METADATA +76 -0
  50. ewoksid02-0.1.0.dist-info/RECORD +54 -0
  51. ewoksid02-0.1.0.dist-info/WHEEL +5 -0
  52. ewoksid02-0.1.0.dist-info/entry_points.txt +5 -0
  53. ewoksid02-0.1.0.dist-info/licenses/LICENSE.md +20 -0
  54. ewoksid02-0.1.0.dist-info/top_level.txt +1 -0
@@ -0,0 +1,804 @@
1
+ import logging
2
+ import time
3
+ from typing import Optional
4
+ from functools import lru_cache
5
+
6
+ import numexpr
7
+ import numpy
8
+ from pyFAI.version import MAJOR
9
+
10
+ if MAJOR >= 2025:
11
+ from pyFAI.integrator.azimuthal import AzimuthalIntegrator
12
+ else:
13
+ from pyFAI.azimuthalIntegrator import AzimuthalIntegrator
14
+ try:
15
+ import cupy
16
+
17
+ from .cupyutils import log_allocated_gpu_memory
18
+ except ImportError:
19
+ cupy = None
20
+ CUPY_AVAILABLE = False
21
+ CUPY_MEM_POOL = None
22
+ else:
23
+ CUPY_AVAILABLE = True
24
+ CUPY_MEM_POOL = cupy.get_default_memory_pool()
25
+
26
+ from ewoksid02.utils.pyfai import _get_persistent_azimuthal_integrator
27
+
28
+ from .io import (
29
+ get_persistent_array_mask,
30
+ get_persistent_array_dark,
31
+ get_persistent_array_flat,
32
+ )
33
+
34
+ logger = logging.getLogger(__name__)
35
+ logger.setLevel(logging.INFO)
36
+
37
+
38
def _normalize_dataset_cupy(
    dataset_signal: numpy.ndarray,
    dataset_variance: Optional[numpy.ndarray] = None,
    monitor_values: numpy.ndarray = None,
    array_mask=None,
    array_dark=None,
    array_normalization=None,
    dummy: float = -10,
    datatype: str = "float32",
    **kwargs,
):
    """Normalize a stack of frames on the GPU with CuPy, one frame at a time.

    Each frame is transferred to the GPU, dark-corrected, divided by the
    per-frame normalization array (static array scaled by the monitor value),
    masked, and copied back into pre-allocated host arrays.

    Returns a (signal, variance, sigma) tuple of numpy arrays; variance and
    sigma stay zero-filled when *dataset_variance* is None.
    """
    log_allocated_gpu_memory()
    start = time.perf_counter()

    out_signal = numpy.zeros_like(dataset_signal, dtype=datatype)
    out_variance = numpy.zeros_like(dataset_signal, dtype=datatype)
    out_sigma = numpy.zeros_like(dataset_signal, dtype=datatype)

    nb_frames = len(dataset_signal)
    if monitor_values is None:
        monitor_values = numpy.ones(nb_frames, dtype=datatype)
    elif isinstance(monitor_values, (int, float)):
        monitor_values = numpy.full(nb_frames, monitor_values, dtype=datatype)

    for frame_idx, (frame, monitor) in enumerate(zip(dataset_signal, monitor_values)):
        frame_gpu = cupy.asarray(frame, dtype=datatype)

        # Dark current is subtracted from the raw signal only, never from the
        # normalization array.
        if array_dark is not None:
            frame_gpu -= array_dark

        variance_gpu = None
        if dataset_variance is not None:
            variance_gpu = cupy.asarray(dataset_variance[frame_idx], dtype=datatype)

        # Per-frame normalization: static array scaled by the monitor value.
        norm_gpu = array_normalization * monitor
        zero_norm = norm_gpu == 0.0

        # Pixels with zero normalization are flagged with *dummy*.
        signal_norm_gpu = cupy.where(zero_norm, dummy, frame_gpu / norm_gpu)

        variance_norm_gpu = None
        sigma_norm_gpu = None
        if variance_gpu is not None:
            variance_norm_gpu = cupy.where(
                zero_norm, dummy, variance_gpu / (norm_gpu * norm_gpu)
            )
            sigma_norm_gpu = cupy.where(
                zero_norm, dummy, cupy.sqrt(variance_gpu) / cupy.abs(norm_gpu)
            )

        # Apply the detector mask to every produced array.
        if array_mask is not None:
            signal_norm_gpu = cupy.where(array_mask, dummy, signal_norm_gpu)
            if variance_gpu is not None:
                variance_norm_gpu = cupy.where(array_mask, dummy, variance_norm_gpu)
                sigma_norm_gpu = cupy.where(array_mask, dummy, sigma_norm_gpu)

        # Copy results back into the pre-allocated host buffers.
        out_signal[frame_idx] = signal_norm_gpu.get()
        if variance_norm_gpu is not None:
            out_variance[frame_idx] = variance_norm_gpu.get()
        if sigma_norm_gpu is not None:
            out_sigma[frame_idx] = sigma_norm_gpu.get()

    stop = time.perf_counter()
    logger.debug(f"Total time for normalization: {stop - start:.2f} seconds")
    return (
        out_signal,
        out_variance,
        out_sigma,
    )
132
+
133
+
134
+ def _normalize_dataset_numpy(
135
+ dataset_signal: numpy.ndarray,
136
+ dataset_variance: Optional[numpy.ndarray] = None,
137
+ monitor_values: numpy.ndarray = None,
138
+ array_mask: numpy.ndarray = None,
139
+ array_dark: numpy.ndarray = None,
140
+ array_normalization: numpy.ndarray = None,
141
+ dummy: float = -10,
142
+ datatype: str = "float32",
143
+ **kwargs,
144
+ ):
145
+ dataset_signal_normalized = numpy.zeros_like(dataset_signal, dtype=datatype)
146
+ dataset_variance_normalized = numpy.zeros_like(dataset_signal, dtype=datatype)
147
+ dataset_sigma_normalized = numpy.zeros_like(dataset_signal, dtype=datatype)
148
+
149
+ nb_frames = len(dataset_signal)
150
+ if monitor_values is None:
151
+ monitor_values = numpy.ones(nb_frames, dtype=datatype)
152
+ elif isinstance(monitor_values, (int, float)):
153
+ monitor_values = numpy.full(nb_frames, monitor_values, dtype=datatype)
154
+ if len(monitor_values) != nb_frames:
155
+ monitor_values = monitor_values[0:nb_frames]
156
+
157
+ # Remove dark from raw (not from norm)
158
+ if array_dark is not None:
159
+ dataset_signal -= array_dark
160
+
161
+ # Get final normalization dataset with nb frames
162
+ dataset_normalization = (
163
+ numpy.broadcast_to(array_normalization, dataset_signal.shape).copy()
164
+ * monitor_values[:, None, None]
165
+ )
166
+
167
+ # Normalize the signal
168
+ dataset_signal_normalized = numpy.where(
169
+ dataset_normalization == 0.0,
170
+ dummy,
171
+ dataset_signal / dataset_normalization,
172
+ )
173
+
174
+ # Normalize the variance and sigma
175
+ if dataset_variance is not None:
176
+ dataset_variance_normalized = numpy.where(
177
+ dataset_normalization == 0.0,
178
+ dummy,
179
+ dataset_variance / (dataset_normalization * dataset_normalization),
180
+ )
181
+ dataset_sigma_normalized = numpy.where(
182
+ dataset_normalization == 0.0,
183
+ dummy,
184
+ numpy.sqrt(dataset_variance) / numpy.abs(dataset_normalization),
185
+ )
186
+ else:
187
+ dataset_variance_normalized = None
188
+ dataset_sigma_normalized = None
189
+
190
+ # Mask all results
191
+ if array_mask is not None:
192
+ dataset_signal_normalized = numpy.where(
193
+ array_mask, dummy, dataset_signal_normalized
194
+ )
195
+ if dataset_variance_normalized is not None:
196
+ dataset_variance_normalized = numpy.where(
197
+ array_mask, dummy, dataset_variance_normalized
198
+ )
199
+ if dataset_sigma_normalized is not None:
200
+ dataset_sigma_normalized = numpy.where(
201
+ array_mask, dummy, dataset_sigma_normalized
202
+ )
203
+
204
+ return (
205
+ dataset_signal_normalized,
206
+ dataset_variance_normalized,
207
+ dataset_sigma_normalized,
208
+ )
209
+
210
+
211
def _normalize_dataset_opencl(
    detector_distortion,
    dataset_signal: numpy.ndarray,
    dataset_variance: Optional[numpy.ndarray] = None,
    monitor_values: numpy.ndarray = None,
    array_mask: numpy.ndarray = None,
    array_flat: numpy.ndarray = None,
    array_dark: numpy.ndarray = None,
    array_polarization: numpy.ndarray = None,
    array_solidangle: numpy.ndarray = None,
    normalization_factor: float = 1.0,
    Dummy: float = -10,
    DDummy: float = 0.01,
    datatype: str = "float32",
    **kwargs,
):
    """Normalize a stack of frames with pyFAI's OpenCL distortion integrator.

    Dark/flat/solid-angle/polarization corrections are delegated to
    ``integrate_ng``; the per-frame normalization factor combines the monitor
    value and *normalization_factor*.

    Returns a (signal, variance, sigma) tuple of numpy arrays; variance and
    sigma stay zero-filled when the integrator does not produce them.
    """
    from pyFAI.distortion import Distortion

    dataset_normalized_signal = numpy.zeros_like(dataset_signal, dtype=datatype)
    dataset_normalized_variance = numpy.zeros_like(dataset_signal, dtype=datatype)
    dataset_normalized_sigma = numpy.zeros_like(dataset_signal, dtype=datatype)

    nb_frames = len(dataset_signal)
    if monitor_values is None:
        monitor_values = numpy.ones(nb_frames, dtype=datatype)
    elif isinstance(monitor_values, (int, float)):
        monitor_values = numpy.full(nb_frames, monitor_values, dtype=datatype)

    distortion = Distortion(
        detector_distortion, method="csr", device="gpu", mask=array_mask, empty=Dummy
    )
    distortion.reset(prepare=True)  # enforce initialization

    if distortion.integrator is None:
        distortion.calc_init()

    for index_frame, data_signal, monitor_value in zip(
        range(nb_frames), dataset_signal, monitor_values
    ):
        variance = None
        if dataset_variance is not None:
            variance = dataset_variance[index_frame]
        result = distortion.integrator.integrate_ng(
            data=data_signal,
            variance=variance,
            flat=array_flat,
            dark=array_dark,
            solidangle=array_solidangle,
            polarization=array_polarization,
            dummy=Dummy,
            delta_dummy=DDummy,
            normalization_factor=monitor_value / normalization_factor,
            out_merged=False,
        )
        # integrate_ng returns flattened arrays; restore the frame shape.
        dataset_normalized_signal[index_frame] = result.intensity.reshape(
            data_signal.shape
        )
        if result.variance is not None:
            # BUGFIX: reshape like intensity/sigma; the flat variance array
            # could not be assigned into the 2D frame slot.
            dataset_normalized_variance[index_frame] = result.variance.reshape(
                data_signal.shape
            )
        if result.sigma is not None:
            dataset_normalized_sigma[index_frame] = result.sigma.reshape(
                data_signal.shape
            )

    return (
        dataset_normalized_signal,
        dataset_normalized_variance,
        dataset_normalized_sigma,
    )
280
+
281
+
282
def _normalize_dataset_cython(
    dataset_signal: numpy.ndarray,
    dataset_variance: Optional[numpy.ndarray] = None,
    monitor_values: numpy.ndarray = None,
    array_mask: numpy.ndarray = None,
    array_flat: numpy.ndarray = None,
    array_dark: numpy.ndarray = None,
    array_polarization: numpy.ndarray = None,
    array_solidangle: numpy.ndarray = None,
    normalization_factor: float = 1.0,
    Dummy: float = -10,
    DDummy: float = 0.01,
    datatype: str = "float32",
    **kwargs,
):
    """Normalize a stack of frames with pyFAI's Cython ``preproc`` kernel.

    Without a variance stack, ``preproc`` returns the normalized signal
    directly.  With one, it returns a (signal, variance, normalization)
    split that is recombined here with numexpr; pixels whose normalization
    is zero are flagged with *Dummy*.

    Returns a (signal, variance, sigma) tuple of numpy arrays; variance and
    sigma stay zero-filled when *dataset_variance* is None.
    """
    from pyFAI.ext.preproc import preproc as preproc_cy

    out_signal = numpy.zeros_like(dataset_signal, dtype=datatype)
    out_variance = numpy.zeros_like(dataset_signal, dtype=datatype)
    out_sigma = numpy.zeros_like(dataset_signal, dtype=datatype)

    # Corrections shared by every frame.
    preproc_kwargs = dict(
        dark=array_dark,
        flat=array_flat,
        mask=array_mask,
        solidangle=array_solidangle,
        polarization=array_polarization,
        dummy=Dummy,
        delta_dummy=DDummy,
        empty=None,
    )

    nb_frames = len(dataset_signal)

    if monitor_values is None:
        monitor_values = numpy.ones(nb_frames, dtype=datatype)
    elif isinstance(monitor_values, (int, float)):
        monitor_values = numpy.full(nb_frames, monitor_values, dtype=datatype)

    for index_frame, (frame, monitor) in enumerate(zip(dataset_signal, monitor_values)):
        if dataset_variance is None:
            out_signal[index_frame] = preproc_cy(
                raw=frame,
                variance=None,
                normalization_factor=monitor / normalization_factor,
                **preproc_kwargs,
            )
            continue

        split = preproc_cy(
            raw=frame,
            variance=dataset_variance[index_frame],
            normalization_factor=monitor / normalization_factor,
            **preproc_kwargs,
        )
        # numexpr resolves these names from the local frame, so the names
        # below are load-bearing even though they look unused.
        pp_signal = split[:, :, 0]  # noqa
        pp_variance = split[:, :, 1]  # noqa
        pp_normalisation = split[:, :, 2]  # noqa

        out_signal[index_frame] = numexpr.evaluate(
            f"where(pp_normalisation == 0.0, {Dummy}, pp_signal / pp_normalisation)"
        )
        out_variance[index_frame] = numexpr.evaluate(
            f"where(pp_normalisation == 0.0, {Dummy}, pp_variance / (pp_normalisation * pp_normalisation))"
        )
        out_sigma[index_frame] = numexpr.evaluate(
            f"where(pp_normalisation == 0.0, {Dummy}, sqrt(pp_variance) / abs(pp_normalisation))"
        )

    return (
        out_signal,
        out_variance,
        out_sigma,
    )
357
+
358
+
359
# Registry of the available normalization back-ends.
# "method": the implementation function taking the common keyword set;
# "use_cupy": whether auxiliary arrays (mask/flat/normalization) should be
# materialized as CuPy arrays on the GPU; "name": label used in log messages.
ALGORITHMS_NORMALIZATION = {
    "cython": {
        "method": _normalize_dataset_cython,
        "use_cupy": False,
        "name": "cython",
    },
    "opencl": {
        "method": _normalize_dataset_opencl,
        "use_cupy": False,
        "name": "opencl",
    },
    "numpy": {"method": _normalize_dataset_numpy, "use_cupy": False, "name": "numpy"},
    "cupy": {"method": _normalize_dataset_cupy, "use_cupy": True, "name": "cupy"},
}
# Fallback used by _parse_norm_algorithm when the requested back-end is
# unknown or requires an unavailable CuPy installation.
DEFAULT_ALGORITHM_NORMALIZATION = "numpy"
374
+
375
+
376
def _parse_norm_algorithm(algorithm: str) -> dict:
    """Resolve *algorithm* to an ALGORITHMS_NORMALIZATION entry.

    Falls back to DEFAULT_ALGORITHM_NORMALIZATION (with a warning) when the
    name is unknown, or when "cupy" is requested but CuPy is not importable.
    """
    if algorithm not in ALGORITHMS_NORMALIZATION:
        message = f"Algorithm '{algorithm}' is not available. Using '{DEFAULT_ALGORITHM_NORMALIZATION}' instead."
        logger.warning(message)
        algorithm = DEFAULT_ALGORITHM_NORMALIZATION
    elif algorithm == "cupy" and not CUPY_AVAILABLE:
        message = f"CuPy is not available. Using {DEFAULT_ALGORITHM_NORMALIZATION} instead."
        logger.warning(message)
        algorithm = DEFAULT_ALGORITHM_NORMALIZATION
    logger.debug(f"Performing normalization with algorithm: {algorithm}")
    return ALGORITHMS_NORMALIZATION[algorithm]
389
+
390
+
391
+ def _parse_datatype(datatype):
392
+ if datatype in ("float32", numpy.float32):
393
+ datatype = numpy.dtype("float32")
394
+ elif datatype in ("float64", numpy.float64):
395
+ datatype = numpy.dtype("float64")
396
+ return datatype
397
+
398
+
399
def get_array_polarization(
    azimuthal_integrator: AzimuthalIntegrator,
    polarization_factor: float = None,
    polarization_axis: int = 0,
    use_cupy: bool = False,
):
    """Compute the polarization correction array for *azimuthal_integrator*.

    Returns a CuPy array when *use_cupy* is True and CuPy is importable,
    otherwise the array produced by pyFAI as-is.
    """
    polarization = azimuthal_integrator.polarization(
        factor=polarization_factor,
        axis_offset=polarization_axis,
    )
    if not (use_cupy and CUPY_AVAILABLE):
        return polarization
    return cupy.asarray(polarization)
412
+
413
+
414
@lru_cache(maxsize=5)
def _get_persistent_array_polarization(
    data_signal_shape: tuple,
    dist: float,
    wavelength: float,
    psize_1: float,
    psize_2: float,
    center_1: float = None,
    center_2: float = None,
    poni1: float = None,
    poni2: float = None,
    rot1: float = 0.0,
    rot2: float = 0.0,
    rot3: float = 0.0,
    polarization_factor: float = None,
    polarization_axis: int = 0,
    use_cupy: bool = False,
):
    """Cached polarization array for a given detector geometry.

    Builds (or reuses) the persistent azimuthal integrator for the geometry
    parameters and delegates to get_array_polarization.
    """
    geometry = dict(
        data_signal_shape=data_signal_shape,
        dist=dist,
        wavelength=wavelength,
        psize_1=psize_1,
        psize_2=psize_2,
        center_1=center_1,
        center_2=center_2,
        poni1=poni1,
        poni2=poni2,
        rot1=rot1,
        rot2=rot2,
        rot3=rot3,
    )
    integrator = _get_persistent_azimuthal_integrator(**geometry)
    return get_array_polarization(
        azimuthal_integrator=integrator,
        polarization_factor=polarization_factor,
        polarization_axis=polarization_axis,
        use_cupy=use_cupy,
    )
452
+
453
+
454
def get_array_solidangle(
    azimuthal_integrator: AzimuthalIntegrator,
    absolute: bool = True,
    use_cupy: bool = False,
):
    """Compute the solid-angle correction array for *azimuthal_integrator*.

    Returns a CuPy array when *use_cupy* is True and CuPy is importable,
    otherwise the array produced by pyFAI as-is.
    """
    solid_angle = azimuthal_integrator.solidAngleArray(absolute=absolute)
    if use_cupy and CUPY_AVAILABLE:
        solid_angle = cupy.asarray(solid_angle)
    return solid_angle
463
+
464
+
465
@lru_cache(maxsize=5)
def _get_persistent_array_solidangle(
    data_signal_shape: tuple,
    dist: float,
    wavelength: float,
    psize_1: float,
    psize_2: float,
    center_1: float = None,
    center_2: float = None,
    poni1: float = None,
    poni2: float = None,
    rot1: float = 0.0,
    rot2: float = 0.0,
    rot3: float = 0.0,
    absolute: bool = True,
    use_cupy: bool = False,
):
    """Cached solid-angle array for a given detector geometry.

    Builds (or reuses) the persistent azimuthal integrator for the geometry
    parameters and delegates to get_array_solidangle.
    """
    geometry = dict(
        data_signal_shape=data_signal_shape,
        dist=dist,
        wavelength=wavelength,
        psize_1=psize_1,
        psize_2=psize_2,
        center_1=center_1,
        center_2=center_2,
        poni1=poni1,
        poni2=poni2,
        rot1=rot1,
        rot2=rot2,
        rot3=rot3,
    )
    integrator = _get_persistent_azimuthal_integrator(**geometry)
    return get_array_solidangle(
        azimuthal_integrator=integrator,
        absolute=absolute,
        use_cupy=use_cupy,
    )
501
+
502
+
503
@lru_cache(maxsize=5)
def _get_persistent_array_normalization(
    data_signal_shape: tuple,
    dist: float,
    wavelength: float,
    psize_1: float,
    psize_2: float,
    center_1: float = None,
    center_2: float = None,
    poni1: float = None,
    poni2: float = None,
    rot1: float = 0.0,
    rot2: float = 0.0,
    rot3: float = 0.0,
    normalization_factor: float = None,
    polarization_factor: float = None,
    polarization_axis: int = 0,
    filename_flat: str = None,
    filename_mask: str = None,
    absolute_solidangle: bool = True,
    dummy: int = None,
    delta_dummy: float = None,
    binning: tuple = (1, 1),
    use_cupy: bool = False,
    datatype: str = "float32",
):
    """Build (and cache) the combined static normalization array.

    The array is the product of the polarization correction, the flat-field
    and the solid-angle correction, divided by *normalization_factor*; masked
    pixels are set to *dummy*.  All geometry parameters must be hashable so
    the result can be cached per configuration.
    """
    # Start from a ones array on the requested device (GPU when use_cupy).
    if use_cupy and CUPY_AVAILABLE:
        normalization_array = cupy.ones(
            data_signal_shape,
            dtype=datatype,
        )
    else:
        normalization_array = numpy.ones(
            data_signal_shape,
            dtype=datatype,
        )

    array_polarization = _get_persistent_array_polarization(
        data_signal_shape=data_signal_shape,
        dist=dist,
        wavelength=wavelength,
        psize_1=psize_1,
        psize_2=psize_2,
        center_1=center_1,
        center_2=center_2,
        poni1=poni1,
        poni2=poni2,
        rot1=rot1,
        rot2=rot2,
        rot3=rot3,
        polarization_factor=polarization_factor,
        polarization_axis=polarization_axis,
        use_cupy=use_cupy,
    )
    if array_polarization is not None:
        normalization_array *= array_polarization

    array_flat_field = get_persistent_array_flat(
        filename_flat=filename_flat,
        data_signal_shape=data_signal_shape,
        datatype=datatype,
        binning=binning,
        dummy=dummy,
        delta_dummy=delta_dummy,
        filename_mask=filename_mask,
        use_cupy=use_cupy,
    )
    if array_flat_field is not None:
        normalization_array *= array_flat_field

    array_solidangle = _get_persistent_array_solidangle(
        data_signal_shape=data_signal_shape,
        dist=dist,
        wavelength=wavelength,
        psize_1=psize_1,
        psize_2=psize_2,
        center_1=center_1,
        center_2=center_2,
        poni1=poni1,
        poni2=poni2,
        rot1=rot1,
        rot2=rot2,
        rot3=rot3,
        absolute=absolute_solidangle,
        use_cupy=use_cupy,
    )
    if array_solidangle is not None:
        normalization_array *= array_solidangle
    if normalization_factor is not None:
        normalization_array /= normalization_factor

    array_mask = get_persistent_array_mask(
        filename_mask=filename_mask,
        data_signal_shape=data_signal_shape,
        binning=binning,
        use_cupy=use_cupy,
    )
    # BUGFIX: test `dummy is not None` instead of truthiness, so that a
    # dummy value of 0 still marks masked pixels.
    if array_mask is not None and dummy is not None:
        if use_cupy and CUPY_AVAILABLE:
            normalization_array = cupy.where(array_mask, dummy, normalization_array)
        else:
            normalization_array = numpy.where(array_mask, dummy, normalization_array)
    return normalization_array
607
+
608
+
609
def calculate_dataset_variance(
    dataset_signal: numpy.ndarray,
    variance_formula: str = None,
    dark: numpy.ndarray = None,
):
    """Evaluate *variance_formula* over the signal stack with numexpr.

    The formula may reference the variables ``data`` (the signal) and
    ``dark`` (the dark frame, 0 when none is given).  Returns None when no
    formula is provided.
    """
    if not variance_formula:
        return None

    compiled = numexpr.NumExpr(
        variance_formula, [("data", numpy.float64), ("dark", numpy.float64)]
    )
    return compiled(dataset_signal, 0 if dark is None else dark)
622
+
623
+
624
def normalize_dataset(
    azimuthal_integrator: AzimuthalIntegrator,
    dataset_signal: numpy.ndarray,
    monitor_values: numpy.ndarray = None,
    NormalizationFactor: float = 1.0,
    filename_mask: str = None,
    filename_dark: str = None,
    filename_flat: str = None,
    absolute_solidangle: bool = True,
    binning: tuple = (1, 1),
    Dummy=None,
    DDummy=None,
    variance_formula=None,
    polarization_factor=None,
    polarization_axis_offset=0,
    datatype="float32",
    algorithm: str = "cython",
    dark_filter: str = "quantil",
    dark_filter_quantil_lower: float = 0.1,
    dark_filter_quantil_upper: float = 0.9,
    **kwargs,
):
    """Normalize a stack of detector frames with the selected back-end.

    Gathers the persistent correction arrays (mask, flat, dark, polarization,
    solid-angle, combined normalization), optionally derives a variance stack
    from *variance_formula*, then dispatches to the back-end selected by
    *algorithm* ("cython", "opencl", "numpy" or "cupy").

    Returns a (signal, variance, sigma) tuple of numpy arrays; variance and
    sigma are zero-filled when the back-end produces none.
    """
    t0 = time.perf_counter()
    datatype = _parse_datatype(datatype=datatype)
    data_signal_shape = azimuthal_integrator.detector.shape
    algorithm = _parse_norm_algorithm(algorithm=algorithm)
    use_cupy = algorithm["use_cupy"]

    # Parameters shared by the persistent-array getters.
    # BUGFIX: "dummy" previously carried DDummy (the dummy *tolerance*)
    # instead of the dummy value itself; every explicit call site below
    # passes dummy=Dummy, delta_dummy=DDummy.
    common_params = {
        "use_cupy": use_cupy,
        "data_signal_shape": data_signal_shape,
        "binning": binning,
        "dummy": Dummy,
        "delta_dummy": DDummy,
    }

    array_mask = get_persistent_array_mask(
        filename_mask=filename_mask,
        **common_params,
    )

    array_flat = get_persistent_array_flat(
        filename_flat=filename_flat,
        data_signal_shape=data_signal_shape,
        datatype=datatype,
        binning=binning,
        dummy=Dummy,
        delta_dummy=DDummy,
        filename_mask=filename_mask,
        use_cupy=use_cupy,
    )

    array_dark = get_persistent_array_dark(
        filename_dark=filename_dark,
        filename_mask=filename_mask,
        dark_filter=dark_filter,
        dark_filter_quantil_lower=dark_filter_quantil_lower,
        dark_filter_quantil_upper=dark_filter_quantil_upper,
        **common_params,
    )

    # Geometry parameters shared by the cached polarization/solid-angle/
    # normalization getters (must stay hashable for lru_cache).
    ai_params = {
        "data_signal_shape": azimuthal_integrator.detector.shape,
        "dist": azimuthal_integrator.dist,
        "wavelength": azimuthal_integrator.wavelength,
        "psize_1": azimuthal_integrator.detector.pixel1,
        "psize_2": azimuthal_integrator.detector.pixel2,
        "poni1": azimuthal_integrator.poni1,
        "poni2": azimuthal_integrator.poni2,
        "rot1": azimuthal_integrator.rot1,
        "rot2": azimuthal_integrator.rot2,
        "rot3": azimuthal_integrator.rot3,
    }

    array_polarization = _get_persistent_array_polarization(
        polarization_factor=polarization_factor,
        polarization_axis=polarization_axis_offset,
        use_cupy=use_cupy,
        **ai_params,
    )

    array_solidangle = _get_persistent_array_solidangle(
        absolute=True,
        use_cupy=use_cupy,
        **ai_params,
    )

    # Patch due to inconsistent chiArray and position_array methods in pyFAI
    if "chi_center" in azimuthal_integrator._cached_array:
        azimuthal_integrator._cached_array.pop("chi_center")

    array_normalization = _get_persistent_array_normalization(
        normalization_factor=NormalizationFactor,
        polarization_factor=polarization_factor,
        polarization_axis=polarization_axis_offset,
        filename_flat=filename_flat,
        filename_mask=filename_mask,
        absolute_solidangle=absolute_solidangle,
        datatype=datatype,
        use_cupy=use_cupy,
        binning=binning,
        dummy=Dummy,
        delta_dummy=DDummy,
        **ai_params,
    )

    # The variance formula references the dark frame, which must be a host
    # (numpy) array even when the back-end works on the GPU.
    if variance_formula and array_dark is not None:
        if use_cupy and CUPY_AVAILABLE:
            dark = array_dark.get()
        else:
            dark = array_dark
    else:
        dark = None

    dataset_variance = calculate_dataset_variance(
        dataset_signal=dataset_signal,
        variance_formula=variance_formula,
        dark=dark,
    )

    params_normalization = {
        "dataset_signal": dataset_signal,
        "dataset_variance": dataset_variance,
        "monitor_values": monitor_values,
        "array_flat": array_flat,
        "array_dark": array_dark,
        "array_mask": array_mask,
        "array_solidangle": array_solidangle,
        "array_polarization": array_polarization,
        "array_normalization": array_normalization,
        "dummy": Dummy,
        "delta_dummy": DDummy,
        "datatype": datatype,
        "normalization_factor": NormalizationFactor,
        "detector_distortion": azimuthal_integrator.detector,
    }

    (
        dataset_normalized_signal,
        dataset_normalized_variance,
        dataset_normalized_sigma,
    ) = algorithm["method"](
        **params_normalization,
    )
    t1 = time.perf_counter()
    logger.debug(
        f"Normalization completed in {t1 - t0:.2f} seconds using {algorithm['name']} algorithm."
    )

    # Back-ends may return None for variance/sigma; keep the output shape
    # contract stable for downstream tasks.
    if dataset_normalized_variance is None:
        dataset_normalized_variance = numpy.zeros_like(dataset_signal, dtype=datatype)
    if dataset_normalized_sigma is None:
        dataset_normalized_sigma = numpy.zeros_like(dataset_signal, dtype=datatype)
    return (
        dataset_normalized_signal,
        dataset_normalized_variance,
        dataset_normalized_sigma,
    )
782
+
783
+
784
def calculate_normalization_values(
    monitor_values,
    azimuthal_integrator=None,
    psize_1=None,
    psize_2=None,
    dist=None,
    normalization_factor=None,
):
    """Scale monitor values by the pixel solid angle and normalization factor.

    Pixel sizes and distance default to the values of *azimuthal_integrator*
    when given.  If monitors or any geometry parameter is missing, only the
    normalization factor (default 1.0) is returned.
    """
    normalization_factor = 1.0 if normalization_factor is None else normalization_factor

    if azimuthal_integrator is not None:
        detector = azimuthal_integrator.detector
        psize_1 = psize_1 or detector.pixel1
        psize_2 = psize_2 or detector.pixel2
        dist = dist or azimuthal_integrator.dist

    incomplete = (
        monitor_values is None or psize_1 is None or psize_2 is None or dist is None
    )
    if incomplete:
        return normalization_factor

    solid_angle = psize_1 * psize_2 / dist**2
    return monitor_values * solid_angle / normalization_factor