modacor-1.0.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (120)
  1. modacor/__init__.py +30 -0
  2. modacor/dataclasses/__init__.py +0 -0
  3. modacor/dataclasses/basedata.py +973 -0
  4. modacor/dataclasses/databundle.py +23 -0
  5. modacor/dataclasses/helpers.py +45 -0
  6. modacor/dataclasses/messagehandler.py +75 -0
  7. modacor/dataclasses/process_step.py +233 -0
  8. modacor/dataclasses/process_step_describer.py +146 -0
  9. modacor/dataclasses/processing_data.py +59 -0
  10. modacor/dataclasses/trace_event.py +118 -0
  11. modacor/dataclasses/uncertainty_tools.py +132 -0
  12. modacor/dataclasses/validators.py +84 -0
  13. modacor/debug/pipeline_tracer.py +548 -0
  14. modacor/io/__init__.py +33 -0
  15. modacor/io/csv/__init__.py +0 -0
  16. modacor/io/csv/csv_sink.py +114 -0
  17. modacor/io/csv/csv_source.py +210 -0
  18. modacor/io/hdf/__init__.py +27 -0
  19. modacor/io/hdf/hdf_source.py +120 -0
  20. modacor/io/io_sink.py +41 -0
  21. modacor/io/io_sinks.py +61 -0
  22. modacor/io/io_source.py +164 -0
  23. modacor/io/io_sources.py +208 -0
  24. modacor/io/processing_path.py +113 -0
  25. modacor/io/tiled/__init__.py +16 -0
  26. modacor/io/tiled/tiled_source.py +403 -0
  27. modacor/io/yaml/__init__.py +27 -0
  28. modacor/io/yaml/yaml_source.py +116 -0
  29. modacor/modules/__init__.py +53 -0
  30. modacor/modules/base_modules/__init__.py +0 -0
  31. modacor/modules/base_modules/append_processing_data.py +329 -0
  32. modacor/modules/base_modules/append_sink.py +141 -0
  33. modacor/modules/base_modules/append_source.py +181 -0
  34. modacor/modules/base_modules/bitwise_or_masks.py +113 -0
  35. modacor/modules/base_modules/combine_uncertainties.py +120 -0
  36. modacor/modules/base_modules/combine_uncertainties_max.py +105 -0
  37. modacor/modules/base_modules/divide.py +82 -0
  38. modacor/modules/base_modules/find_scale_factor1d.py +373 -0
  39. modacor/modules/base_modules/multiply.py +77 -0
  40. modacor/modules/base_modules/multiply_databundles.py +73 -0
  41. modacor/modules/base_modules/poisson_uncertainties.py +69 -0
  42. modacor/modules/base_modules/reduce_dimensionality.py +252 -0
  43. modacor/modules/base_modules/sink_processing_data.py +80 -0
  44. modacor/modules/base_modules/subtract.py +80 -0
  45. modacor/modules/base_modules/subtract_databundles.py +67 -0
  46. modacor/modules/base_modules/units_label_update.py +66 -0
  47. modacor/modules/instrument_modules/__init__.py +0 -0
  48. modacor/modules/instrument_modules/readme.md +9 -0
  49. modacor/modules/technique_modules/__init__.py +0 -0
  50. modacor/modules/technique_modules/scattering/__init__.py +0 -0
  51. modacor/modules/technique_modules/scattering/geometry_helpers.py +114 -0
  52. modacor/modules/technique_modules/scattering/index_pixels.py +492 -0
  53. modacor/modules/technique_modules/scattering/indexed_averager.py +628 -0
  54. modacor/modules/technique_modules/scattering/pixel_coordinates_3d.py +417 -0
  55. modacor/modules/technique_modules/scattering/solid_angle_correction.py +63 -0
  56. modacor/modules/technique_modules/scattering/xs_geometry.py +571 -0
  57. modacor/modules/technique_modules/scattering/xs_geometry_from_pixel_coordinates.py +293 -0
  58. modacor/runner/__init__.py +0 -0
  59. modacor/runner/pipeline.py +749 -0
  60. modacor/runner/process_step_registry.py +224 -0
  61. modacor/tests/__init__.py +27 -0
  62. modacor/tests/dataclasses/test_basedata.py +519 -0
  63. modacor/tests/dataclasses/test_basedata_operations.py +439 -0
  64. modacor/tests/dataclasses/test_basedata_to_base_units.py +57 -0
  65. modacor/tests/dataclasses/test_process_step_describer.py +73 -0
  66. modacor/tests/dataclasses/test_processstep.py +282 -0
  67. modacor/tests/debug/test_tracing_integration.py +188 -0
  68. modacor/tests/integration/__init__.py +0 -0
  69. modacor/tests/integration/test_pipeline_run.py +238 -0
  70. modacor/tests/io/__init__.py +27 -0
  71. modacor/tests/io/csv/__init__.py +0 -0
  72. modacor/tests/io/csv/test_csv_source.py +156 -0
  73. modacor/tests/io/hdf/__init__.py +27 -0
  74. modacor/tests/io/hdf/test_hdf_source.py +92 -0
  75. modacor/tests/io/test_io_sources.py +119 -0
  76. modacor/tests/io/tiled/__init__.py +12 -0
  77. modacor/tests/io/tiled/test_tiled_source.py +120 -0
  78. modacor/tests/io/yaml/__init__.py +27 -0
  79. modacor/tests/io/yaml/static_data_example.yaml +26 -0
  80. modacor/tests/io/yaml/test_yaml_source.py +47 -0
  81. modacor/tests/modules/__init__.py +27 -0
  82. modacor/tests/modules/base_modules/__init__.py +27 -0
  83. modacor/tests/modules/base_modules/test_append_processing_data.py +219 -0
  84. modacor/tests/modules/base_modules/test_append_sink.py +76 -0
  85. modacor/tests/modules/base_modules/test_append_source.py +180 -0
  86. modacor/tests/modules/base_modules/test_bitwise_or_masks.py +264 -0
  87. modacor/tests/modules/base_modules/test_combine_uncertainties.py +105 -0
  88. modacor/tests/modules/base_modules/test_combine_uncertainties_max.py +109 -0
  89. modacor/tests/modules/base_modules/test_divide.py +140 -0
  90. modacor/tests/modules/base_modules/test_find_scale_factor1d.py +220 -0
  91. modacor/tests/modules/base_modules/test_multiply.py +113 -0
  92. modacor/tests/modules/base_modules/test_multiply_databundles.py +136 -0
  93. modacor/tests/modules/base_modules/test_poisson_uncertainties.py +61 -0
  94. modacor/tests/modules/base_modules/test_reduce_dimensionality.py +358 -0
  95. modacor/tests/modules/base_modules/test_sink_processing_data.py +119 -0
  96. modacor/tests/modules/base_modules/test_subtract.py +111 -0
  97. modacor/tests/modules/base_modules/test_subtract_databundles.py +136 -0
  98. modacor/tests/modules/base_modules/test_units_label_update.py +91 -0
  99. modacor/tests/modules/technique_modules/__init__.py +0 -0
  100. modacor/tests/modules/technique_modules/scattering/__init__.py +0 -0
  101. modacor/tests/modules/technique_modules/scattering/test_geometry_helpers.py +198 -0
  102. modacor/tests/modules/technique_modules/scattering/test_index_pixels.py +426 -0
  103. modacor/tests/modules/technique_modules/scattering/test_indexed_averaging.py +559 -0
  104. modacor/tests/modules/technique_modules/scattering/test_pixel_coordinates_3d.py +282 -0
  105. modacor/tests/modules/technique_modules/scattering/test_xs_geometry_from_pixel_coordinates.py +224 -0
  106. modacor/tests/modules/technique_modules/scattering/test_xsgeometry.py +635 -0
  107. modacor/tests/requirements.txt +12 -0
  108. modacor/tests/runner/test_pipeline.py +438 -0
  109. modacor/tests/runner/test_process_step_registry.py +65 -0
  110. modacor/tests/test_import.py +43 -0
  111. modacor/tests/test_modacor.py +17 -0
  112. modacor/tests/test_units.py +79 -0
  113. modacor/units.py +97 -0
  114. modacor-1.0.0.dist-info/METADATA +482 -0
  115. modacor-1.0.0.dist-info/RECORD +120 -0
  116. modacor-1.0.0.dist-info/WHEEL +5 -0
  117. modacor-1.0.0.dist-info/licenses/AUTHORS.md +11 -0
  118. modacor-1.0.0.dist-info/licenses/LICENSE +11 -0
  119. modacor-1.0.0.dist-info/licenses/LICENSE.txt +11 -0
  120. modacor-1.0.0.dist-info/top_level.txt +1 -0
modacor/modules/base_modules/find_scale_factor1d.py
@@ -0,0 +1,373 @@
+ # SPDX-License-Identifier: BSD-3-Clause
+ #!/usr/bin/env python3
+ # -*- coding: utf-8 -*-
+
+ from __future__ import annotations
+
+ __coding__ = "utf-8"
+ __authors__ = ["Brian R. Pauw"]
+ __copyright__ = "Copyright 2025, The MoDaCor team"
+ __date__ = "12/12/2025"
+ __status__ = "Development"
+
+ __all__ = ["FindScaleFactor1D"]
+ __version__ = "20251212.2"
+
+ from pathlib import Path
+ from typing import Dict
+
+ import numpy as np
+ from attrs import define
+ from scipy.interpolate import interp1d
+ from scipy.optimize import least_squares
+
+ from modacor import ureg
+ from modacor.dataclasses.basedata import BaseData
+ from modacor.dataclasses.databundle import DataBundle
+ from modacor.dataclasses.process_step import ProcessStep
+ from modacor.dataclasses.process_step_describer import ProcessStepDescriber
+
+ # -------------------------------------------------------------------------
+ # Small data containers (attrs, not namedtuple)
+ # -------------------------------------------------------------------------
+
+
+ @define(slots=True)
+ class DependentData1D:
+     y: np.ndarray
+     sigma: np.ndarray
+     weights: np.ndarray
+
+
+ @define(slots=True)
+ class FitData1D:
+     x: np.ndarray
+     y_ref: np.ndarray
+     y_work: np.ndarray
+     sigma_ref: np.ndarray
+     sigma_work: np.ndarray
+     weights: np.ndarray
+
+
+ # -------------------------------------------------------------------------
+ # Helpers
+ # -------------------------------------------------------------------------
+
+
+ def _combined_sigma(bd: BaseData) -> np.ndarray:
+     if not bd.uncertainties:
+         return np.asarray(1.0)
+
+     sig2 = None
+     for u in bd.uncertainties.values():
+         arr = np.asarray(u, dtype=float)
+         sig2 = arr * arr if sig2 is None else sig2 + arr * arr
+     return np.sqrt(sig2)
+
+
+ def _extract_dependent(bd: BaseData) -> DependentData1D:
+     if bd.rank_of_data != 1:
+         raise ValueError("Dependent BaseData must be rank-1.")
+
+     y = np.asarray(bd.signal, dtype=float).squeeze()
+     if y.ndim != 1:
+         raise ValueError("Dependent signal must be 1D.")
+
+     sigma = np.asarray(_combined_sigma(bd), dtype=float)
+     weights = np.asarray(bd.weights, dtype=float)
+
+     if sigma.size == 1:
+         sigma = np.full_like(y, float(sigma))
+     else:
+         sigma = sigma.squeeze()
+
+     if weights.size == 1:
+         weights = np.full_like(y, float(weights))
+     else:
+         weights = weights.squeeze()
+
+     if sigma.shape != y.shape or weights.shape != y.shape:
+         raise ValueError("Uncertainties and weights must match dependent signal shape.")
+
+     sigma = np.where(sigma <= 0.0, np.nan, sigma)
+
+     return DependentData1D(y=y, sigma=sigma, weights=weights)
+
+
+ def _overlap_range(x1: np.ndarray, x2: np.ndarray) -> tuple[float, float]:
+     return float(max(np.nanmin(x1), np.nanmin(x2))), float(min(np.nanmax(x1), np.nanmax(x2)))
+
+
+ def _prepare_fit_data(
+     *,
+     x_work: np.ndarray,
+     dep_work: DependentData1D,
+     x_ref: np.ndarray,
+     dep_ref: DependentData1D,
+     require_overlap: bool,
+     interpolation_kind: str,
+     fit_min: float,
+     fit_max: float,
+     use_weights: bool,
+ ) -> FitData1D:
+     ov_min, ov_max = _overlap_range(x_ref, x_work)
+     if require_overlap and not (ov_min < ov_max):
+         raise ValueError("No overlap between working and reference x-axes.")
+
+     lo = max(fit_min, ov_min) if require_overlap else fit_min
+     hi = min(fit_max, ov_max) if require_overlap else fit_max
+     if not lo < hi:
+         raise ValueError("Empty fit range after overlap constraints.")
+
+     mask = (x_ref >= lo) & (x_ref <= hi)
+     if np.count_nonzero(mask) < 2:
+         raise ValueError("Not enough points in fit window.")
+
+     x_fit = x_ref[mask]
+     y_ref = dep_ref.y[mask]
+     sigma_ref = dep_ref.sigma[mask]
+     weights_ref = dep_ref.weights[mask]
+
+     # sort working data
+     order = np.argsort(x_work)
+     x_work = x_work[order]
+     y_work = dep_work.y[order]
+     sigma_work = dep_work.sigma[order]
+     weights_work = dep_work.weights[order]
+
+     bounds_error = require_overlap
+     fill_value = None if bounds_error else "extrapolate"
+
+     interp_y = interp1d(
+         x_work, y_work, kind=interpolation_kind, bounds_error=bounds_error, fill_value=fill_value, assume_sorted=True
+     )
+     interp_sigma = interp1d(
+         x_work, sigma_work, kind="linear", bounds_error=bounds_error, fill_value=fill_value, assume_sorted=True
+     )
+     interp_w = interp1d(
+         x_work, weights_work, kind="linear", bounds_error=bounds_error, fill_value=fill_value, assume_sorted=True
+     )
+
+     y_work_i = interp_y(x_fit)
+     sigma_work_i = interp_sigma(x_fit)
+     weights_work_i = interp_w(x_fit)
+
+     weights = (weights_ref * weights_work_i) if use_weights else np.ones_like(y_ref)
+
+     valid = (
+         np.isfinite(y_ref)
+         & np.isfinite(y_work_i)
+         & np.isfinite(sigma_ref)
+         & (sigma_ref > 0)
+         & np.isfinite(sigma_work_i)
+         & (sigma_work_i >= 0)
+         & np.isfinite(weights)
+         & (weights > 0)
+     )
+
+     if np.count_nonzero(valid) < 2:
+         raise ValueError("Not enough valid points after masking.")
+
+     return FitData1D(
+         x=x_fit[valid],
+         y_ref=y_ref[valid],
+         y_work=y_work_i[valid],
+         sigma_ref=sigma_ref[valid],
+         sigma_work=sigma_work_i[valid],
+         weights=weights[valid],
+     )
+
+
+ # -------------------------------------------------------------------------
+ # Main ProcessStep
+ # -------------------------------------------------------------------------
+
+
+ class FindScaleFactor1D(ProcessStep):
+     documentation = ProcessStepDescriber(
+         calling_name="Scale 1D curve to reference (compute-only)",
+         calling_id="FindScaleFactor1D",
+         calling_module_path=Path(__file__),
+         calling_version=__version__,
+         required_data_keys=["signal"],
+         modifies={
+             "scale_factor": ["signal", "uncertainties", "units"],
+             "scale_background": ["signal", "uncertainties", "units"],
+         },
+         arguments={
+             "signal_key": {
+                 "type": str,
+                 "default": "signal",
+                 "doc": "BaseData key for the dependent variable signal.",
+             },
+             "independent_axis_key": {
+                 "type": str,
+                 "default": "Q",
+                 "doc": "BaseData key for the independent axis.",
+             },
+             "scale_output_key": {
+                 "type": str,
+                 "default": "scale_factor",
+                 "doc": "BaseData key to store the scale factor output.",
+             },
+             "background_output_key": {
+                 "type": str,
+                 "default": "scale_background",
+                 "doc": "BaseData key to store the fitted background output.",
+             },
+             "fit_background": {
+                 "type": bool,
+                 "default": False,
+                 "doc": "Whether to fit a constant background offset.",
+             },
+             "fit_min_val": {
+                 "type": (float, int, type(None)),
+                 "default": None,
+                 "doc": "Minimum x-value for the fit (in fit_val_units).",
+             },
+             "fit_max_val": {
+                 "type": (float, int, type(None)),
+                 "default": None,
+                 "doc": "Maximum x-value for the fit (in fit_val_units).",
+             },
+             "fit_val_units": {
+                 "type": (str, type(None)),
+                 "default": None,
+                 "doc": "Units for fit_min_val/fit_max_val if provided.",
+             },
+             "require_overlap": {
+                 "type": bool,
+                 "default": True,
+                 "doc": "Require overlapping x-range between reference and work data.",
+             },
+             "interpolation_kind": {
+                 "type": str,
+                 "default": "linear",
+                 "doc": "Interpolation kind passed to scipy/numpy interpolation.",
+             },
+             "robust_loss": {
+                 "type": str,
+                 "default": "huber",
+                 "doc": "Robust loss function name for the fit.",
+             },
+             "robust_fscale": {
+                 "type": (float, int),
+                 "default": 1.0,
+                 "doc": "Robust loss scale parameter.",
+             },
+             "use_basedata_weights": {
+                 "type": bool,
+                 "default": True,
+                 "doc": "Use BaseData weights when fitting.",
+             },
+         },
+         step_keywords=["scale", "calibration", "1D"],
+         step_doc="Compute scale factor between two 1D curves using robust least squares.",
+     )
+
+     def calculate(self) -> Dict[str, DataBundle]:
+         cfg = self.configuration
+         keys = self._normalised_processing_keys()
+         if len(keys) != 2:
+             raise ValueError("FindScaleFactor1D requires exactly two processing keys in 'with_processing_keys'.")
+         work_key, ref_key = keys
+
+         sig_key = cfg.get("signal_key", "signal")
+         axis_key = cfg.get("independent_axis_key", "Q")
+
+         work_db = self.processing_data[work_key]
+         ref_db = self.processing_data[ref_key]
+
+         y_work_bd = work_db[sig_key].copy(with_axes=True)
+         y_ref_bd = ref_db[sig_key].copy(with_axes=True)
+
+         x_work_bd = work_db[axis_key].copy(with_axes=False)
+         x_ref_bd = ref_db[axis_key].copy(with_axes=False)
+
+         if x_work_bd.units != x_ref_bd.units:
+             x_work_bd.to_units(x_ref_bd.units)
+
+         x_work = np.asarray(x_work_bd.signal, dtype=float).squeeze()
+         x_ref = np.asarray(x_ref_bd.signal, dtype=float).squeeze()
+
+         dep_work = _extract_dependent(y_work_bd)
+         dep_ref = _extract_dependent(y_ref_bd)
+
+         fit_min = cfg.get("fit_min_val")
+         fit_max = cfg.get("fit_max_val")
+
+         fit_units = cfg.get("fit_val_units") or x_ref_bd.units
+         if fit_min is not None:
+             fit_min = ureg.Quantity(fit_min, fit_units).to(x_ref_bd.units).magnitude
+         else:
+             fit_min = np.nanmin(x_ref)
+
+         if fit_max is not None:
+             fit_max = ureg.Quantity(fit_max, fit_units).to(x_ref_bd.units).magnitude
+         else:
+             fit_max = np.nanmax(x_ref)
+
+         fit_data = _prepare_fit_data(
+             x_work=x_work,
+             dep_work=dep_work,
+             x_ref=x_ref,
+             dep_ref=dep_ref,
+             require_overlap=cfg.get("require_overlap", True),
+             interpolation_kind=cfg.get("interpolation_kind", "linear"),
+             fit_min=float(fit_min),
+             fit_max=float(fit_max),
+             use_weights=cfg.get("use_basedata_weights", True),
+         )
+
+         fit_background = bool(cfg.get("fit_background", False))
+
+         def residuals(p: np.ndarray) -> np.ndarray:
+             scale = p[0]
+             background = p[1] if fit_background else 0.0
+             model = scale * fit_data.y_work + background
+             sigma = np.sqrt(fit_data.sigma_ref**2 + (scale * fit_data.sigma_work) ** 2)
+             r = (fit_data.y_ref - model) / sigma
+             return np.sqrt(fit_data.weights) * r
+
+         if fit_background:
+             X = np.column_stack([fit_data.y_work, np.ones_like(fit_data.y_work)])
+             x0, *_ = np.linalg.lstsq(X, fit_data.y_ref, rcond=None)
+         else:
+             denom = np.dot(fit_data.y_work, fit_data.y_work) or 1.0
+             x0 = np.array([np.dot(fit_data.y_ref, fit_data.y_work) / denom])
+
+         res = least_squares(
+             residuals,
+             x0=x0,
+             loss=cfg.get("robust_loss", "huber"),
+             f_scale=float(cfg.get("robust_fscale", 1.0)),
+         )
+
+         J = res.jac
+         dof = max(1, len(res.fun) - len(res.x))
+         s_sq = np.sum(res.fun**2) / dof
+
+         cov = s_sq * np.linalg.pinv(J.T @ J)
+         sig_params = np.sqrt(np.clip(np.diag(cov), 0.0, np.inf))
+
+         scale = float(res.x[0])
+         scale_sigma = float(sig_params[0])
+
+         out_key = cfg.get("scale_output_key", "scale_factor")
+         work_db[out_key] = BaseData(
+             signal=np.array([scale]),
+             units="dimensionless",
+             uncertainties={"propagate_to_all": np.array([scale_sigma])},
+             rank_of_data=0,
+         )
+
+         if fit_background:
+             bg_key = cfg.get("background_output_key", "scale_background")
+             work_db[bg_key] = BaseData(
+                 signal=np.array([float(res.x[1])]),
+                 units=y_ref_bd.units,
+                 uncertainties={"propagate_to_all": np.array([sig_params[1]])},
+                 rank_of_data=0,
+             )
+
+         return {work_key: work_db}
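
Editor's note: the core of this module's fit — interpolate the working curve onto the reference grid, then robustly fit a single scale factor with the scale folded into the working-curve uncertainty — can be reproduced standalone. A minimal sketch with synthetic data (illustrative names only, no modacor classes):

import numpy as np
from scipy.interpolate import interp1d
from scipy.optimize import least_squares

rng = np.random.default_rng(0)

# Reference curve, and a working curve of the same shape but 5x weaker
x_ref = np.linspace(0.1, 1.0, 200)
y_true = x_ref**-2.0
sigma_ref = 0.02 * y_true
y_ref = y_true + rng.normal(0.0, sigma_ref)

x_work = np.linspace(0.05, 1.2, 250)
y_work_true = 0.2 * x_work**-2.0
sigma_work = 0.02 * y_work_true
y_work = y_work_true + rng.normal(0.0, sigma_work)

# Interpolate the working curve onto the reference grid (cf. _prepare_fit_data)
y_w = interp1d(x_work, y_work, kind="linear", bounds_error=True, assume_sorted=True)(x_ref)
s_w = interp1d(x_work, sigma_work, kind="linear", bounds_error=True, assume_sorted=True)(x_ref)

def residuals(p: np.ndarray) -> np.ndarray:
    # chi residuals: the scale factor is folded into the working-curve sigma
    sigma = np.sqrt(sigma_ref**2 + (p[0] * s_w) ** 2)
    return (y_ref - p[0] * y_w) / sigma

res = least_squares(residuals, x0=[1.0], loss="huber", f_scale=1.0)
print(res.x[0])  # ~5.0: the factor mapping the working curve onto the reference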
modacor/modules/base_modules/multiply.py
@@ -0,0 +1,77 @@
+ # SPDX-License-Identifier: BSD-3-Clause
+ #!/usr/bin/env python3
+ # -*- coding: utf-8 -*-
+
+ from __future__ import annotations
+
+ __coding__ = "utf-8"
+ __authors__ = ["Brian R. Pauw", "Armin Moser"]  # add names to the list as appropriate
+ __copyright__ = "Copyright 2025, The MoDaCor team"
+ __date__ = "29/10/2025"
+ __status__ = "Development"  # "Development", "Production"
+ # end of header and standard imports
+
+ __all__ = ["Multiply"]
+ __version__ = "20251029.1"
+
+ from pathlib import Path
+
+ from modacor.dataclasses.databundle import DataBundle
+ from modacor.dataclasses.helpers import basedata_from_sources
+ from modacor.dataclasses.process_step import ProcessStep
+ from modacor.dataclasses.process_step_describer import ProcessStepDescriber
+
+
+ class Multiply(ProcessStep):
+     """
+     Multiply a DataBundle by a BaseData from an IoSource
+     """
+
+     documentation = ProcessStepDescriber(
+         calling_name="Multiply by IoSource data",
+         calling_id="MultiplyBySourceData",
+         calling_module_path=Path(__file__),
+         calling_version=__version__,
+         required_data_keys=["signal"],
+         modifies={"signal": ["signal", "uncertainties", "units"]},
+         arguments={
+             "multiplier_source": {
+                 "type": str,
+                 "default": None,
+                 "doc": "IoSources key for the multiplier signal.",
+             },
+             "multiplier_units_source": {
+                 "type": str,
+                 "default": None,
+                 "doc": "IoSources key for multiplier units metadata.",
+             },
+             "multiplier_uncertainties_sources": {
+                 "type": dict,
+                 "default": {},
+                 "doc": "Mapping of uncertainty name to IoSources key.",
+             },
+         },
+         step_keywords=["multiply", "scalar", "array"],
+         step_doc="Multiply a DataBundle element by a multiplier loaded from a data source",
+         step_reference="DOI 10.1088/0953-8984/25/38/383201",
+         step_note="""This loads a scalar (value, units and uncertainty)
+         from an IoSource and applies it to the data signal""",
+     )
+
+     def calculate(self) -> dict[str, DataBundle]:
+         # build up the multiplier BaseData object from the IoSources
+         multiplier = basedata_from_sources(
+             io_sources=self.io_sources,
+             signal_source=self.configuration.get("multiplier_source"),
+             units_source=self.configuration.get("multiplier_units_source", None),
+             uncertainty_sources=self.configuration.get("multiplier_uncertainties_sources", {}),
+         )
+
+         output: dict[str, DataBundle] = {}
+         # actual work happens here:
+         for key in self._normalised_processing_keys():
+             databundle = self.processing_data.get(key)
+             # multiply the data
+             databundle["signal"] *= multiplier
+             output[key] = databundle
+         return output
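
Editor's note: the actual multiplication, including uncertainty and unit propagation, is delegated to BaseData's in-place `*=` operator, whose implementation is not part of this diff. For orientation, the standard first-order propagation rule for a product, which the modifies entry above implies, looks like this in plain numpy:

import numpy as np

# For c = a * b with independent uncertainties, first-order propagation gives
# (sigma_c / c)^2 = (sigma_a / a)^2 + (sigma_b / b)^2
a = np.array([10.0, 20.0])
sigma_a = np.array([0.5, 1.0])
b, sigma_b = 2.0, 0.1  # a scalar multiplier with its own uncertainty

c = a * b
sigma_c = np.abs(c) * np.sqrt((sigma_a / a) ** 2 + (sigma_b / b) ** 2)
print(c)        # [20. 40.]
print(sigma_c)  # [1.41421356 2.82842712]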
modacor/modules/base_modules/multiply_databundles.py
@@ -0,0 +1,73 @@
+ # SPDX-License-Identifier: BSD-3-Clause
+ #!/usr/bin/env python3
+ # -*- coding: utf-8 -*-
+
+ from __future__ import annotations
+
+ __coding__ = "utf-8"
+ __authors__ = ["Brian R. Pauw"]  # add names to the list as appropriate
+ __copyright__ = "Copyright 2025, The MoDaCor team"
+ __date__ = "12/12/2025"
+ __status__ = "Development"  # "Development", "Production"
+ # end of header and standard imports
+
+ __all__ = ["MultiplyDatabundles"]
+ __version__ = "20251212.1"
+
+ from pathlib import Path
+
+ from modacor.dataclasses.databundle import DataBundle
+ from modacor.dataclasses.process_step import ProcessStep
+ from modacor.dataclasses.process_step_describer import ProcessStepDescriber
+
+
+ class MultiplyDatabundles(ProcessStep):
+     """
+     Multiply a DataBundle with another DataBundle, useful for scaling or combining data
+     """
+
+     documentation = ProcessStepDescriber(
+         calling_name="Multiply another DataBundle",
+         calling_id="MultiplyDatabundles",
+         calling_module_path=Path(__file__),
+         calling_version=__version__,
+         required_data_keys=["signal"],
+         modifies={"signal": ["signal", "uncertainties", "units"]},
+         arguments={
+             "multiplicand_data_key": {
+                 "type": str,
+                 "default": "signal",
+                 "doc": "BaseData key to modify in the multiplicand DataBundle.",
+             },
+             "multiplier_data_key": {
+                 "type": str,
+                 "default": "signal",
+                 "doc": "BaseData key to read from the multiplier DataBundle.",
+             },
+         },
+         step_keywords=["multiply", "scaling", "databundle"],
+         step_doc="Multiply a DataBundle element using another DataBundle",
+         step_reference="DOI 10.1088/0953-8984/25/38/383201",
+         step_note="""
+         This multiplies one DataBundle's signal with another, useful for scaling or combining data.
+         'with_processing_keys' in the configuration should contain two keys; the operation
+         will multiply the first key's DataBundle by the second key's DataBundle.
+         """,
+     )
+
+ def calculate(self) -> dict[str, DataBundle]:
59
+ # actual work happens here:
60
+ keys = self._normalised_processing_keys()
61
+ assert len(keys) == 2, (
62
+ "MultiplyDatabundles requires exactly two processing keys in 'with_processing_keys': "
63
+ "the first is the multiplicand, the second is the multiplier."
64
+ )
65
+ multiplicand_key = keys[0]
66
+ multiplicand = self.processing_data.get(multiplicand_key)
67
+ multiplier = self.processing_data.get(keys[1])
68
+ # multiply the data
69
+ multiplicand[self.configuration["multiplicand_data_key"]] *= multiplier[
70
+ self.configuration["multiplier_data_key"]
71
+ ]
72
+ output: dict[str, DataBundle] = {multiplicand_key: multiplicand}
73
+ return output
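
Editor's note: per the step_note, this step is driven entirely by 'with_processing_keys'. A hypothetical configuration fragment, written as a Python dict for illustration only (the real pipeline schema is defined by modacor/runner/pipeline.py, which is not shown in this diff, and the key names "sample" and "flatfield" are invented):

step_config = {
    "calling_id": "MultiplyDatabundles",
    # first key = multiplicand (modified in place), second key = multiplier
    "with_processing_keys": ["sample", "flatfield"],
    "multiplicand_data_key": "signal",
    "multiplier_data_key": "signal",
}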
modacor/modules/base_modules/poisson_uncertainties.py
@@ -0,0 +1,69 @@
+ # SPDX-License-Identifier: BSD-3-Clause
+ #!/usr/bin/env python3
+ # -*- coding: utf-8 -*-
+
+ from __future__ import annotations
+
+ __coding__ = "utf-8"
+ __authors__ = ["Malte Storm", "Tim Snow", "Brian R. Pauw"]  # add names to the list as appropriate
+ __copyright__ = "Copyright 2025, The MoDaCor team"
+ __date__ = "16/11/2025"
+ __status__ = "Development"  # "Development", "Production"
+ # end of header and standard imports
+
+ __version__ = "20250522.1"
+ __all__ = ["PoissonUncertainties"]
+
+ from pathlib import Path
+
+ import numpy as np
+
+ # from modacor.dataclasses.databundle import DataBundle
+ from modacor.dataclasses.process_step import ProcessStep
+ from modacor.dataclasses.process_step_describer import ProcessStepDescriber
+
+ # from typing import Any
+
+
+ class PoissonUncertainties(ProcessStep):
+     """
+     Adding Poisson uncertainties to the data
+     """
+
+     documentation = ProcessStepDescriber(
+         calling_name="Add Poisson Uncertainties",
+         calling_id="PoissonUncertainties",
+         calling_module_path=Path(__file__),
+         calling_version=__version__,
+         required_data_keys=["signal"],
+         modifies={"variances": ["Poisson"]},
+         arguments={
+             "with_processing_keys": {
+                 "type": list,
+                 "required": True,
+                 "default": None,
+                 "doc": "ProcessingData keys to update with Poisson variances.",
+             },
+         },
+         step_keywords=["uncertainties", "Poisson"],
+         step_doc="Add Poisson uncertainties to the data",
+         step_reference="DOI 10.1088/0953-8984/25/38/383201",
+         step_note="This is a simple Poisson uncertainty calculation based on the signal intensity",
+     )
+
+     def calculate(self):
+         """
+         Calculate the Poisson uncertainties for the data
+         """
+
+         # Get the data
+         data = self.processing_data
+         output = {}
+         for key in self._normalised_processing_keys():
+             databundle = data.get(key)
+             signal = databundle["signal"].signal
+
+             # Add the variance to the data
+             databundle["signal"].variances["Poisson"] = np.clip(signal, 1, None)
+             output[key] = databundle
+         return output
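
Editor's note: the variance assignment above encodes Poisson counting statistics, where the variance estimate of a counted signal N is N itself (sigma = sqrt(N)); the clip floor of 1 count is the step's own choice, presumably so that empty pixels do not end up with zero uncertainty. A minimal numpy illustration of exactly that expression:

import numpy as np

counts = np.array([0, 1, 4, 100])
variance = np.clip(counts, 1, None)  # Poisson variance, floored at 1 count
print(variance)           # [  1   1   4 100]
print(np.sqrt(variance))  # [ 1.  1.  2. 10.]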