modacor 1.0.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (120) hide show
  1. modacor/__init__.py +30 -0
  2. modacor/dataclasses/__init__.py +0 -0
  3. modacor/dataclasses/basedata.py +973 -0
  4. modacor/dataclasses/databundle.py +23 -0
  5. modacor/dataclasses/helpers.py +45 -0
  6. modacor/dataclasses/messagehandler.py +75 -0
  7. modacor/dataclasses/process_step.py +233 -0
  8. modacor/dataclasses/process_step_describer.py +146 -0
  9. modacor/dataclasses/processing_data.py +59 -0
  10. modacor/dataclasses/trace_event.py +118 -0
  11. modacor/dataclasses/uncertainty_tools.py +132 -0
  12. modacor/dataclasses/validators.py +84 -0
  13. modacor/debug/pipeline_tracer.py +548 -0
  14. modacor/io/__init__.py +33 -0
  15. modacor/io/csv/__init__.py +0 -0
  16. modacor/io/csv/csv_sink.py +114 -0
  17. modacor/io/csv/csv_source.py +210 -0
  18. modacor/io/hdf/__init__.py +27 -0
  19. modacor/io/hdf/hdf_source.py +120 -0
  20. modacor/io/io_sink.py +41 -0
  21. modacor/io/io_sinks.py +61 -0
  22. modacor/io/io_source.py +164 -0
  23. modacor/io/io_sources.py +208 -0
  24. modacor/io/processing_path.py +113 -0
  25. modacor/io/tiled/__init__.py +16 -0
  26. modacor/io/tiled/tiled_source.py +403 -0
  27. modacor/io/yaml/__init__.py +27 -0
  28. modacor/io/yaml/yaml_source.py +116 -0
  29. modacor/modules/__init__.py +53 -0
  30. modacor/modules/base_modules/__init__.py +0 -0
  31. modacor/modules/base_modules/append_processing_data.py +329 -0
  32. modacor/modules/base_modules/append_sink.py +141 -0
  33. modacor/modules/base_modules/append_source.py +181 -0
  34. modacor/modules/base_modules/bitwise_or_masks.py +113 -0
  35. modacor/modules/base_modules/combine_uncertainties.py +120 -0
  36. modacor/modules/base_modules/combine_uncertainties_max.py +105 -0
  37. modacor/modules/base_modules/divide.py +82 -0
  38. modacor/modules/base_modules/find_scale_factor1d.py +373 -0
  39. modacor/modules/base_modules/multiply.py +77 -0
  40. modacor/modules/base_modules/multiply_databundles.py +73 -0
  41. modacor/modules/base_modules/poisson_uncertainties.py +69 -0
  42. modacor/modules/base_modules/reduce_dimensionality.py +252 -0
  43. modacor/modules/base_modules/sink_processing_data.py +80 -0
  44. modacor/modules/base_modules/subtract.py +80 -0
  45. modacor/modules/base_modules/subtract_databundles.py +67 -0
  46. modacor/modules/base_modules/units_label_update.py +66 -0
  47. modacor/modules/instrument_modules/__init__.py +0 -0
  48. modacor/modules/instrument_modules/readme.md +9 -0
  49. modacor/modules/technique_modules/__init__.py +0 -0
  50. modacor/modules/technique_modules/scattering/__init__.py +0 -0
  51. modacor/modules/technique_modules/scattering/geometry_helpers.py +114 -0
  52. modacor/modules/technique_modules/scattering/index_pixels.py +492 -0
  53. modacor/modules/technique_modules/scattering/indexed_averager.py +628 -0
  54. modacor/modules/technique_modules/scattering/pixel_coordinates_3d.py +417 -0
  55. modacor/modules/technique_modules/scattering/solid_angle_correction.py +63 -0
  56. modacor/modules/technique_modules/scattering/xs_geometry.py +571 -0
  57. modacor/modules/technique_modules/scattering/xs_geometry_from_pixel_coordinates.py +293 -0
  58. modacor/runner/__init__.py +0 -0
  59. modacor/runner/pipeline.py +749 -0
  60. modacor/runner/process_step_registry.py +224 -0
  61. modacor/tests/__init__.py +27 -0
  62. modacor/tests/dataclasses/test_basedata.py +519 -0
  63. modacor/tests/dataclasses/test_basedata_operations.py +439 -0
  64. modacor/tests/dataclasses/test_basedata_to_base_units.py +57 -0
  65. modacor/tests/dataclasses/test_process_step_describer.py +73 -0
  66. modacor/tests/dataclasses/test_processstep.py +282 -0
  67. modacor/tests/debug/test_tracing_integration.py +188 -0
  68. modacor/tests/integration/__init__.py +0 -0
  69. modacor/tests/integration/test_pipeline_run.py +238 -0
  70. modacor/tests/io/__init__.py +27 -0
  71. modacor/tests/io/csv/__init__.py +0 -0
  72. modacor/tests/io/csv/test_csv_source.py +156 -0
  73. modacor/tests/io/hdf/__init__.py +27 -0
  74. modacor/tests/io/hdf/test_hdf_source.py +92 -0
  75. modacor/tests/io/test_io_sources.py +119 -0
  76. modacor/tests/io/tiled/__init__.py +12 -0
  77. modacor/tests/io/tiled/test_tiled_source.py +120 -0
  78. modacor/tests/io/yaml/__init__.py +27 -0
  79. modacor/tests/io/yaml/static_data_example.yaml +26 -0
  80. modacor/tests/io/yaml/test_yaml_source.py +47 -0
  81. modacor/tests/modules/__init__.py +27 -0
  82. modacor/tests/modules/base_modules/__init__.py +27 -0
  83. modacor/tests/modules/base_modules/test_append_processing_data.py +219 -0
  84. modacor/tests/modules/base_modules/test_append_sink.py +76 -0
  85. modacor/tests/modules/base_modules/test_append_source.py +180 -0
  86. modacor/tests/modules/base_modules/test_bitwise_or_masks.py +264 -0
  87. modacor/tests/modules/base_modules/test_combine_uncertainties.py +105 -0
  88. modacor/tests/modules/base_modules/test_combine_uncertainties_max.py +109 -0
  89. modacor/tests/modules/base_modules/test_divide.py +140 -0
  90. modacor/tests/modules/base_modules/test_find_scale_factor1d.py +220 -0
  91. modacor/tests/modules/base_modules/test_multiply.py +113 -0
  92. modacor/tests/modules/base_modules/test_multiply_databundles.py +136 -0
  93. modacor/tests/modules/base_modules/test_poisson_uncertainties.py +61 -0
  94. modacor/tests/modules/base_modules/test_reduce_dimensionality.py +358 -0
  95. modacor/tests/modules/base_modules/test_sink_processing_data.py +119 -0
  96. modacor/tests/modules/base_modules/test_subtract.py +111 -0
  97. modacor/tests/modules/base_modules/test_subtract_databundles.py +136 -0
  98. modacor/tests/modules/base_modules/test_units_label_update.py +91 -0
  99. modacor/tests/modules/technique_modules/__init__.py +0 -0
  100. modacor/tests/modules/technique_modules/scattering/__init__.py +0 -0
  101. modacor/tests/modules/technique_modules/scattering/test_geometry_helpers.py +198 -0
  102. modacor/tests/modules/technique_modules/scattering/test_index_pixels.py +426 -0
  103. modacor/tests/modules/technique_modules/scattering/test_indexed_averaging.py +559 -0
  104. modacor/tests/modules/technique_modules/scattering/test_pixel_coordinates_3d.py +282 -0
  105. modacor/tests/modules/technique_modules/scattering/test_xs_geometry_from_pixel_coordinates.py +224 -0
  106. modacor/tests/modules/technique_modules/scattering/test_xsgeometry.py +635 -0
  107. modacor/tests/requirements.txt +12 -0
  108. modacor/tests/runner/test_pipeline.py +438 -0
  109. modacor/tests/runner/test_process_step_registry.py +65 -0
  110. modacor/tests/test_import.py +43 -0
  111. modacor/tests/test_modacor.py +17 -0
  112. modacor/tests/test_units.py +79 -0
  113. modacor/units.py +97 -0
  114. modacor-1.0.0.dist-info/METADATA +482 -0
  115. modacor-1.0.0.dist-info/RECORD +120 -0
  116. modacor-1.0.0.dist-info/WHEEL +5 -0
  117. modacor-1.0.0.dist-info/licenses/AUTHORS.md +11 -0
  118. modacor-1.0.0.dist-info/licenses/LICENSE +11 -0
  119. modacor-1.0.0.dist-info/licenses/LICENSE.txt +11 -0
  120. modacor-1.0.0.dist-info/top_level.txt +1 -0
@@ -0,0 +1,973 @@
1
+ # SPDX-License-Identifier: BSD-3-Clause
2
+ #!/usr/bin/env python3
3
+ # -*- coding: utf-8 -*-
4
+
5
+ from __future__ import annotations
6
+
7
+ __coding__ = "utf-8"
8
+ __authors__ = ["Brian R. Pauw, Jérôme Kieffer"] # add names to the list as appropriate
9
+ __copyright__ = "Copyright 2025, The MoDaCor team"
10
+ __date__ = "16/11/2025"
11
+ __status__ = "Development" # "Development", "Production"
12
+ # end of header and standard imports
13
+
14
+ __all__ = ["BaseData"]
15
+
16
+ # import tiled
17
+ # import tiled.client
18
+ import logging
19
+ import numbers
20
+ import operator
21
+ from collections.abc import MutableMapping
22
+ from typing import Any, Callable, Dict, List, Self
23
+
24
+ import numpy as np
25
+ import pint
26
+ from attrs import define, field, setters
27
+ from attrs import validators as v
28
+
29
+ from modacor import ureg
30
+
31
+ logger = logging.getLogger(__name__)
32
+
33
+
34
# Dict-like view class: presents variances while secretly reading/writing the
# parent's one-sigma uncertainties dict.
class _VarianceDict(MutableMapping):
    """Mutable mapping view of variances backed by a parent BaseData.

    Reading a key squares the stored one-sigma uncertainty; writing a key
    validates the variance against the parent's signal shape and stores its
    square root back into ``parent.uncertainties``. No data is held locally.
    """

    def __init__(self, parent: BaseData):
        # Only state: a back-reference to the owning BaseData.
        self._parent = parent

    def __getitem__(self, key):
        # variance = (one-sigma uncertainty) squared
        return self._parent.uncertainties[key] ** 2

    def __setitem__(self, key, var):
        # Accept a scalar or array-like variance; coerce to a float ndarray.
        variance = np.asarray(var, dtype=float)
        # Reject anything that cannot broadcast cleanly to the signal shape.
        validate_broadcast(self._parent.signal, variance, f"variances[{key}]")
        # Store sqrt(variance) as the one-sigma uncertainty.
        self._parent.uncertainties[key] = np.asarray(variance**0.5)

    def __delitem__(self, key):
        del self._parent.uncertainties[key]

    def __iter__(self):
        # Keys mirror those of the underlying uncertainties dict.
        return iter(self._parent.uncertainties)

    def __len__(self):
        return len(self._parent.uncertainties)

    def __contains__(self, x) -> bool:
        return x in self._parent.uncertainties

    def __repr__(self):
        """Render as a plain ``{key: variance_array}`` dict for familiarity."""
        return repr({key: self[key] for key in self._parent.uncertainties})

    def __str__(self):
        return self.__repr__()
76
+
77
+
78
def validate_rank_of_data(instance, attribute, value) -> None:
    """attrs validator: rank must lie within [0, 3] and not exceed signal.ndim."""
    # Hard bounds first.
    if value < 0 or value > 3:
        raise ValueError(f"{attribute.name} must be between 0 and 3, got {value}.")

    # When a signal array exists, the rank may not exceed its dimensionality.
    signal = instance.signal
    if signal is not None and value > signal.ndim:
        raise ValueError(f"{attribute.name} ({value}) cannot exceed signal dim (ndim={signal.ndim}).")
86
+
87
+
88
def signal_converter(value: int | float | np.ndarray) -> np.ndarray:
    """Coerce *value* to a numpy array; existing ndarrays pass through untouched."""
    if isinstance(value, np.ndarray):
        # Preserve the array object (and its dtype) as-is.
        return value
    # Scalars and other array-likes become float arrays.
    return np.array(value, dtype=float)
93
+
94
+
95
def dict_signal_converter(value: Dict[str, int | float | np.ndarray]) -> Dict[str, np.ndarray]:
    """Apply :func:`signal_converter` to every value of a mapping.

    Returns a new dict; keys are kept, each value is coerced to a numpy array
    if it is not already one.
    """
    converted: Dict[str, np.ndarray] = {}
    for key, item in value.items():
        converted[key] = signal_converter(item)
    return converted
101
+
102
+
103
def validate_broadcast(signal: np.ndarray, arr: np.ndarray, name: str) -> None:
    """
    Raise ValueError if `arr` cannot broadcast to `signal.shape` without
    altering it. Size-1 arrays (scalars) are always accepted.
    """
    # A single-element array is compatible with any signal shape.
    if arr.size == 1:
        return

    # Step 1: is broadcasting possible at all?
    try:
        combined_shape = np.broadcast_shapes(signal.shape, arr.shape)
    except ValueError:
        raise ValueError(f"'{name}' with shape {arr.shape} cannot broadcast to signal shape {signal.shape}.")

    # Step 2: broadcasting must not enlarge the signal's shape.
    if combined_shape != signal.shape:
        raise ValueError(f"'{name}' with shape {arr.shape} does not broadcast to signal shape {signal.shape}.")
118
+
119
+
120
def _copy_uncertainties(unc_dict: Dict[str, np.ndarray]) -> Dict[str, np.ndarray]:
    """Return a new dict whose value arrays are independent copies.

    Prevents two objects from silently sharing uncertainty buffers.
    """
    copied: Dict[str, np.ndarray] = {}
    for key, arr in unc_dict.items():
        copied[key] = np.array(arr, copy=True)
    return copied
123
+
124
+
125
def _inherit_metadata(source: BaseData, result: BaseData) -> BaseData:
    """
    Transfer metadata (axes, rank_of_data, weights) from `source` onto `result`,
    leaving the numerical content (signal, units, uncertainties) untouched.
    """
    # Fresh list object so the two BaseData instances never share axes storage.
    result.axes = list(source.axes)

    # Reassigning rank_of_data triggers attrs validation on the result.
    result.rank_of_data = source.rank_of_data

    # Weights are transferred on a best-effort basis; incompatible shapes
    # simply leave the result's default weights in place.
    try:
        weights = np.asarray(source.weights)
        validate_broadcast(result.signal, weights, "weights")
        result.weights = np.broadcast_to(weights, result.signal.shape).copy()
    except ValueError:
        logger.debug("Could not broadcast source weights to result shape; leaving default weights on result BaseData.")

    return result
145
+
146
+
147
def _binary_basedata_op(
    left: BaseData,
    right: BaseData,
    op: Callable[[Any, Any], Any],
) -> BaseData:
    """
    Apply a binary arithmetic operation to two BaseData objects, propagating
    uncertainties using standard first-order, uncorrelated error propagation.

    Semantics
    ---------
    - Uncertainty dict entries are treated as independent sources keyed by name.
    - "propagate_to_all" is treated as a *global fallback* only when it is the sole key
      on an operand. Otherwise it is treated like a normal key name and participates via
      rule 4 (union, no cross-key combining).

    Rules for result uncertainty keys:
      1) If both operands have only "propagate_to_all":
           result.uncertainties = {"propagate_to_all": σ}
      2) If exactly one operand has only "propagate_to_all" and the other has non-global keys:
           result.uncertainties contains only the other operand's non-global keys.
           The global term contributes into each of those keys.
      3) Matching non-global keys propagate and combine by key.
      4) Non-matching non-global keys: transfer/propagate each key independently into the result
         (union of keys), without cross-key combining.

    Propagation formulas (uncorrelated, first-order)
    -----------------------------------------------
    Let A = left.signal * left.units, B = right.signal * right.units, R = op(A, B).
    Using magnitudes A_val, B_val and absolute σA, σB:

    - Add/Sub: σR² = σA² + σB²  (after converting σA, σB to result units)
    - Mul:     σR² = (B σA)² + (A σB)²
    - Div:     σR² = (σA/B)² + (A σB/B²)²

    Raises
    ------
    NotImplementedError
        If `op` is not one of add, sub, mul, truediv.
    """
    # Nominal result (pint handles unit logic & broadcasting)
    A_q = left.signal * left.units
    B_q = right.signal * right.units
    base_result = op(A_q, B_q)
    base_signal = np.asarray(base_result.magnitude, dtype=float)
    result_units = base_result.units
    out_shape = base_signal.shape

    # Broadcast nominal magnitudes to the result shape
    A_val = np.broadcast_to(np.asarray(left.signal, dtype=float), out_shape)
    B_val = np.broadcast_to(np.asarray(right.signal, dtype=float), out_shape)

    left_unc = left.uncertainties
    right_unc = right.uncertainties

    # "global-only" if and only if propagate_to_all is the sole key
    left_global = left_unc.get("propagate_to_all") if set(left_unc.keys()) == {"propagate_to_all"} else None
    right_global = right_unc.get("propagate_to_all") if set(right_unc.keys()) == {"propagate_to_all"} else None

    left_non_global_keys = set(left_unc.keys()) - {"propagate_to_all"}
    right_non_global_keys = set(right_unc.keys()) - {"propagate_to_all"}

    # Decide output keys per rules 1-4 in the docstring above.
    if left_global is not None and right_global is not None:
        # Rule 1: both operands are global-only.
        out_keys = {"propagate_to_all"}
        drop_global_key = False  # we *do* want it
    elif left_global is not None and right_non_global_keys:
        # Rule 2 (left is global-only): inherit right's named keys.
        out_keys = set(right_non_global_keys)
        drop_global_key = True
    elif right_global is not None and left_non_global_keys:
        # Rule 2 (right is global-only): inherit left's named keys.
        out_keys = set(left_non_global_keys)
        drop_global_key = True
    else:
        # Rules 3/4 — general case: union of non-global keys
        out_keys = left_non_global_keys | right_non_global_keys
        drop_global_key = True  # we never emit propagate_to_all unless both-global-only

    def _as_broadcast_float(err: Any) -> np.ndarray:
        # Coerce an uncertainty (scalar or array) to a float array of out_shape.
        arr = np.asarray(err, dtype=float)
        return np.broadcast_to(arr, out_shape)

    def _get_err(unc_map: Dict[str, np.ndarray], key: str, global_err: Any | None) -> Any:
        """Return uncertainty for `key`, falling back to global_err if provided, else 0."""
        if key in unc_map:
            return unc_map[key]
        if global_err is not None:
            return global_err
        return 0.0

    # Precompute unit conversion factors (magnitudes)
    # NOTE(review): `result_units.m_from(...)` is called with a pint Unit (not a
    # Quantity) — relies on pint accepting a Unit here; confirm against the
    # installed pint version's Unit.m_from signature.
    if op in (operator.add, operator.sub):
        cf_A = result_units.m_from(left.units)
        cf_B = result_units.m_from(right.units)

        result_unc: Dict[str, np.ndarray] = {}
        for key in out_keys:
            # σR² = σA² + σB², with both σ converted to result units first.
            sigma_A = _as_broadcast_float(_get_err(left_unc, key, left_global)) * cf_A
            sigma_B = _as_broadcast_float(_get_err(right_unc, key, right_global)) * cf_B
            result_unc[key] = np.sqrt(sigma_A**2 + sigma_B**2)

    elif op is operator.mul:
        # Convert from left.units * right.units to result_units
        cf_AB = result_units.m_from(left.units * right.units)

        result_unc = {}
        for key in out_keys:
            sigma_A = _as_broadcast_float(_get_err(left_unc, key, left_global))
            sigma_B = _as_broadcast_float(_get_err(right_unc, key, right_global))
            # σR² = (B σA)² + (A σB)²
            termA = (B_val * sigma_A) * cf_AB
            termB = (A_val * sigma_B) * cf_AB
            result_unc[key] = np.sqrt(termA**2 + termB**2)

    elif op is operator.truediv:
        # Convert from left.units / right.units to result_units
        cf_A_div_B = result_units.m_from(left.units / right.units)

        result_unc = {}
        # Suppress warnings: division by zero is handled explicitly below.
        with np.errstate(divide="ignore", invalid="ignore", over="ignore"):
            for key in out_keys:
                sigma_A = _as_broadcast_float(_get_err(left_unc, key, left_global))
                sigma_B = _as_broadcast_float(_get_err(right_unc, key, right_global))

                # σR² = (σA/B)² + (A σB/B²)²
                termA = (sigma_A / B_val) * cf_A_div_B
                termB = (A_val * sigma_B / (B_val**2)) * cf_A_div_B
                sigma = np.sqrt(termA**2 + termB**2)

                # Division by zero -> undefined uncertainty
                sigma = np.where(B_val == 0.0, np.nan, sigma)
                result_unc[key] = sigma
    else:
        raise NotImplementedError(f"Operation {op} not supported in _binary_basedata_op")  # noqa: E713

    # Only emit propagate_to_all in the both-global-only case
    if drop_global_key:
        result_unc.pop("propagate_to_all", None)

    # Build the result and copy metadata (axes, rank, weights) from `left`.
    result = BaseData(signal=base_signal, units=result_units, uncertainties=result_unc)
    return _inherit_metadata(left, result)
280
+
281
+
282
def _unary_basedata_op(
    element: BaseData,
    func: Callable[[np.ndarray], np.ndarray],
    dfunc: Callable[[np.ndarray], np.ndarray],
    out_units: pint.Unit,
    domain: Callable[[np.ndarray], np.ndarray] | None = None,
) -> BaseData:
    """
    Generic unary op: y = func(x), σ_y ≈ |dfunc(x)| σ_x, with an optional domain.

    Outside the domain, signal and uncertainties are set to NaN (using np.where-style masking).

    Parameters
    ----------
    element : BaseData
        Input data; each uncertainty key is propagated linearly.
    func : Callable
        Elementwise function applied to in-domain entries of the signal.
    dfunc : Callable
        Derivative of ``func``; its absolute value scales each uncertainty.
    out_units : pint.Unit
        Units assigned to the result.
    domain : Callable, optional
        Predicate returning a boolean mask of valid entries; when omitted,
        every entry is treated as valid.

    Returns
    -------
    BaseData
        New object with metadata (axes, rank, weights) inherited from `element`.
    """
    x = np.asarray(element.signal)
    # Build the validity mask; all-True when no domain is given.
    if domain is None:
        valid = np.ones_like(x, dtype=bool)
    else:
        valid = domain(x)

    # Out-of-domain entries stay NaN.
    y = np.full_like(x, np.nan, dtype=float)
    y[valid] = func(x[valid])

    # |f'(x)| on valid entries; zero elsewhere (overwritten by NaN below anyway).
    deriv = np.zeros_like(x, dtype=float)
    deriv[valid] = np.abs(dfunc(x[valid]))

    result = BaseData(
        signal=y,
        units=out_units,
        uncertainties={},
    )

    # Propagate each uncertainty source independently: σ_y = |f'(x)| σ_x.
    for key, err in element.uncertainties.items():
        validate_broadcast(x, np.asarray(err), f"uncertainties['{key}']")
        err_b = np.broadcast_to(err, x.shape)

        sigma_y = np.full_like(x, np.nan, dtype=float)
        sigma_y[valid] = deriv[valid] * np.abs(err_b[valid])
        result.uncertainties[key] = sigma_y

    # Preserve metadata from the original element
    return _inherit_metadata(element, result)
322
+
323
+
324
class UncertaintyOpsMixin:
    """
    Mixin that adds arithmetic with uncertainty propagation to BaseData.

    Assumptions
    -----------
    - `signal` is a numpy array of nominal values.
    - `uncertainties` maps keys -> absolute 1σ uncertainty arrays (broadcastable to signal).
    - Binary operations assume uncorrelated uncertainties between operands.
    - Unary operations use first-order linear error propagation.
    """

    # ---- binary dunder ops ----

    def _binary_op(
        self: BaseData,
        other: Any,
        op: Callable[[Any, Any], Any],
        swapped: bool = False,
    ) -> BaseData:
        """Coerce `other` to a BaseData operand and delegate to _binary_basedata_op.

        `swapped=True` means `self` is the right-hand operand (used by the
        reflected __r*__ dunders).
        """
        if not isinstance(self, BaseData):
            return NotImplemented

        # --- numbers.Real: treat as dimensionless for * and /, same units for + and -
        if isinstance(other, numbers.Real):
            scalar = float(other)
            if op in (operator.mul, operator.truediv):
                # dimensionless scalar
                scalar_units = ureg.dimensionless
            else:
                # additive scalar, interpret as self.units
                scalar_units = self.units

            # Constant array with zero uncertainties mirroring self's keys, so
            # key-by-key propagation still matches up.
            signal = np.full_like(self.signal, scalar, dtype=float)
            other = BaseData(
                signal=signal,
                units=scalar_units,
                uncertainties={k: np.zeros_like(v, dtype=float) for k, v in self.uncertainties.items()},
            )

        # --- pint.Quantity: for +/-, use same units as self; for */÷, keep its own units
        elif isinstance(other, pint.Quantity):
            if op in (operator.add, operator.sub):
                q = other.to(self.units)
                scalar_units = self.units
            else:
                q = other
                scalar_units = q.units

            scalar = float(q.magnitude)
            signal = np.full_like(self.signal, scalar, dtype=float)
            other = BaseData(
                signal=signal,
                units=scalar_units,
                uncertainties={k: np.zeros_like(v, dtype=float) for k, v in self.uncertainties.items()},
            )

        elif not isinstance(other, BaseData):
            # unsupported type
            return NotImplemented

        # Now both operands are BaseData
        if swapped:
            left, right = other, self
        else:
            left, right = self, other

        return _binary_basedata_op(left, right, op)

    def __add__(self, other: Any) -> BaseData:
        return self._binary_op(other, operator.add)

    def __radd__(self, other: Any) -> BaseData:
        return self._binary_op(other, operator.add, swapped=True)

    def __sub__(self, other: Any) -> BaseData:
        return self._binary_op(other, operator.sub)

    def __rsub__(self, other: Any) -> BaseData:
        return self._binary_op(other, operator.sub, swapped=True)

    def __mul__(self, other: Any) -> BaseData:
        return self._binary_op(other, operator.mul)

    def __rmul__(self, other: Any) -> BaseData:
        return self._binary_op(other, operator.mul, swapped=True)

    def __truediv__(self, other: Any) -> BaseData:
        return self._binary_op(other, operator.truediv)

    def __rtruediv__(self, other: Any) -> BaseData:
        return self._binary_op(other, operator.truediv, swapped=True)

    # ---- unary dunder + convenience methods ----
    # These delegate to module-level helpers (negate_basedata_element,
    # sqrt_basedata_element, ...) defined elsewhere in this file (outside
    # this view); presumably each wraps _unary_basedata_op — verify there.

    def __neg__(self) -> BaseData:
        return negate_basedata_element(self)

    def __pow__(self, exponent: float, modulo=None) -> BaseData:
        # Three-argument pow (modulo) is not meaningful for BaseData.
        if modulo is not None:
            return NotImplemented
        return powered_basedata_element(self, exponent)

    def sqrt(self) -> BaseData:
        return sqrt_basedata_element(self)

    def square(self) -> BaseData:
        return square_basedata_element(self)

    def log(self) -> BaseData:
        return log_basedata_element(self)

    def exp(self) -> BaseData:
        return exp_basedata_element(self)

    def sin(self) -> BaseData:
        return sin_basedata_element(self)

    def cos(self) -> BaseData:
        return cos_basedata_element(self)

    def tan(self) -> BaseData:
        return tan_basedata_element(self)

    def arcsin(self) -> BaseData:
        return arcsin_basedata_element(self)

    def arccos(self) -> BaseData:
        return arccos_basedata_element(self)

    def arctan(self) -> BaseData:
        return arctan_basedata_element(self)

    def reciprocal(self) -> BaseData:
        return reciprocal_basedata_element(self)

    def squeeze(self) -> BaseData:
        return squeeze_basedata_element(self)
462
+
463
+
464
+ @define
465
+ class BaseData(UncertaintyOpsMixin):
466
+ """
467
+ BaseData stores a core data array (`signal`) with associated uncertainties, units,
468
+ and metadata. It validates that any weights or uncertainty arrays broadcast to
469
+ the shape of `signal`, and provides utilities for scaling operations and unit conversion.
470
+
471
+ Attributes
472
+ ----------
473
+ signal : np.ndarray
474
+ The primary data array. All weights/uncertainty arrays must be broadcastable
475
+ to this shape.
476
+ units : pint.Unit
477
+ Physical units of `signal*scaling` and their uncertainties.
478
+ uncertainties : Dict[str, np.ndarray]
479
+ Uncertainty (as one‐sigma standard deviation) arrays keyed by type (e.g. "Poisson",
480
+ "pixel_index"). Each array must broadcast to ``signal.shape``. Variances are computed
481
+ as ``uncertainties[k]**2``.
482
+
483
+ Binary operations propagate uncertainties using first-order, uncorrelated propagation.
484
+ Keys are treated as *independent sources*.
485
+
486
+ Special key: ``"propagate_to_all"``
487
+ ---------------------------------
488
+ ``"propagate_to_all"`` is treated as a *global* uncertainty **only when it is the sole
489
+ key present** on an operand.
490
+
491
+ The propagation rules for an operation ``result = op(a, b)`` are:
492
+
493
+ 1. If both operands only contain ``"propagate_to_all"``, the result contains only
494
+ ``"propagate_to_all"`` (propagated normally).
495
+
496
+ 2. If one operand only contains ``"propagate_to_all"`` and the other contains one or
497
+ more non-``"propagate_to_all"`` keys, the result contains only those non-global keys.
498
+ The global uncertainty contributes as a fallback term to each of those keys.
499
+
500
+ 3. If both operands contain matching non-global keys, those keys are propagated and
501
+ combined by key.
502
+
503
+ 4. If both operands contain non-global keys but with no matches, the result contains
504
+ the union of keys, and each key is propagated from its originating operand only
505
+ (no cross-key combining).
506
+ weights : np.ndarray, optional
507
+         Weights for `signal` (default is a scalar 1.0) for use in averaging operations.
508
+ Must broadcast to `signal.shape`.
509
+ axes : List[BaseData | None]
510
+ Optional metadata for each axis of `signal`. Defaults to an empty list.
511
+ rank_of_data : int, default=0
512
+ Rank (0–3) of the data; 1 is line data, 2 is image data. Must not exceed
513
+ `signal.ndim`.
514
+
515
+ Properties
516
+ ----------
517
+ variances : Dict[str, np.ndarray]
518
+ Returns `{k: u**2 for k, u in uncertainties.items()}`. Assigning expects a dict
519
+ of variance arrays; each is validated against `signal.shape` and
520
+ converted into `uncertainties[k] = sqrt(var)`.
521
+ shape : tuple[int, ...]
522
+ Shape of the `signal` array.
523
+ size : int
524
+ Size of the `signal` array.
525
+
526
+ Methods
527
+ -------
528
+ to_units(new_units: pint.Unit):
529
+ Converts internal `signal` and all `uncertainties` to `new_units` if compatible with
530
+ the existing `units`. Raises `TypeError` or `ValueError` on invalid input.
531
+ to_base_units(): Converts internal signal and uncertainties to the base units implied by current units.
532
+ to_dimensionless(): Converts internal signal and uncertainties to dimensionless units if possible.
533
+ is_dimensionless() -> bool:
534
+ Returns True if `units` is dimensionless, False otherwise.
535
+ """
536
+
537
+ # required:
538
+ # Core data array stored as a numpy ndarray
539
+ signal: np.ndarray = field(
540
+ converter=signal_converter, validator=v.instance_of(np.ndarray), on_setattr=setters.validate
541
+ )
542
+ # Unit of signal*scaling+offset - required input 'dimensionless' for dimensionless data
543
+ units: pint.Unit = field(validator=v.instance_of(pint.Unit), converter=ureg.Unit, on_setattr=setters.validate) # type: ignore
544
+ # optional:
545
+ # Dict of variances represented as numpy ndarray objects; defaulting to an empty dict
546
+ uncertainties: Dict[str, np.ndarray] = field(
547
+ factory=dict, converter=dict_signal_converter, validator=v.instance_of(dict), on_setattr=setters.validate
548
+ )
549
+ # weights for the signal, can be used for weighted averaging
550
+ weights: np.ndarray = field(
551
+ default=np.array(1.0),
552
+ converter=signal_converter,
553
+ validator=v.instance_of(np.ndarray),
554
+ on_setattr=setters.validate,
555
+ )
556
+
557
+ # metadata
558
+ axes: List[Self | None] = field(factory=list, validator=v.instance_of(list), on_setattr=setters.validate)
559
+ # Rank of the data with custom validation:
560
+ # Must be between 0 and 3 and not exceed the dimensionality of signal.
561
+ rank_of_data: int = field(
562
+ default=0, converter=int, validator=[v.instance_of(int), validate_rank_of_data], on_setattr=setters.validate
563
+ )
564
+
565
    def __attrs_post_init__(self):
        """
        Post-initialization to ensure that the shapes of elements in variances dict,
        and the shapes of weights are compatible with the signal array.
        """
        # Validate variances (each entry is uncertainties[k]**2 via _VarianceDict)
        for kind, var in self.variances.items():
            validate_broadcast(self.signal, var, f"variances[{kind}]")

        # Validate weights
        validate_broadcast(self.signal, self.weights, "weights")

        # A length mismatch between axes and signal.ndim is tolerated but logged
        # at debug level, so construction never fails on metadata alone.
        if self.axes and len(self.axes) != self.signal.ndim:
            logger.debug(
                "BaseData.axes length (%d) does not match signal.ndim (%d).",
                len(self.axes),
                self.signal.ndim,
            )
584
+
585
    @property
    def variances(self) -> _VarianceDict:
        """
        A dict‐like view of variances.
          • Reading: bd.variances['foo'] → returns uncertainties['foo']**2
          • Writing: bd.variances['foo'] = var_array → validates + sets uncertainties['foo']=sqrt(var_array)
          • Deleting: del bd.variances['foo'] → removes 'foo' from uncertainties
        """
        # A fresh view is created per access; it holds no state beyond a
        # back-reference to this BaseData, so this is cheap.
        return _VarianceDict(self)
594
+
595
    @variances.setter
    def variances(self, value: Dict[str, np.ndarray]) -> None:
        """
        Set the uncertainties dictionary via the variances.

        Each variance array is validated against `signal` and stored as
        ``uncertainties[kind] = sqrt(var)``. Existing uncertainties are
        discarded first (overwrite semantics, not merge).

        Raises
        ------
        TypeError
            If `value` is not a dict, or any value is not int/float/ndarray.
        ValueError
            If a variance array cannot broadcast to `signal.shape`.
        """
        if not isinstance(value, dict):
            raise TypeError(f"variances must be a dict, got {type(value)}.")
        # NOTE: the genexp variable `v` shadows the attrs-validators alias `v`
        # only inside this expression's scope — harmless but easy to misread.
        if not all(isinstance(v, (np.ndarray, int, float)) for v in value.values()):
            raise TypeError("All variances must be int, float or numpy arrays.")
        # (Optionally clear existing uncertainties, or merge—here we'll just overwrite keys:)
        self.uncertainties.clear()  # clear existing uncertainties
        for kind, var in value.items():
            arr = np.asarray(var, dtype=float)
            validate_broadcast(self.signal, arr, f"variances[{kind}]")
            self.uncertainties[kind] = arr**0.5
610
+
611
    @property
    def shape(self) -> tuple[int, ...]:
        """
        Get the shape of the BaseData signal.

        Returns
        -------
        tuple[int, ...] :
            The shape of the signal.
        """
        return self.signal.shape
622
+
623
    @property
    def size(self) -> int:
        """
        Get the size (total number of elements) of the BaseData signal.

        Returns
        -------
        int :
            The size of the signal.
        """
        return self.signal.size
634
+
635
    def to_dimensionless(self) -> None:
        """
        Convert the signal and uncertainties to dimensionless units if possible.

        In-place no-op when the data is already dimensionless; otherwise
        delegates to `to_units`, which raises for incompatible units.
        """
        if not self.is_dimensionless:
            self.to_units(ureg.dimensionless)
641
+
642
+ @property
643
+ def is_dimensionless(self) -> bool:
644
+ """
645
+ Check if the BaseData is dimensionless.
646
+
647
+ Returns
648
+ -------
649
+ bool :
650
+ True if the units are dimensionless, False otherwise.
651
+ """
652
+ return self.units == ureg.dimensionless
653
+
654
+ def to_base_units(self, *, multiplicative_conversion: bool = True) -> None:
655
+ """
656
+ Convert the signal and uncertainties to the pint *base units* of `self.units`.
657
+
658
+ Notes
659
+ -----
660
+ - This is an in-place operation.
661
+ - For offset / non-multiplicative units (e.g. degC <-> K), this will raise unless
662
+ explicit support is implemented (see `to_units`).
663
+ """
664
+ # Determine the canonical base unit for the current units
665
+ # (use a Quantity to let pint resolve to_base_units properly)
666
+ base_units = (1 * self.units).to_base_units().units
667
+
668
+ try:
669
+ self.to_units(base_units, multiplicative_conversion=multiplicative_conversion)
670
+ except pint.errors.OffsetUnitCalculusError as e:
671
+ # Offset-unit conversions are affine, not purely multiplicative.
672
+ raise NotImplementedError(
673
+ "BaseData.to_base_units() encountered an offset / non-multiplicative unit conversion.\n"
674
+ "This is not supported yet because uncertainties require explicit rules (e.g. delta units)."
675
+ ) from e
676
+
677
+ def to_units(self, new_units: pint.Unit, multiplicative_conversion=True) -> None:
678
+ """
679
+ Convert the signal and uncertainties to new units.
680
+ """
681
+ try:
682
+ new_units = ureg.Unit(new_units) # ensure new_units is a pint.Unit
683
+ except pint.errors.UndefinedUnitError as e:
684
+ raise ValueError(f"Invalid unit provided: {new_units}.") from e
685
+
686
+ if not isinstance(new_units, ureg.Unit):
687
+ raise TypeError(f"new_units must be a pint.Unit, got {type(new_units)}.")
688
+
689
+ if not self.units.is_compatible_with(new_units):
690
+ raise ValueError(
691
+ f"""
692
+ Cannot convert from {self.units} to {new_units}. Units are not compatible.
693
+ """
694
+ )
695
+
696
+ # If the units are the same, no conversion is needed
697
+ if self.units == new_units:
698
+ logger.debug("No unit conversion needed, units are the same.")
699
+ return
700
+
701
+ if not multiplicative_conversion:
702
+ # This path is subtle for offset units (e.g. degC <-> K) and we
703
+ # don't want to silently get uncertainties wrong.
704
+ raise NotImplementedError(
705
+ "Non-multiplicative unit conversions are not yet implemented for BaseData.\n"
706
+ "If you need this, we should design explicit rules (e.g. using delta units)."
707
+ )
708
+
709
+ logger.debug(f"Converting from {self.units} to {new_units}.")
710
+
711
+ # simple unit conversion, can be done to scalar
712
+ # Convert signal
713
+ cfact = new_units.m_from(self.units)
714
+ self.signal *= cfact
715
+ self.units = new_units
716
+ # Convert uncertainty
717
+ for key in self.uncertainties: # fastest as far as my limited testing goes against iterating over items():
718
+ self.uncertainties[key] *= cfact
719
+
720
+ def indexed(self, indexer: Any, *, rank_of_data: int | None = None) -> "BaseData":
721
+ """
722
+ Return a new BaseData corresponding to ``self`` indexed by ``indexer``.
723
+
724
+ Parameters
725
+ ----------
726
+ indexer :
727
+ Any valid NumPy indexer (int, slice, tuple of slices, boolean mask, ...),
728
+ applied consistently to ``signal`` and all uncertainty / weight arrays.
729
+ rank_of_data :
730
+ Optional explicit rank_of_data for the returned BaseData. If omitted,
731
+ it will default to ``min(self.rank_of_data, result.signal.ndim)``.
732
+
733
+ Notes
734
+ -----
735
+ - Units are preserved.
736
+ - Uncertainties and weights are sliced with the same indexer where possible.
737
+ - Axes handling is conservative: existing axes are kept unchanged. If you
738
+ want axes to track slicing semantics more strictly, a higher-level
739
+ helper can be added later.
740
+ """
741
+ sig = np.asarray(self.signal)[indexer]
742
+
743
+ # Slice uncertainties with the same indexer
744
+ new_uncs: Dict[str, np.ndarray] = {}
745
+ for k, u in self.uncertainties.items():
746
+ u_arr = np.asarray(u, dtype=float)
747
+ # broadcast to signal shape, then apply the same indexer
748
+ u_full = np.broadcast_to(u_arr, self.signal.shape)
749
+ new_uncs[k] = u_full[indexer].copy()
750
+
751
+ # Try to slice weights; if shapes don't line up, fall back to scalar 1.0
752
+ try:
753
+ w_arr = np.asarray(self.weights, dtype=float)
754
+ new_weights = w_arr[indexer].copy()
755
+ except Exception:
756
+ new_weights = np.array(1.0, dtype=float)
757
+
758
+ # Decide rank_of_data for the result
759
+ if rank_of_data is None:
760
+ new_rank = min(self.rank_of_data, np.ndim(sig))
761
+ else:
762
+ new_rank = int(rank_of_data)
763
+
764
+ result = BaseData(
765
+ signal=np.asarray(sig, dtype=float),
766
+ units=self.units,
767
+ uncertainties=new_uncs,
768
+ weights=new_weights,
769
+ # For now we keep axes as-is; more sophisticated axis handling can be
770
+ # added once the usage patterns are clear.
771
+ axes=list(self.axes),
772
+ rank_of_data=new_rank,
773
+ )
774
+ return result
775
+
776
+ def copy(self, with_axes: bool = True) -> "BaseData":
777
+ """
778
+ Return a new BaseData with copied signal/uncertainties/weights.
779
+ Axes are shallow-copied (list copy) by default, so axis objects
780
+ themselves are still shared.
781
+ """
782
+ new = BaseData(
783
+ signal=np.array(self.signal, copy=True),
784
+ units=self.units,
785
+ uncertainties=_copy_uncertainties(self.uncertainties),
786
+ weights=np.array(self.weights, copy=True),
787
+ axes=list(self.axes) if with_axes else [],
788
+ rank_of_data=self.rank_of_data,
789
+ )
790
+ return new
791
+
792
+ def __repr__(self):
793
+ return (
794
+ f"BaseData(shape={self.signal.shape}, dtype={self.signal.dtype}, units={self.units}, "
795
+ f"n_uncertainties={len(self.uncertainties)}, rank_of_data={self.rank_of_data})"
796
+ )
797
+
798
+ def __str__(self):
799
+ return f'{self.signal} {self.units} ± {[f"{u} ({k})" for k, u in self.uncertainties.items()]}'
800
+
801
+
802
+ # ---------------------------------------------------------------------------
803
+ # Unary operations built on the generic helper
804
+ # ---------------------------------------------------------------------------
805
+
806
+
807
def squeeze_basedata_element(element: BaseData) -> BaseData:
    """Drop all length-1 dimensions from the signal and from each uncertainty."""
    squeezed = BaseData(
        signal=np.squeeze(element.signal),
        units=element.units,
        uncertainties={kind: np.squeeze(unc) for kind, unc in element.uncertainties.items()},
    )
    return _inherit_metadata(element, squeezed)
815
+
816
+
817
def negate_basedata_element(element: BaseData) -> BaseData:
    """Unary minus: y = -x; uncertainties and units are carried over unchanged."""
    negated = BaseData(
        signal=-element.signal,
        units=element.units,
        uncertainties=_copy_uncertainties(element.uncertainties),
    )
    return _inherit_metadata(element, negated)
825
+
826
+
827
def sqrt_basedata_element(element: BaseData) -> BaseData:
    """Square root: y = sqrt(x), σ_y ≈ σ_x / (2 sqrt(x)). x must be >= 0."""

    def derivative(x):
        # d/dx sqrt(x) = 1 / (2 sqrt(x))
        return 0.5 / np.sqrt(x)

    return _unary_basedata_op(
        element=element,
        func=np.sqrt,
        dfunc=derivative,
        out_units=element.units**0.5,
        domain=lambda x: x >= 0,
    )
836
+
837
+
838
def square_basedata_element(element: BaseData) -> BaseData:
    """Square: y = x^2, σ_y ≈ |2x| σ_x."""

    def forward(x):
        return x**2

    def derivative(x):
        return 2.0 * x

    return _unary_basedata_op(
        element=element,
        func=forward,
        dfunc=derivative,
        out_units=element.units**2,
    )
846
+
847
+
848
def powered_basedata_element(element: BaseData, exponent: float) -> BaseData:
    """Power: y = x**n, σ_y ≈ |n x^(n-1)| σ_x."""
    n = float(exponent)

    # Non-integer exponents are only real-valued for x >= 0.
    if n.is_integer():
        domain = None  # all real x are allowed
    else:

        def domain(x):
            return x >= 0

    def forward(x):
        return x**n

    def derivative(x):
        return n * (x ** (n - 1.0))

    return _unary_basedata_op(
        element=element,
        func=forward,
        dfunc=derivative,
        out_units=element.units**exponent,
        domain=domain,
    )
864
+
865
+
866
def log_basedata_element(element: BaseData) -> BaseData:
    """Natural log: y = ln(x), σ_y ≈ |1/x| σ_x. x must be > 0."""
    # NOTE: this converts *element* to dimensionless units in place.
    element.to_dimensionless()

    def derivative(x):
        return 1.0 / x

    return _unary_basedata_op(
        element=element,
        func=np.log,
        dfunc=derivative,
        out_units=ureg.dimensionless,
        domain=lambda x: x > 0,
    )
877
+
878
+
879
def exp_basedata_element(element: BaseData) -> BaseData:
    """Exponential: y = exp(x), σ_y ≈ exp(x) σ_x. Argument should be dimensionless."""
    # NOTE: this converts *element* to dimensionless units in place.
    element.to_dimensionless()
    return _unary_basedata_op(
        element=element,
        func=np.exp,
        dfunc=np.exp,  # d/dx exp(x) = exp(x)
        out_units=ureg.dimensionless,
    )
889
+
890
+
891
def sin_basedata_element(element: BaseData) -> BaseData:
    """Sine: y = sin(x), σ_y ≈ |cos(x)| σ_x. x in radians."""
    # NOTE: this converts *element* to radians in place before evaluating.
    element.to_units(ureg.radian)
    return _unary_basedata_op(
        element=element,
        func=np.sin,
        dfunc=np.cos,
        out_units=ureg.dimensionless,
    )
901
+
902
+
903
def cos_basedata_element(element: BaseData) -> BaseData:
    """Cosine: y = cos(x), σ_y ≈ |sin(x)| σ_x. x in radians."""
    # NOTE: this converts *element* to radians in place before evaluating.
    element.to_units(ureg.radian)
    return _unary_basedata_op(
        element=element,
        func=np.cos,
        dfunc=np.sin,  # true derivative is -sin(x); abs removes the sign
        out_units=ureg.dimensionless,
    )
913
+
914
+
915
def tan_basedata_element(element: BaseData) -> BaseData:
    """Tangent: y = tan(x), σ_y ≈ |1/cos^2(x)| σ_x. x in radians."""
    # NOTE: this converts *element* to radians in place before evaluating.
    element.to_units(ureg.radian)

    def derivative(x):
        # sec^2(x)
        return 1.0 / (np.cos(x) ** 2)

    return _unary_basedata_op(
        element=element,
        func=np.tan,
        dfunc=derivative,
        out_units=ureg.dimensionless,
    )
925
+
926
+
927
def arcsin_basedata_element(element: BaseData) -> BaseData:
    """Arcsin: y = arcsin(x), σ_y ≈ |1/sqrt(1-x^2)| σ_x. x dimensionless, |x| <= 1."""
    # NOTE: this converts *element* to dimensionless units in place.
    element.to_dimensionless()

    def derivative(x):
        return 1.0 / np.sqrt(1.0 - x**2)

    def in_domain(x):
        return np.abs(x) <= 1.0

    return _unary_basedata_op(
        element=element,
        func=np.arcsin,
        dfunc=derivative,
        out_units=ureg.radian,
        domain=in_domain,
    )
938
+
939
+
940
def arccos_basedata_element(element: BaseData) -> BaseData:
    """Arccos: y = arccos(x), σ_y ≈ |1/sqrt(1-x^2)| σ_x. x dimensionless, |x| <= 1."""
    # NOTE: this converts *element* to dimensionless units in place.
    element.to_dimensionless()

    def derivative(x):
        # true derivative is negative; abs removes the sign
        return 1.0 / np.sqrt(1.0 - x**2)

    def in_domain(x):
        return np.abs(x) <= 1.0

    return _unary_basedata_op(
        element=element,
        func=np.arccos,
        dfunc=derivative,
        out_units=ureg.radian,
        domain=in_domain,
    )
951
+
952
+
953
def arctan_basedata_element(element: BaseData) -> BaseData:
    """Arctan: y = arctan(x), σ_y ≈ |1/(1+x^2)| σ_x. x dimensionless."""
    # NOTE: this converts *element* to dimensionless units in place.
    element.to_dimensionless()

    def derivative(x):
        return 1.0 / (1.0 + x**2)

    return _unary_basedata_op(
        element=element,
        func=np.arctan,
        dfunc=derivative,
        out_units=ureg.radian,
    )
963
+
964
+
965
def reciprocal_basedata_element(element: BaseData) -> BaseData:
    """Reciprocal: y = 1/x, σ_y ≈ |1/x^2| σ_x."""

    def forward(x):
        return 1.0 / x

    def derivative(x):
        return 1.0 / (x**2)

    return _unary_basedata_op(
        element=element,
        func=forward,
        dfunc=derivative,
        out_units=element.units**-1,  # pint Unit, not a Quantity
        domain=lambda x: x != 0,
    )