modacor-1.0.0-py3-none-any.whl

This diff shows the content of publicly available package versions that have been released to a supported registry. It is provided for informational purposes only and reflects the package contents as they appear in the public registry.
Files changed (120)
  1. modacor/__init__.py +30 -0
  2. modacor/dataclasses/__init__.py +0 -0
  3. modacor/dataclasses/basedata.py +973 -0
  4. modacor/dataclasses/databundle.py +23 -0
  5. modacor/dataclasses/helpers.py +45 -0
  6. modacor/dataclasses/messagehandler.py +75 -0
  7. modacor/dataclasses/process_step.py +233 -0
  8. modacor/dataclasses/process_step_describer.py +146 -0
  9. modacor/dataclasses/processing_data.py +59 -0
  10. modacor/dataclasses/trace_event.py +118 -0
  11. modacor/dataclasses/uncertainty_tools.py +132 -0
  12. modacor/dataclasses/validators.py +84 -0
  13. modacor/debug/pipeline_tracer.py +548 -0
  14. modacor/io/__init__.py +33 -0
  15. modacor/io/csv/__init__.py +0 -0
  16. modacor/io/csv/csv_sink.py +114 -0
  17. modacor/io/csv/csv_source.py +210 -0
  18. modacor/io/hdf/__init__.py +27 -0
  19. modacor/io/hdf/hdf_source.py +120 -0
  20. modacor/io/io_sink.py +41 -0
  21. modacor/io/io_sinks.py +61 -0
  22. modacor/io/io_source.py +164 -0
  23. modacor/io/io_sources.py +208 -0
  24. modacor/io/processing_path.py +113 -0
  25. modacor/io/tiled/__init__.py +16 -0
  26. modacor/io/tiled/tiled_source.py +403 -0
  27. modacor/io/yaml/__init__.py +27 -0
  28. modacor/io/yaml/yaml_source.py +116 -0
  29. modacor/modules/__init__.py +53 -0
  30. modacor/modules/base_modules/__init__.py +0 -0
  31. modacor/modules/base_modules/append_processing_data.py +329 -0
  32. modacor/modules/base_modules/append_sink.py +141 -0
  33. modacor/modules/base_modules/append_source.py +181 -0
  34. modacor/modules/base_modules/bitwise_or_masks.py +113 -0
  35. modacor/modules/base_modules/combine_uncertainties.py +120 -0
  36. modacor/modules/base_modules/combine_uncertainties_max.py +105 -0
  37. modacor/modules/base_modules/divide.py +82 -0
  38. modacor/modules/base_modules/find_scale_factor1d.py +373 -0
  39. modacor/modules/base_modules/multiply.py +77 -0
  40. modacor/modules/base_modules/multiply_databundles.py +73 -0
  41. modacor/modules/base_modules/poisson_uncertainties.py +69 -0
  42. modacor/modules/base_modules/reduce_dimensionality.py +252 -0
  43. modacor/modules/base_modules/sink_processing_data.py +80 -0
  44. modacor/modules/base_modules/subtract.py +80 -0
  45. modacor/modules/base_modules/subtract_databundles.py +67 -0
  46. modacor/modules/base_modules/units_label_update.py +66 -0
  47. modacor/modules/instrument_modules/__init__.py +0 -0
  48. modacor/modules/instrument_modules/readme.md +9 -0
  49. modacor/modules/technique_modules/__init__.py +0 -0
  50. modacor/modules/technique_modules/scattering/__init__.py +0 -0
  51. modacor/modules/technique_modules/scattering/geometry_helpers.py +114 -0
  52. modacor/modules/technique_modules/scattering/index_pixels.py +492 -0
  53. modacor/modules/technique_modules/scattering/indexed_averager.py +628 -0
  54. modacor/modules/technique_modules/scattering/pixel_coordinates_3d.py +417 -0
  55. modacor/modules/technique_modules/scattering/solid_angle_correction.py +63 -0
  56. modacor/modules/technique_modules/scattering/xs_geometry.py +571 -0
  57. modacor/modules/technique_modules/scattering/xs_geometry_from_pixel_coordinates.py +293 -0
  58. modacor/runner/__init__.py +0 -0
  59. modacor/runner/pipeline.py +749 -0
  60. modacor/runner/process_step_registry.py +224 -0
  61. modacor/tests/__init__.py +27 -0
  62. modacor/tests/dataclasses/test_basedata.py +519 -0
  63. modacor/tests/dataclasses/test_basedata_operations.py +439 -0
  64. modacor/tests/dataclasses/test_basedata_to_base_units.py +57 -0
  65. modacor/tests/dataclasses/test_process_step_describer.py +73 -0
  66. modacor/tests/dataclasses/test_processstep.py +282 -0
  67. modacor/tests/debug/test_tracing_integration.py +188 -0
  68. modacor/tests/integration/__init__.py +0 -0
  69. modacor/tests/integration/test_pipeline_run.py +238 -0
  70. modacor/tests/io/__init__.py +27 -0
  71. modacor/tests/io/csv/__init__.py +0 -0
  72. modacor/tests/io/csv/test_csv_source.py +156 -0
  73. modacor/tests/io/hdf/__init__.py +27 -0
  74. modacor/tests/io/hdf/test_hdf_source.py +92 -0
  75. modacor/tests/io/test_io_sources.py +119 -0
  76. modacor/tests/io/tiled/__init__.py +12 -0
  77. modacor/tests/io/tiled/test_tiled_source.py +120 -0
  78. modacor/tests/io/yaml/__init__.py +27 -0
  79. modacor/tests/io/yaml/static_data_example.yaml +26 -0
  80. modacor/tests/io/yaml/test_yaml_source.py +47 -0
  81. modacor/tests/modules/__init__.py +27 -0
  82. modacor/tests/modules/base_modules/__init__.py +27 -0
  83. modacor/tests/modules/base_modules/test_append_processing_data.py +219 -0
  84. modacor/tests/modules/base_modules/test_append_sink.py +76 -0
  85. modacor/tests/modules/base_modules/test_append_source.py +180 -0
  86. modacor/tests/modules/base_modules/test_bitwise_or_masks.py +264 -0
  87. modacor/tests/modules/base_modules/test_combine_uncertainties.py +105 -0
  88. modacor/tests/modules/base_modules/test_combine_uncertainties_max.py +109 -0
  89. modacor/tests/modules/base_modules/test_divide.py +140 -0
  90. modacor/tests/modules/base_modules/test_find_scale_factor1d.py +220 -0
  91. modacor/tests/modules/base_modules/test_multiply.py +113 -0
  92. modacor/tests/modules/base_modules/test_multiply_databundles.py +136 -0
  93. modacor/tests/modules/base_modules/test_poisson_uncertainties.py +61 -0
  94. modacor/tests/modules/base_modules/test_reduce_dimensionality.py +358 -0
  95. modacor/tests/modules/base_modules/test_sink_processing_data.py +119 -0
  96. modacor/tests/modules/base_modules/test_subtract.py +111 -0
  97. modacor/tests/modules/base_modules/test_subtract_databundles.py +136 -0
  98. modacor/tests/modules/base_modules/test_units_label_update.py +91 -0
  99. modacor/tests/modules/technique_modules/__init__.py +0 -0
  100. modacor/tests/modules/technique_modules/scattering/__init__.py +0 -0
  101. modacor/tests/modules/technique_modules/scattering/test_geometry_helpers.py +198 -0
  102. modacor/tests/modules/technique_modules/scattering/test_index_pixels.py +426 -0
  103. modacor/tests/modules/technique_modules/scattering/test_indexed_averaging.py +559 -0
  104. modacor/tests/modules/technique_modules/scattering/test_pixel_coordinates_3d.py +282 -0
  105. modacor/tests/modules/technique_modules/scattering/test_xs_geometry_from_pixel_coordinates.py +224 -0
  106. modacor/tests/modules/technique_modules/scattering/test_xsgeometry.py +635 -0
  107. modacor/tests/requirements.txt +12 -0
  108. modacor/tests/runner/test_pipeline.py +438 -0
  109. modacor/tests/runner/test_process_step_registry.py +65 -0
  110. modacor/tests/test_import.py +43 -0
  111. modacor/tests/test_modacor.py +17 -0
  112. modacor/tests/test_units.py +79 -0
  113. modacor/units.py +97 -0
  114. modacor-1.0.0.dist-info/METADATA +482 -0
  115. modacor-1.0.0.dist-info/RECORD +120 -0
  116. modacor-1.0.0.dist-info/WHEEL +5 -0
  117. modacor-1.0.0.dist-info/licenses/AUTHORS.md +11 -0
  118. modacor-1.0.0.dist-info/licenses/LICENSE +11 -0
  119. modacor-1.0.0.dist-info/licenses/LICENSE.txt +11 -0
  120. modacor-1.0.0.dist-info/top_level.txt +1 -0
modacor/tests/modules/base_modules/test_subtract_databundles.py
@@ -0,0 +1,136 @@
+ # SPDX-License-Identifier: BSD-3-Clause
+ # /usr/bin/env python3
+ # -*- coding: utf-8 -*-
+
+ from __future__ import annotations
+
+ __coding__ = "utf-8"
+ __authors__ = ["Brian R. Pauw"]  # add names to the list as appropriate
+ __copyright__ = "Copyright 2025, The MoDaCor team"
+ __date__ = "16/11/2025"
+ __status__ = "Development"  # "Development", "Production"
+ # end of header and standard imports
+
+ import unittest
+
+ import numpy as np
+
+ from modacor import ureg
+ from modacor.dataclasses.basedata import BaseData
+ from modacor.dataclasses.databundle import DataBundle
+ from modacor.dataclasses.processing_data import ProcessingData
+ from modacor.io.io_sources import IoSources
+ from modacor.modules.base_modules.subtract_databundles import SubtractDatabundles
+
+ TEST_IO_SOURCES = IoSources()
+
+
+ class TestSubtractDatabundles(unittest.TestCase):
+     """Testing class for modacor/modules/base_modules/subtract_databundles.py"""
+
+     def setUp(self):
+         # Two simple 2x3 BaseData objects to subtract
+         signal1 = np.array([[10.0, 20.0, 30.0], [40.0, 50.0, 60.0]], dtype=float)
+         signal2 = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], dtype=float)
+
+         # Absolute 1σ uncertainties
+         unc1 = 0.5 * np.ones_like(signal1)
+         unc2 = 0.2 * np.ones_like(signal2)
+
+         self.bd1 = BaseData(
+             signal=signal1,
+             units=ureg.Unit("count"),
+             uncertainties={"u": unc1},
+         )
+         self.bd2 = BaseData(
+             signal=signal2,
+             units=ureg.Unit("count"),
+             uncertainties={"u": unc2},
+         )
+
+         self.bundle1 = DataBundle(signal=self.bd1)
+         self.bundle2 = DataBundle(signal=self.bd2)
+
+         self.processing_data = ProcessingData()
+         self.processing_data["bundle1"] = self.bundle1
+         self.processing_data["bundle2"] = self.bundle2
+
+         # Ground truth using BaseData.__sub__
+         self.expected_result = self.bd1 - self.bd2
+
+     def tearDown(self):
+         pass
+
+     # ------------------------------------------------------------------ #
+     # Tests
+     # ------------------------------------------------------------------ #
+
+     def test_subtract_databundles_calculation(self):
+         """
+         SubtractDatabundles.calculate() should subtract the second DataBundle's
+         signal from the first, using BaseData.__sub__ semantics.
+         """
+         step = SubtractDatabundles(io_sources=TEST_IO_SOURCES)
+         step.modify_config_by_kwargs(
+             with_processing_keys=["bundle1", "bundle2"],
+         )
+         step.processing_data = self.processing_data
+
+         output = step.calculate()
+
+         # Only the minuend key should be in output
+         self.assertEqual(list(output.keys()), ["bundle1"])
+
+         result_bd: BaseData = self.processing_data["bundle1"]["signal"]
+
+         # Signal and uncertainties should match the precomputed result
+         np.testing.assert_allclose(result_bd.signal, self.expected_result.signal)
+         for key in self.expected_result.uncertainties:
+             np.testing.assert_allclose(
+                 result_bd.uncertainties[key],
+                 self.expected_result.uncertainties[key],
+             )
+
+         # Units should be preserved (count - count → count)
+         self.assertEqual(result_bd.units, self.expected_result.units)
+
+     def test_subtract_databundles_execution_via_call(self):
+         """
+         SubtractDatabundles.__call__ should run the step and update ProcessingData in-place.
+         """
+         # Re-initialize processing_data to original state
+         processing_data = ProcessingData()
+         processing_data["bundle1"] = DataBundle(signal=self.bd1)
+         processing_data["bundle2"] = DataBundle(signal=self.bd2)
+
+         step = SubtractDatabundles(io_sources=TEST_IO_SOURCES)
+         step.modify_config_by_kwargs(
+             with_processing_keys=["bundle1", "bundle2"],
+         )
+
+         step(processing_data)
+
+         result_bd: BaseData = processing_data["bundle1"]["signal"]
+
+         np.testing.assert_allclose(result_bd.signal, self.expected_result.signal)
+         for key in self.expected_result.uncertainties:
+             np.testing.assert_allclose(
+                 result_bd.uncertainties[key],
+                 self.expected_result.uncertainties[key],
+             )
+         self.assertEqual(result_bd.units, self.expected_result.units)
+
+     def test_requires_exactly_two_keys(self):
+         """
+         SubtractDatabundles should assert if 'with_processing_keys' does not
+         contain exactly two keys.
+         """
+         step = SubtractDatabundles(io_sources=TEST_IO_SOURCES)
+         # Only one key → should trigger the assertion in calculate()
+         step.modify_config_by_kwargs(
+             with_processing_keys=["bundle1"],
+         )
+         step.processing_data = self.processing_data
+
+         with self.assertRaises(AssertionError):
+             step.calculate()
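For orientation, the fixture above boils down to an element-wise difference of two 2x3 count arrays; the minimal NumPy sketch below reproduces the expected signal. How BaseData.__sub__ combines the "u" uncertainties is not visible in this diff, so the quadrature line is an illustrative assumption only, not MoDaCor's confirmed propagation rule.

import numpy as np

signal1 = np.array([[10.0, 20.0, 30.0], [40.0, 50.0, 60.0]])
signal2 = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
unc1 = 0.5 * np.ones_like(signal1)
unc2 = 0.2 * np.ones_like(signal2)

expected_signal = signal1 - signal2  # [[9, 18, 27], [36, 45, 54]]
# Assumed for illustration: independent absolute uncertainties combined in quadrature.
assumed_unc = np.hypot(unc1, unc2)   # sqrt(0.5**2 + 0.2**2) ≈ 0.539 everywhere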
modacor/tests/modules/base_modules/test_units_label_update.py
@@ -0,0 +1,91 @@
+ # SPDX-License-Identifier: BSD-3-Clause
+ # /usr/bin/env python3
+ # -*- coding: utf-8 -*-
+
+ from __future__ import annotations
+
+ __coding__ = "utf-8"
+ __authors__ = ["Brian R. Pauw"]  # add names to the list as appropriate
+ __copyright__ = "Copyright 2025, The MoDaCor team"
+ __date__ = "16/12/2025"
+ __status__ = "Development"  # "Development", "Production"
+ # end of header and standard imports
+
+ import numpy as np
+
+ from modacor import ureg
+ from modacor.dataclasses.basedata import BaseData
+ from modacor.dataclasses.databundle import DataBundle
+ from modacor.dataclasses.processing_data import ProcessingData
+ from modacor.io.io_sources import IoSources
+
+ # adjust import if your file lives elsewhere
+ from modacor.modules.base_modules.units_label_update import UnitsLabelUpdate
+
+
+ def _make_pd() -> ProcessingData:
+     pd = ProcessingData()
+     db = DataBundle()
+
+     db["intensity_scale_factor"] = BaseData(
+         signal=np.array(2.0),
+         units="dimensionless",
+         uncertainties={"propagate_to_all": np.array(0.05)},
+     )
+     db["other_factor"] = BaseData(
+         signal=np.array([1.0, 2.0, 3.0]),
+         units="dimensionless",
+         uncertainties={"propagate_to_all": np.array([0.1, 0.1, 0.1])},
+     )
+
+     pd["intensity_calibration"] = db
+     return pd
+
+
+ def test_units_update_sets_units_for_multiple_keys_without_touching_values():
+     pd = _make_pd()
+     before_sf = pd["intensity_calibration"]["intensity_scale_factor"].signal.copy()
+     before_sf_u = dict(pd["intensity_calibration"]["intensity_scale_factor"].uncertainties)
+
+     before_other = pd["intensity_calibration"]["other_factor"].signal.copy()
+     before_other_u = dict(pd["intensity_calibration"]["other_factor"].uncertainties)
+
+     step = UnitsLabelUpdate(io_sources=IoSources())
+     step.modify_config_by_dict(
+         {
+             "with_processing_keys": ["intensity_calibration"],
+             "update_pairs": {
+                 "intensity_scale_factor": {"units": "meter"},
+                 "other_factor": {"units": "1/second"},
+             },
+         }
+     )
+     step.execute(pd)
+
+     sf = pd["intensity_calibration"]["intensity_scale_factor"]
+     other = pd["intensity_calibration"]["other_factor"]
+
+     assert sf.units == ureg.Unit("meter")
+     assert other.units == ureg.Unit("1/second")
+
+     np.testing.assert_allclose(sf.signal, before_sf)
+     np.testing.assert_allclose(other.signal, before_other)
+     np.testing.assert_allclose(sf.uncertainties["propagate_to_all"], before_sf_u["propagate_to_all"])
+     np.testing.assert_allclose(other.uncertainties["propagate_to_all"], before_other_u["propagate_to_all"])
+
+
+ def test_units_update_accepts_shorthand_string_form():
+     pd = _make_pd()
+
+     step = UnitsLabelUpdate(io_sources=IoSources())
+     step.modify_config_by_dict(
+         {
+             "with_processing_keys": ["intensity_calibration"],
+             "update_pairs": {
+                 "intensity_scale_factor": "second",
+             },
+         }
+     )
+     step.execute(pd)
+
+     assert pd["intensity_calibration"]["intensity_scale_factor"].units == ureg.Unit("second")
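Read together, the two tests above also document the configuration shapes UnitsLabelUpdate accepts for update_pairs: an explicit {"units": ...} mapping per key, and a bare unit-string shorthand. The snippet below restates exactly those shapes as passed to modify_config_by_dict; it implies nothing about other configuration options.

# Mapping form, one entry per BaseData key to relabel (taken from the first test):
config_explicit = {
    "with_processing_keys": ["intensity_calibration"],
    "update_pairs": {
        "intensity_scale_factor": {"units": "meter"},
        "other_factor": {"units": "1/second"},
    },
}

# Shorthand form, bare unit string (taken from the second test):
config_shorthand = {
    "with_processing_keys": ["intensity_calibration"],
    "update_pairs": {"intensity_scale_factor": "second"},
}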
modacor/tests/modules/technique_modules/scattering/test_geometry_helpers.py
@@ -0,0 +1,198 @@
+ # SPDX-License-Identifier: BSD-3-Clause
+ # /usr/bin/env python3
+ # -*- coding: utf-8 -*-
+
+ from __future__ import annotations
+
+ __coding__ = "utf-8"
+ __authors__ = ["Brian R. Pauw"]  # add names to the list as appropriate
+ __copyright__ = "Copyright 2026, The MoDaCor team"
+ __date__ = "06/01/2026"
+ __status__ = "Development"  # "Development", "Production"
+ # end of header and standard imports
+ __version__ = "20260106.1"
+
+ import numpy as np
+ import pytest
+
+ from modacor import ureg
+ from modacor.dataclasses.basedata import BaseData
+ from modacor.modules.technique_modules.scattering.geometry_helpers import (
+     prepare_static_scalar,
+     require_scalar,
+     unit_vec3,
+ )
+
+ # ----------------------------
+ # unit_vec3
+ # ----------------------------
+
+
+ def test_unit_vec3_normalizes():
+     v = np.array([3.0, 0.0, 4.0])
+     u = unit_vec3(v)
+     np.testing.assert_allclose(np.linalg.norm(u), 1.0)
+     np.testing.assert_allclose(u, np.array([0.6, 0.0, 0.8]))
+
+
+ def test_unit_vec3_rejects_zero_vector():
+     with pytest.raises(ValueError, match="must be non-zero"):
+         unit_vec3((0.0, 0.0, 0.0), name="basis_fast")
+
+
+ # ----------------------------
+ # require_scalar
+ # ----------------------------
+
+
+ def test_require_scalar_passes_scalar_and_sets_rod0():
+     # scalar signal => ndim=0 => rank_of_data MUST be 0
+     bd = BaseData(signal=np.array(2.5), units=ureg.m, rank_of_data=0)
+     out = require_scalar("z", bd)
+     assert np.size(out.signal) == 1
+     assert out.rank_of_data == 0
+     assert out.units.is_compatible_with(ureg.m)
+     np.testing.assert_allclose(out.signal, 2.5)
+
+
+ def test_require_scalar_squeezes_singleton_array():
+     # singleton array is valid; RoD must not exceed ndim
+     bd = BaseData(signal=np.array([[[[2.5]]]]), units=ureg.m, rank_of_data=0)
+     out = require_scalar("z", bd)
+     assert np.size(out.signal) == 1
+     assert out.rank_of_data == 0
+     np.testing.assert_allclose(out.signal, 2.5)
+
+
+ def test_require_scalar_rejects_non_scalar():
+     bd = BaseData(signal=np.array([1.0, 2.0]), units=ureg.m, rank_of_data=0)
+     with pytest.raises(ValueError, match="must be scalar"):
+         require_scalar("det_z", bd)
+
+
+ # ----------------------------
+ # prepare_static_scalar
+ # ----------------------------
+
+
+ def test_prepare_static_scalar_passes_through_scalar():
+     # scalar signal => ndim=0 => RoD must be 0
+     bd = BaseData(signal=np.array(2.5), units=ureg.m, rank_of_data=0)
+     out = prepare_static_scalar(bd, require_units=ureg.m, uncertainty_key="jitter")
+     assert np.size(out.signal) == 1
+     assert out.rank_of_data == 0
+     assert out.units.is_compatible_with(ureg.m)
+     np.testing.assert_allclose(out.signal, 2.5)
+     # passthrough: don't assert uncertainties content
+
+
+ def test_prepare_static_scalar_reduces_shape_5_1_1_1_uniform_weights_mean_and_sem():
+     values = np.array([2.50, 2.52, 2.48, 2.51, 2.49], dtype=float).reshape(5, 1, 1, 1)
+     bd = BaseData(signal=values, units=ureg.m, rank_of_data=0)
+
+     out = prepare_static_scalar(bd, require_units=ureg.m, uncertainty_key="sem")
+
+     exp_mean = float(np.mean(values))
+     flat = values.ravel()
+     exp_var = float(np.mean((flat - exp_mean) ** 2))  # population var
+     exp_sem = float(np.sqrt(exp_var) / np.sqrt(flat.size))
+
+     np.testing.assert_allclose(out.signal, exp_mean, rtol=0, atol=1e-15)
+     assert out.rank_of_data == 0
+     assert "sem" in out.uncertainties
+     np.testing.assert_allclose(out.uncertainties["sem"], exp_sem, rtol=0, atol=1e-15)
+
+
+ def test_prepare_static_scalar_reduces_1d_shape_5_to_scalar_mean_and_sem():
+     """
+     New: common case where NeXus/HDF5 read yields a squeezed vector shape (5,)
+     (e.g. after user preprocessing or reader behavior).
+     """
+     values = np.array([2.50, 2.52, 2.48, 2.51, 2.49], dtype=float)  # shape (5,)
+     bd = BaseData(signal=values, units=ureg.m, rank_of_data=0)
+
+     out = prepare_static_scalar(bd, require_units=ureg.m, uncertainty_key="sem")
+
+     exp_mean = float(np.mean(values))
+     exp_var = float(np.mean((values - exp_mean) ** 2))  # population var
+     exp_sem = float(np.sqrt(exp_var) / np.sqrt(values.size))
+
+     assert np.size(out.signal) == 1
+     assert out.rank_of_data == 0
+     np.testing.assert_allclose(out.signal, exp_mean, rtol=0, atol=1e-15)
+     assert "sem" in out.uncertainties
+     np.testing.assert_allclose(out.uncertainties["sem"], exp_sem, rtol=0, atol=1e-15)
+
+
+ def test_prepare_static_scalar_accepts_scalar_weights_broadcasts():
+     values = np.array([1.0, 2.0, 3.0, 4.0], dtype=float).reshape(4, 1, 1, 1)
+     bd = BaseData(signal=values, units=ureg.m, rank_of_data=0)
+     bd.weights = np.array([1.0])  # scalar/size-1 weights
+
+     out = prepare_static_scalar(bd, require_units=ureg.m, uncertainty_key="sem")
+
+     exp_mean = float(np.mean(values))
+     flat = values.ravel()
+     exp_var = float(np.mean((flat - exp_mean) ** 2))
+     exp_sem = float(np.sqrt(exp_var) / np.sqrt(flat.size))
+
+     np.testing.assert_allclose(out.signal, exp_mean, rtol=0, atol=1e-15)
+     np.testing.assert_allclose(out.uncertainties["sem"], exp_sem, rtol=0, atol=1e-15)
+
+
+ def test_prepare_static_scalar_accepts_broadcastable_weights():
+     values = np.array([10.0, 20.0, 30.0, 40.0], dtype=float).reshape(4, 1, 1, 1)
+     weights = np.array([1.0, 1.0, 2.0, 2.0], dtype=float).reshape(4, 1, 1, 1)
+     bd = BaseData(signal=values, units=ureg.m, rank_of_data=0)
+     bd.weights = weights
+
+     out = prepare_static_scalar(bd, require_units=ureg.m, uncertainty_key="sem")
+
+     x = values.ravel()
+     w = weights.ravel()
+     wsum = float(np.sum(w))
+     exp_mean = float(np.sum(w * x) / wsum)
+     n_eff = float((wsum**2) / np.sum(w**2))
+     exp_var = float(np.sum(w * (x - exp_mean) ** 2) / wsum)
+     exp_sem = float(np.sqrt(exp_var) / np.sqrt(n_eff))
+
+     np.testing.assert_allclose(out.signal, exp_mean, rtol=0, atol=1e-15)
+     np.testing.assert_allclose(out.uncertainties["sem"], exp_sem, rtol=0, atol=1e-15)
+
+
+ def test_prepare_static_scalar_rejects_non_broadcastable_weights():
+     values = np.array([1.0, 2.0, 3.0, 4.0], dtype=float).reshape(4, 1, 1, 1)
+     bd = BaseData(signal=values, units=ureg.m, rank_of_data=0)
+     bd.weights = np.array([1.0, 2.0, 3.0])  # not broadcastable
+
+     with pytest.raises(ValueError, match="weights shape .* does not match signal shape"):
+         prepare_static_scalar(bd, require_units=ureg.m, uncertainty_key="sem")
+
+
+ def test_prepare_static_scalar_rejects_wrong_units():
+     bd = BaseData(signal=np.array([1.0, 2.0, 3.0]), units=ureg.pixel, rank_of_data=0)
+     with pytest.raises(ValueError, match="Value must be in"):
+         prepare_static_scalar(bd, require_units=ureg.m)
+
+
+ def test_prepare_static_scalar_rejects_nonpositive_weight_sum():
+     values = np.array([1.0, 2.0, 3.0], dtype=float)
+     bd = BaseData(signal=values, units=ureg.m, rank_of_data=0)
+     bd.weights = np.array([0.0, 0.0, 0.0])
+
+     with pytest.raises(ValueError, match="weights must sum to > 0"):
+         prepare_static_scalar(bd, require_units=ureg.m, uncertainty_key="sem")
+
+
+ def test_prepare_static_scalar_signal_1d_weights_5_1_1_1_raises():
+     # signal is already squeezed to (5,)
+     x = np.array([2.50, 2.52, 2.48, 2.51, 2.49], dtype=float)  # shape (5,)
+
+     # weights come from a NeXus/HDF5-like dataset: shape (5,1,1,1)
+     w = np.array([1.0, 1.0, 2.0, 2.0, 4.0], dtype=float).reshape(5, 1, 1, 1)
+
+     bd = BaseData(signal=x, units=ureg.m, rank_of_data=0)
+     bd.weights = w
+
+     with pytest.raises(ValueError, match="weights shape .* does not match signal shape"):
+         _ = prepare_static_scalar(bd, require_units=ureg.m, uncertainty_key="sem")
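The expected values in the weighted prepare_static_scalar tests above follow a weighted mean with population variance and a Kish-style effective sample size n_eff = (Σw)²/Σw². The standalone NumPy sketch below restates those reference formulas exactly as the tests compute them; it is illustrative and is not the geometry_helpers implementation itself.

import numpy as np

def weighted_scalar_and_sem(x: np.ndarray, w: np.ndarray) -> tuple[float, float]:
    """Reference formulas used by the tests above (illustrative, not library code).

    mean  = sum(w*x) / sum(w)
    var   = sum(w*(x - mean)**2) / sum(w)   # population (biased) variance
    n_eff = sum(w)**2 / sum(w**2)           # effective sample size
    sem   = sqrt(var) / sqrt(n_eff)
    """
    x = np.ravel(x).astype(float)
    w = np.ravel(w).astype(float)
    wsum = w.sum()
    mean = float((w * x).sum() / wsum)
    var = float((w * (x - mean) ** 2).sum() / wsum)
    n_eff = float(wsum**2 / (w**2).sum())
    return mean, float(np.sqrt(var) / np.sqrt(n_eff))

# With uniform weights this reduces to the plain mean and sigma/sqrt(N),
# matching the unweighted test cases:
vals = np.array([2.50, 2.52, 2.48, 2.51, 2.49])
print(weighted_scalar_and_sem(vals, np.ones_like(vals)))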