essreduce 25.2.1.tar.gz → 25.2.3.tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {essreduce-25.2.1/src/essreduce.egg-info → essreduce-25.2.3}/PKG-INFO +1 -1
- {essreduce-25.2.1 → essreduce-25.2.3}/docs/user-guide/tof/dream.ipynb +3 -0
- {essreduce-25.2.1 → essreduce-25.2.3}/docs/user-guide/tof/frame-unwrapping.ipynb +6 -3
- {essreduce-25.2.1 → essreduce-25.2.3}/docs/user-guide/tof/wfm.ipynb +1 -0
- {essreduce-25.2.1 → essreduce-25.2.3}/src/ess/reduce/live/raw.py +1 -1
- {essreduce-25.2.1 → essreduce-25.2.3}/src/ess/reduce/live/roi.py +1 -0
- {essreduce-25.2.1 → essreduce-25.2.3}/src/ess/reduce/streaming.py +37 -5
- essreduce-25.2.3/src/ess/reduce/time_of_flight/fakes.py +119 -0
- {essreduce-25.2.1 → essreduce-25.2.3}/src/ess/reduce/time_of_flight/toa_to_tof.py +97 -39
- {essreduce-25.2.1 → essreduce-25.2.3}/src/ess/reduce/time_of_flight/types.py +2 -2
- {essreduce-25.2.1 → essreduce-25.2.3/src/essreduce.egg-info}/PKG-INFO +1 -1
- {essreduce-25.2.1 → essreduce-25.2.3}/tests/live/roi_test.py +8 -0
- {essreduce-25.2.1 → essreduce-25.2.3}/tests/streaming_test.py +106 -0
- essreduce-25.2.3/tests/time_of_flight/unwrap_test.py +385 -0
- {essreduce-25.2.1 → essreduce-25.2.3}/tests/time_of_flight/wfm_test.py +103 -2
- essreduce-25.2.1/src/ess/reduce/time_of_flight/fakes.py +0 -222
- essreduce-25.2.1/tests/time_of_flight/unwrap_test.py +0 -395
- {essreduce-25.2.1 → essreduce-25.2.3}/.copier-answers.ess.yml +0 -0
- {essreduce-25.2.1 → essreduce-25.2.3}/.copier-answers.yml +0 -0
- {essreduce-25.2.1 → essreduce-25.2.3}/.github/ISSUE_TEMPLATE/blank.md +0 -0
- {essreduce-25.2.1 → essreduce-25.2.3}/.github/ISSUE_TEMPLATE/high-level-requirement.yml +0 -0
- {essreduce-25.2.1 → essreduce-25.2.3}/.github/dependabot.yml +0 -0
- {essreduce-25.2.1 → essreduce-25.2.3}/.github/workflows/ci.yml +0 -0
- {essreduce-25.2.1 → essreduce-25.2.3}/.github/workflows/docs.yml +0 -0
- {essreduce-25.2.1 → essreduce-25.2.3}/.github/workflows/nightly_at_main.yml +0 -0
- {essreduce-25.2.1 → essreduce-25.2.3}/.github/workflows/nightly_at_release.yml +0 -0
- {essreduce-25.2.1 → essreduce-25.2.3}/.github/workflows/python-version-ci +0 -0
- {essreduce-25.2.1 → essreduce-25.2.3}/.github/workflows/release.yml +0 -0
- {essreduce-25.2.1 → essreduce-25.2.3}/.github/workflows/test.yml +0 -0
- {essreduce-25.2.1 → essreduce-25.2.3}/.github/workflows/unpinned.yml +0 -0
- {essreduce-25.2.1 → essreduce-25.2.3}/.github/workflows/weekly_windows_macos.yml +0 -0
- {essreduce-25.2.1 → essreduce-25.2.3}/.gitignore +0 -0
- {essreduce-25.2.1 → essreduce-25.2.3}/.pre-commit-config.yaml +0 -0
- {essreduce-25.2.1 → essreduce-25.2.3}/.python-version +0 -0
- {essreduce-25.2.1 → essreduce-25.2.3}/CODE_OF_CONDUCT.md +0 -0
- {essreduce-25.2.1 → essreduce-25.2.3}/CONTRIBUTING.md +0 -0
- {essreduce-25.2.1 → essreduce-25.2.3}/LICENSE +0 -0
- {essreduce-25.2.1 → essreduce-25.2.3}/MANIFEST.in +0 -0
- {essreduce-25.2.1 → essreduce-25.2.3}/README.md +0 -0
- {essreduce-25.2.1 → essreduce-25.2.3}/conda/meta.yaml +0 -0
- {essreduce-25.2.1 → essreduce-25.2.3}/docs/_static/anaconda-icon.js +0 -0
- {essreduce-25.2.1 → essreduce-25.2.3}/docs/_static/favicon.svg +0 -0
- {essreduce-25.2.1 → essreduce-25.2.3}/docs/_static/logo-dark.svg +0 -0
- {essreduce-25.2.1 → essreduce-25.2.3}/docs/_static/logo.svg +0 -0
- {essreduce-25.2.1 → essreduce-25.2.3}/docs/_templates/class-template.rst +0 -0
- {essreduce-25.2.1 → essreduce-25.2.3}/docs/_templates/doc_version.html +0 -0
- {essreduce-25.2.1 → essreduce-25.2.3}/docs/_templates/module-template.rst +0 -0
- {essreduce-25.2.1 → essreduce-25.2.3}/docs/about/index.md +0 -0
- {essreduce-25.2.1 → essreduce-25.2.3}/docs/api-reference/index.md +0 -0
- {essreduce-25.2.1 → essreduce-25.2.3}/docs/conf.py +0 -0
- {essreduce-25.2.1 → essreduce-25.2.3}/docs/developer/coding-conventions.md +0 -0
- {essreduce-25.2.1 → essreduce-25.2.3}/docs/developer/dependency-management.md +0 -0
- {essreduce-25.2.1 → essreduce-25.2.3}/docs/developer/getting-started.md +0 -0
- {essreduce-25.2.1 → essreduce-25.2.3}/docs/developer/gui.ipynb +0 -0
- {essreduce-25.2.1 → essreduce-25.2.3}/docs/developer/index.md +0 -0
- {essreduce-25.2.1 → essreduce-25.2.3}/docs/index.md +0 -0
- {essreduce-25.2.1 → essreduce-25.2.3}/docs/user-guide/index.md +0 -0
- {essreduce-25.2.1 → essreduce-25.2.3}/docs/user-guide/reduction-workflow-guidelines.md +0 -0
- {essreduce-25.2.1 → essreduce-25.2.3}/docs/user-guide/tof/index.md +0 -0
- {essreduce-25.2.1 → essreduce-25.2.3}/docs/user-guide/widget.md +0 -0
- {essreduce-25.2.1 → essreduce-25.2.3}/pyproject.toml +0 -0
- {essreduce-25.2.1 → essreduce-25.2.3}/requirements/base.in +0 -0
- {essreduce-25.2.1 → essreduce-25.2.3}/requirements/base.txt +0 -0
- {essreduce-25.2.1 → essreduce-25.2.3}/requirements/basetest.in +0 -0
- {essreduce-25.2.1 → essreduce-25.2.3}/requirements/basetest.txt +0 -0
- {essreduce-25.2.1 → essreduce-25.2.3}/requirements/ci.in +0 -0
- {essreduce-25.2.1 → essreduce-25.2.3}/requirements/ci.txt +0 -0
- {essreduce-25.2.1 → essreduce-25.2.3}/requirements/dev.in +0 -0
- {essreduce-25.2.1 → essreduce-25.2.3}/requirements/dev.txt +0 -0
- {essreduce-25.2.1 → essreduce-25.2.3}/requirements/docs.in +0 -0
- {essreduce-25.2.1 → essreduce-25.2.3}/requirements/docs.txt +0 -0
- {essreduce-25.2.1 → essreduce-25.2.3}/requirements/make_base.py +0 -0
- {essreduce-25.2.1 → essreduce-25.2.3}/requirements/mypy.in +0 -0
- {essreduce-25.2.1 → essreduce-25.2.3}/requirements/mypy.txt +0 -0
- {essreduce-25.2.1 → essreduce-25.2.3}/requirements/nightly.in +0 -0
- {essreduce-25.2.1 → essreduce-25.2.3}/requirements/nightly.txt +0 -0
- {essreduce-25.2.1 → essreduce-25.2.3}/requirements/static.in +0 -0
- {essreduce-25.2.1 → essreduce-25.2.3}/requirements/static.txt +0 -0
- {essreduce-25.2.1 → essreduce-25.2.3}/requirements/test.in +0 -0
- {essreduce-25.2.1 → essreduce-25.2.3}/requirements/test.txt +0 -0
- {essreduce-25.2.1 → essreduce-25.2.3}/requirements/wheels.in +0 -0
- {essreduce-25.2.1 → essreduce-25.2.3}/requirements/wheels.txt +0 -0
- {essreduce-25.2.1 → essreduce-25.2.3}/resources/logo.svg +0 -0
- {essreduce-25.2.1 → essreduce-25.2.3}/setup.cfg +0 -0
- {essreduce-25.2.1 → essreduce-25.2.3}/src/ess/reduce/__init__.py +0 -0
- {essreduce-25.2.1 → essreduce-25.2.3}/src/ess/reduce/data.py +0 -0
- {essreduce-25.2.1 → essreduce-25.2.3}/src/ess/reduce/live/__init__.py +0 -0
- {essreduce-25.2.1 → essreduce-25.2.3}/src/ess/reduce/live/workflow.py +0 -0
- {essreduce-25.2.1 → essreduce-25.2.3}/src/ess/reduce/logging.py +0 -0
- {essreduce-25.2.1 → essreduce-25.2.3}/src/ess/reduce/nexus/__init__.py +0 -0
- {essreduce-25.2.1 → essreduce-25.2.3}/src/ess/reduce/nexus/_nexus_loader.py +0 -0
- {essreduce-25.2.1 → essreduce-25.2.3}/src/ess/reduce/nexus/json_generator.py +0 -0
- {essreduce-25.2.1 → essreduce-25.2.3}/src/ess/reduce/nexus/json_nexus.py +0 -0
- {essreduce-25.2.1 → essreduce-25.2.3}/src/ess/reduce/nexus/types.py +0 -0
- {essreduce-25.2.1 → essreduce-25.2.3}/src/ess/reduce/nexus/workflow.py +0 -0
- {essreduce-25.2.1 → essreduce-25.2.3}/src/ess/reduce/parameter.py +0 -0
- {essreduce-25.2.1 → essreduce-25.2.3}/src/ess/reduce/py.typed +0 -0
- {essreduce-25.2.1 → essreduce-25.2.3}/src/ess/reduce/scripts/grow_nexus.py +0 -0
- {essreduce-25.2.1 → essreduce-25.2.3}/src/ess/reduce/time_of_flight/__init__.py +0 -0
- {essreduce-25.2.1 → essreduce-25.2.3}/src/ess/reduce/time_of_flight/simulation.py +0 -0
- {essreduce-25.2.1 → essreduce-25.2.3}/src/ess/reduce/time_of_flight/to_events.py +0 -0
- {essreduce-25.2.1 → essreduce-25.2.3}/src/ess/reduce/ui.py +0 -0
- {essreduce-25.2.1 → essreduce-25.2.3}/src/ess/reduce/uncertainty.py +0 -0
- {essreduce-25.2.1 → essreduce-25.2.3}/src/ess/reduce/widgets/__init__.py +0 -0
- {essreduce-25.2.1 → essreduce-25.2.3}/src/ess/reduce/widgets/_base.py +0 -0
- {essreduce-25.2.1 → essreduce-25.2.3}/src/ess/reduce/widgets/_binedges_widget.py +0 -0
- {essreduce-25.2.1 → essreduce-25.2.3}/src/ess/reduce/widgets/_bounds_widget.py +0 -0
- {essreduce-25.2.1 → essreduce-25.2.3}/src/ess/reduce/widgets/_config.py +0 -0
- {essreduce-25.2.1 → essreduce-25.2.3}/src/ess/reduce/widgets/_filename_widget.py +0 -0
- {essreduce-25.2.1 → essreduce-25.2.3}/src/ess/reduce/widgets/_linspace_widget.py +0 -0
- {essreduce-25.2.1 → essreduce-25.2.3}/src/ess/reduce/widgets/_optional_widget.py +0 -0
- {essreduce-25.2.1 → essreduce-25.2.3}/src/ess/reduce/widgets/_spinner.py +0 -0
- {essreduce-25.2.1 → essreduce-25.2.3}/src/ess/reduce/widgets/_string_widget.py +0 -0
- {essreduce-25.2.1 → essreduce-25.2.3}/src/ess/reduce/widgets/_switchable_widget.py +0 -0
- {essreduce-25.2.1 → essreduce-25.2.3}/src/ess/reduce/widgets/_vector_widget.py +0 -0
- {essreduce-25.2.1 → essreduce-25.2.3}/src/ess/reduce/workflow.py +0 -0
- {essreduce-25.2.1 → essreduce-25.2.3}/src/essreduce.egg-info/SOURCES.txt +0 -0
- {essreduce-25.2.1 → essreduce-25.2.3}/src/essreduce.egg-info/dependency_links.txt +0 -0
- {essreduce-25.2.1 → essreduce-25.2.3}/src/essreduce.egg-info/entry_points.txt +0 -0
- {essreduce-25.2.1 → essreduce-25.2.3}/src/essreduce.egg-info/requires.txt +0 -0
- {essreduce-25.2.1 → essreduce-25.2.3}/src/essreduce.egg-info/top_level.txt +0 -0
- {essreduce-25.2.1 → essreduce-25.2.3}/tests/live/raw_test.py +0 -0
- {essreduce-25.2.1 → essreduce-25.2.3}/tests/nexus/json_generator_test.py +0 -0
- {essreduce-25.2.1 → essreduce-25.2.3}/tests/nexus/json_nexus_examples/array_dataset.json +0 -0
- {essreduce-25.2.1 → essreduce-25.2.3}/tests/nexus/json_nexus_examples/dataset.json +0 -0
- {essreduce-25.2.1 → essreduce-25.2.3}/tests/nexus/json_nexus_examples/detector.json +0 -0
- {essreduce-25.2.1 → essreduce-25.2.3}/tests/nexus/json_nexus_examples/entry.json +0 -0
- {essreduce-25.2.1 → essreduce-25.2.3}/tests/nexus/json_nexus_examples/event_data.json +0 -0
- {essreduce-25.2.1 → essreduce-25.2.3}/tests/nexus/json_nexus_examples/instrument.json +0 -0
- {essreduce-25.2.1 → essreduce-25.2.3}/tests/nexus/json_nexus_examples/log.json +0 -0
- {essreduce-25.2.1 → essreduce-25.2.3}/tests/nexus/json_nexus_test.py +0 -0
- {essreduce-25.2.1 → essreduce-25.2.3}/tests/nexus/nexus_loader_test.py +0 -0
- {essreduce-25.2.1 → essreduce-25.2.3}/tests/nexus/workflow_test.py +0 -0
- {essreduce-25.2.1 → essreduce-25.2.3}/tests/package_test.py +0 -0
- {essreduce-25.2.1 → essreduce-25.2.3}/tests/scripts/test_grow_nexus.py +0 -0
- {essreduce-25.2.1 → essreduce-25.2.3}/tests/time_of_flight/to_events_test.py +0 -0
- {essreduce-25.2.1 → essreduce-25.2.3}/tests/uncertainty_test.py +0 -0
- {essreduce-25.2.1 → essreduce-25.2.3}/tests/widget_test.py +0 -0
- {essreduce-25.2.1 → essreduce-25.2.3}/tox.ini +0 -0
--- essreduce-25.2.1/docs/user-guide/tof/dream.ipynb
+++ essreduce-25.2.3/docs/user-guide/tof/dream.ipynb
@@ -290,6 +290,7 @@
     " time_of_flight.providers(), params=time_of_flight.default_parameters()\n",
     ")\n",
     "workflow[time_of_flight.RawData] = raw_data\n",
+    "workflow[time_of_flight.Ltotal] = Ltotal\n",
     "workflow[time_of_flight.LtotalRange] = (\n",
     "    sc.scalar(75.5, unit=\"m\"),\n",
     "    sc.scalar(78.0, unit=\"m\"),\n",
@@ -565,6 +566,7 @@
    "source": [
     "# Update workflow\n",
     "workflow[time_of_flight.RawData] = raw_data\n",
+    "workflow[time_of_flight.Ltotal] = Ltotal\n",
     "\n",
     "# Compute tofs and wavelengths\n",
     "tofs = workflow.compute(time_of_flight.TofData)\n",
@@ -663,6 +665,7 @@
     "    choppers=disk_choppers, neutrons=2_000_000\n",
     ")\n",
     "workflow[time_of_flight.RawData] = ess_beamline.get_monitor(\"detector\")[0]\n",
+    "workflow[time_of_flight.Ltotal] = Ltotal\n",
     "\n",
     "sim = workflow.compute(time_of_flight.SimulationResults)\n",
     "\n",
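These notebook additions track an API change that appears further down in this diff: the `extract_ltotal` provider is removed from `toa_to_tof.py`, so `Ltotal` must now be set on the workflow explicitly. A minimal sketch of the new pattern, assuming detector data that carries an `Ltotal` coordinate (as in the frame-unwrapping notebook); the `raw_data` object here is a toy stand-in, not real detector data:

```python
import sciline
import scipp as sc

from ess.reduce import time_of_flight

# Toy stand-in for detector event data carrying an Ltotal coordinate.
raw_data = sc.DataArray(
    sc.ones(dims=["event"], shape=[3]),
    coords={"Ltotal": sc.scalar(76.0, unit="m")},
)

workflow = sciline.Pipeline(
    time_of_flight.providers(), params=time_of_flight.default_parameters()
)
workflow[time_of_flight.RawData] = raw_data
# Previously derived automatically by the (now removed) extract_ltotal provider:
workflow[time_of_flight.Ltotal] = raw_data.coords["Ltotal"]
```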
--- essreduce-25.2.1/docs/user-guide/tof/frame-unwrapping.ipynb
+++ essreduce-25.2.3/docs/user-guide/tof/frame-unwrapping.ipynb
@@ -164,6 +164,7 @@
     ")\n",
     "\n",
     "workflow[time_of_flight.RawData] = nxevent_data\n",
+    "workflow[time_of_flight.Ltotal] = nxevent_data.coords[\"Ltotal\"]\n",
     "workflow[time_of_flight.LtotalRange] = detectors[0].distance, detectors[-1].distance\n",
     "workflow[time_of_flight.SimulationResults] = time_of_flight.simulate_beamline(\n",
     "    choppers={\n",
@@ -345,7 +346,7 @@
     "\n",
     "model = tof.Model(source=source, choppers=choppers, detectors=detectors)\n",
     "results = model.run()\n",
-    "results.plot(
+    "results.plot(blocked_rays=5000)"
    ]
   },
   {
@@ -372,8 +373,11 @@
     " time_of_flight.providers(), params=time_of_flight.default_parameters()\n",
     ")\n",
     "\n",
+    "nxevent_data = results.to_nxevent_data()\n",
+    "\n",
     "workflow[time_of_flight.PulseStride] = 2\n",
-    "workflow[time_of_flight.RawData] =
+    "workflow[time_of_flight.RawData] = nxevent_data\n",
+    "workflow[time_of_flight.Ltotal] = nxevent_data.coords[\"Ltotal\"]\n",
     "workflow[time_of_flight.LtotalRange] = detectors[0].distance, detectors[-1].distance\n",
     "workflow[time_of_flight.SimulationResults] = time_of_flight.simulate_beamline(\n",
     "    choppers={\n",
@@ -391,7 +395,6 @@
     "        )\n",
     "        for ch in choppers\n",
     "    },\n",
-    "    neutrons=500_000,\n",
     ")\n",
     "\n",
     "workflow[time_of_flight.DistanceResolution] = sc.scalar(0.5, unit=\"m\")\n",
--- essreduce-25.2.1/docs/user-guide/tof/wfm.ipynb
+++ essreduce-25.2.3/docs/user-guide/tof/wfm.ipynb
@@ -310,6 +310,7 @@
     " time_of_flight.providers(), params=time_of_flight.default_parameters()\n",
     ")\n",
     "workflow[time_of_flight.RawData] = raw_data\n",
+    "workflow[time_of_flight.Ltotal] = Ltotal\n",
     "workflow[time_of_flight.LtotalRange] = Ltotal, Ltotal\n",
     "workflow[time_of_flight.LookupTableRelativeErrorThreshold] = 0.1\n",
     "\n",
--- essreduce-25.2.1/src/ess/reduce/live/raw.py
+++ essreduce-25.2.3/src/ess/reduce/live/raw.py
@@ -639,7 +639,7 @@ def position_with_noisy_replicas(
     # "Paint" the short array of noise on top of the (replicated) position data.
     noise = sc.concat(
         [position_noise] * ceil(size / position_noise.size), dim=noise_dim
-    )[:size].fold(dim=noise_dim, sizes={'replica': replicas, position.
+    )[:size].fold(dim=noise_dim, sizes={'replica': replicas, **position.sizes})
     return sc.concat([position, noise + position], dim='replica')


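For context, the corrected line folds the flat, tiled noise array into a `replica` dimension plus the original position dimensions via `**position.sizes`. A standalone sketch of that reshaping with made-up sizes (the names and values below are illustrative, not the module's actual data):

```python
from math import ceil

import scipp as sc

position = sc.arange('position', 4.0, unit='m')  # 4 hypothetical positions
replicas = 2
size = replicas * position.sizes['position']  # 8 noise values needed
# Short noise array that must be tiled to cover all replicas
position_noise = sc.array(dims=['position_noise'], values=[0.1, -0.2, 0.3], unit='m')
noise = sc.concat(
    [position_noise] * ceil(size / position_noise.sizes['position_noise']),
    dim='position_noise',
)['position_noise', :size].fold(
    dim='position_noise', sizes={'replica': replicas, **position.sizes}
)
assert dict(noise.sizes) == {'replica': 2, 'position': 4}
```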
--- essreduce-25.2.1/src/ess/reduce/live/roi.py
+++ essreduce-25.2.3/src/ess/reduce/live/roi.py
@@ -32,6 +32,7 @@ def select_indices_in_intervals(
     for dim, bounds in intervals.items():
         low, high = sorted(bounds)
         indices = indices[dim, low:high]
+    indices = indices if isinstance(indices, sc.Variable) else indices.data
     indices = indices.flatten(to=out_dim)
     if indices.bins is None:
         return indices
--- essreduce-25.2.1/src/ess/reduce/streaming.py
+++ essreduce-25.2.3/src/ess/reduce/streaming.py
@@ -147,7 +147,7 @@ class StreamProcessor:
         *,
         dynamic_keys: tuple[sciline.typing.Key, ...],
         target_keys: tuple[sciline.typing.Key, ...],
-        accumulators: dict[sciline.typing.Key, Accumulator
+        accumulators: dict[sciline.typing.Key, Accumulator | Callable[..., Accumulator]]
         | tuple[sciline.typing.Key, ...],
         allow_bypass: bool = False,
     ) -> None:
@@ -180,6 +180,8 @@ class StreamProcessor:
         for key in dynamic_keys:
             workflow[key] = None  # hack to prune branches

+        self._dynamic_keys = set(dynamic_keys)
+
         # Find and pre-compute static nodes as far down the graph as possible
         # See also https://github.com/scipp/sciline/issues/148.
         nodes = _find_descendants(workflow, dynamic_keys)
@@ -194,12 +196,19 @@ class StreamProcessor:
             if isinstance(accumulators, dict)
             else {key: EternalAccumulator() for key in accumulators}
         )
+
+        # Map each accumulator to its dependent dynamic keys
+        graph = workflow.underlying_graph
+        self._accumulator_dependencies = {
+            acc_key: nx.ancestors(graph, acc_key) & self._dynamic_keys
+            for acc_key in self._accumulators
+            if acc_key in graph
+        }
+
         # Depending on the target_keys, some accumulators can be unused and should not
         # be computed when adding a chunk.
         self._accumulators = {
-            key: value
-            for key, value in self._accumulators.items()
-            if key in self._process_chunk_workflow.underlying_graph
+            key: value for key, value in self._accumulators.items() if key in graph
        }
        # Create accumulators unless instances were passed. This allows for initializing
        # accumulators with arguments that depend on the workflow such as bin edges,
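The dependency map built above relies on `networkx.ancestors`, which returns every node with a path to the given node; intersecting that set with the dynamic keys leaves exactly the streamed inputs each accumulator depends on. A toy illustration with plain networkx (the string node names are hypothetical, standing in for sciline keys):

```python
import networkx as nx

# Toy task graph: DynamicA -> AccumA -> Target <- AccumB <- DynamicB
graph = nx.DiGraph(
    [('DynamicA', 'AccumA'), ('DynamicB', 'AccumB'),
     ('AccumA', 'Target'), ('AccumB', 'Target')]
)
dynamic_keys = {'DynamicA', 'DynamicB'}
deps = {acc: nx.ancestors(graph, acc) & dynamic_keys for acc in ('AccumA', 'AccumB')}
assert deps == {'AccumA': {'DynamicA'}, 'AccumB': {'DynamicB'}}
```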
@@ -242,7 +251,30 @@ class StreamProcessor:
         ----------
         chunks:
             Chunks to be processed.
+
+        Raises
+        ------
+        ValueError
+            If non-dynamic keys are provided in chunks.
+            If accumulator computation requires dynamic keys not provided in chunks.
         """
+        non_dynamic = set(chunks) - self._dynamic_keys
+        if non_dynamic:
+            raise ValueError(
+                f"Can only update dynamic keys. Got non-dynamic keys: {non_dynamic}"
+            )
+
+        accumulators_to_update = []
+        for acc_key, deps in self._accumulator_dependencies.items():
+            if deps.isdisjoint(chunks.keys()):
+                continue
+            if not deps.issubset(chunks.keys()):
+                raise ValueError(
+                    f"Accumulator '{acc_key}' requires dynamic keys "
+                    f"{deps - chunks.keys()} not provided in the current chunk."
+                )
+            accumulators_to_update.append(acc_key)
+
         for key, value in chunks.items():
             self._process_chunk_workflow[key] = value
             # There can be dynamic keys that do not "terminate" in any accumulator. In
@@ -250,7 +282,7 @@ class StreamProcessor:
             # the target keys.
             if self._allow_bypass:
                 self._finalize_workflow[key] = value
-        to_accumulate = self._process_chunk_workflow.compute(
+        to_accumulate = self._process_chunk_workflow.compute(accumulators_to_update)
         for key, processed in to_accumulate.items():
             self._accumulators[key].push(processed)

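Taken together, these changes let `add_chunk` accept partial updates: an accumulator is recomputed only when at least one of its dynamic dependencies is in the chunk, a chunk covering some but not all dependencies of an accumulator raises, and updating a non-dynamic key raises. A runnable sketch mirroring the new tests added below, with a simplified pipeline (no `StaticA` factor):

```python
from typing import NewType

import sciline
import scipp as sc

from ess.reduce import streaming

DynamicA = NewType('DynamicA', float)
DynamicB = NewType('DynamicB', float)
AccumA = NewType('AccumA', float)
AccumB = NewType('AccumB', float)
Target = NewType('Target', float)


def make_accum_a(value: DynamicA) -> AccumA:
    return AccumA(value)


def make_accum_b(value: DynamicB) -> AccumB:
    return AccumB(value)


def make_target(accum_a: AccumA, accum_b: AccumB) -> Target:
    return Target(accum_a / accum_b)


proc = streaming.StreamProcessor(
    base_workflow=sciline.Pipeline((make_accum_a, make_accum_b, make_target)),
    dynamic_keys=(DynamicA, DynamicB),
    target_keys=(Target,),
    accumulators=(AccumA, AccumB),
)
proc.add_chunk({DynamicA: sc.scalar(1.0), DynamicB: sc.scalar(4.0)})
# Partial update: only AccumA's dependency is present, so only AccumA updates;
# AccumB keeps its previously accumulated value.
result = proc.add_chunk({DynamicA: sc.scalar(2.0)})
assert sc.identical(result[Target], sc.scalar((1.0 + 2.0) / 4.0))
# proc.add_chunk({AccumA: sc.scalar(2.0)}) would raise ValueError (non-dynamic key).
```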
--- /dev/null
+++ essreduce-25.2.3/src/ess/reduce/time_of_flight/fakes.py
@@ -0,0 +1,119 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright (c) 2025 Scipp contributors (https://github.com/scipp)
+"""
+A fake time-of-flight neutron beamline for documentation and testing.
+
+This provides detector event data in a structure as typically provided in a NeXus file,
+with event_time_offset and event_time_zero information.
+"""
+
+from collections.abc import Callable
+
+import numpy as np
+import scipp as sc
+from scippneutron.chopper import DiskChopper
+
+
+class FakeBeamline:
+    def __init__(
+        self,
+        choppers: dict[str, DiskChopper],
+        monitors: dict[str, sc.Variable],
+        run_length: sc.Variable,
+        events_per_pulse: int = 200000,
+        seed: int | None = None,
+        source: Callable | None = None,
+    ):
+        import math
+
+        import tof as tof_pkg
+        from tof.facilities.ess_pulse import pulse
+
+        self.frequency = pulse.frequency
+        self.npulses = math.ceil((run_length * self.frequency).to(unit="").value)
+        self.events_per_pulse = events_per_pulse
+
+        # Create a source
+        if source is None:
+            self.source = tof_pkg.Source(
+                facility="ess",
+                neutrons=self.events_per_pulse,
+                pulses=self.npulses,
+                seed=seed,
+            )
+        else:
+            self.source = source(pulses=self.npulses)
+
+        # Convert the choppers to tof.Chopper
+        self.choppers = [
+            tof_pkg.Chopper(
+                frequency=abs(ch.frequency),
+                direction=tof_pkg.AntiClockwise
+                if (ch.frequency.value > 0.0)
+                else tof_pkg.Clockwise,
+                open=ch.slit_begin,
+                close=ch.slit_end,
+                phase=abs(ch.phase),
+                distance=ch.axle_position.fields.z,
+                name=name,
+            )
+            for name, ch in choppers.items()
+        ]
+
+        # Add detectors
+        self.monitors = [
+            tof_pkg.Detector(distance=distance, name=key)
+            for key, distance in monitors.items()
+        ]
+
+        # Propagate the neutrons
+        self.model = tof_pkg.Model(
+            source=self.source, choppers=self.choppers, detectors=self.monitors
+        )
+        self.model_result = self.model.run()
+
+    def get_monitor(self, name: str) -> sc.DataGroup:
+        nx_event_data = self.model_result.to_nxevent_data(name)
+        raw_data = self.model_result.detectors[name].data.flatten(to="event")
+        raw_data = raw_data[~raw_data.masks["blocked_by_others"]].copy()
+        return nx_event_data, raw_data
+
+
+def psc_choppers():
+    return {
+        "chopper": DiskChopper(
+            frequency=sc.scalar(-14.0, unit="Hz"),
+            beam_position=sc.scalar(0.0, unit="deg"),
+            phase=sc.scalar(-85.0, unit="deg"),
+            axle_position=sc.vector(value=[0, 0, 8.0], unit="m"),
+            slit_begin=sc.array(dims=["cutout"], values=[0.0], unit="deg"),
+            slit_end=sc.array(dims=["cutout"], values=[3.0], unit="deg"),
+            slit_height=sc.scalar(10.0, unit="cm"),
+            radius=sc.scalar(30.0, unit="cm"),
+        )
+    }
+
+
+def pulse_skipping_choppers():
+    return {
+        "chopper": DiskChopper(
+            frequency=sc.scalar(-14.0, unit="Hz"),
+            beam_position=sc.scalar(0.0, unit="deg"),
+            phase=sc.scalar(-35.0, unit="deg"),
+            axle_position=sc.vector(value=[0, 0, 8.0], unit="m"),
+            slit_begin=sc.array(dims=["cutout"], values=np.array([0.0]), unit="deg"),
+            slit_end=sc.array(dims=["cutout"], values=np.array([33.0]), unit="deg"),
+            slit_height=sc.scalar(10.0, unit="cm"),
+            radius=sc.scalar(30.0, unit="cm"),
+        ),
+        "pulse_skipping": DiskChopper(
+            frequency=sc.scalar(-7.0, unit="Hz"),
+            beam_position=sc.scalar(0.0, unit="deg"),
+            phase=sc.scalar(-10.0, unit="deg"),
+            axle_position=sc.vector(value=[0, 0, 15.0], unit="m"),
+            slit_begin=sc.array(dims=["cutout"], values=np.array([0.0]), unit="deg"),
+            slit_end=sc.array(dims=["cutout"], values=np.array([120.0]), unit="deg"),
+            slit_height=sc.scalar(10.0, unit="cm"),
+            radius=sc.scalar(30.0, unit="cm"),
+        ),
+    }
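A quick usage sketch of the new fake beamline, following how the dream notebook consumes it (`get_monitor` returns a `(nxevent_data, raw_data)` pair, hence the `[0]` indexing in the notebook). Requires the `tof` package; the monitor distance and run length below are made-up values:

```python
import scipp as sc

from ess.reduce.time_of_flight.fakes import FakeBeamline, psc_choppers

beamline = FakeBeamline(
    choppers=psc_choppers(),
    monitors={"detector": sc.scalar(26.0, unit="m")},  # made-up distance
    run_length=sc.scalar(1.0, unit="s"),
    events_per_pulse=100_000,
    seed=1234,
)
# First element is the NXevent_data-style structure used as RawData in the workflow.
nxevent_data, raw_data = beamline.get_monitor("detector")
```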
--- essreduce-25.2.1/src/ess/reduce/time_of_flight/toa_to_tof.py
+++ essreduce-25.2.3/src/ess/reduce/time_of_flight/toa_to_tof.py
@@ -32,20 +32,6 @@ from .types import (
 )


-def extract_ltotal(da: RawData) -> Ltotal:
-    """
-    Extract the total length of the flight path from the source to the detector from the
-    detector data.
-
-    Parameters
-    ----------
-    da:
-        Raw detector data loaded from a NeXus file, e.g., NXdetector containing
-        NXevent_data.
-    """
-    return Ltotal(da.coords["Ltotal"])
-
-
 def _mask_large_uncertainty(table: sc.DataArray, error_threshold: float):
     """
     Mask regions with large uncertainty with NaNs.
@@ -389,6 +375,56 @@ def _time_of_flight_data_histogram(
     return rebinned.assign_coords(tof=tofs)


+def _guess_pulse_stride_offset(
+    pulse_index: sc.Variable,
+    ltotal: sc.Variable,
+    event_time_offset: sc.Variable,
+    pulse_stride: int,
+    interp: Callable,
+) -> int:
+    """
+    Using the minimum ``event_time_zero`` to calculate a reference time when computing
+    the time-of-flight for the neutron events makes the workflow depend on when the
+    first event was recorded. There is no straightforward way to know if we started
+    recording at the beginning of a frame, or half-way through a frame, without looking
+    at the chopper logs. This can be manually corrected using the pulse_stride_offset
+    parameter, but this makes automatic reduction of the data difficult.
+    See https://github.com/scipp/essreduce/issues/184.
+
+    Here, we perform a simple guess for the ``pulse_stride_offset`` if it is not
+    provided.
+    We choose a few random events, compute the time-of-flight for every possible value
+    of pulse_stride_offset, and return the value that yields the least number of NaNs
+    in the computed time-of-flight.
+
+    Parameters
+    ----------
+    pulse_index:
+        Pulse index for every event.
+    ltotal:
+        Total length of the flight path from the source to the detector for each event.
+    event_time_offset:
+        Time of arrival of the neutron at the detector for each event.
+    pulse_stride:
+        Stride of used pulses.
+    interp:
+        2D interpolator for the lookup table.
+    """
+    tofs = {}
+    # Choose a few random events to compute the time-of-flight
+    inds = np.random.choice(
+        len(event_time_offset), min(5000, len(event_time_offset)), replace=False
+    )
+    pulse_index_values = pulse_index.values[inds]
+    ltotal_values = ltotal.values[inds]
+    etos_values = event_time_offset.values[inds]
+    for i in range(pulse_stride):
+        pulse_inds = (pulse_index_values + i) % pulse_stride
+        tofs[i] = interp((pulse_inds, ltotal_values, etos_values))
+    # Find the entry in the list with the least number of nan values
+    return sorted(tofs, key=lambda x: np.isnan(tofs[x]).sum())[0]
+
+
 def _time_of_flight_data_events(
     da: sc.DataArray,
     lookup: sc.DataArray,
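The guessing strategy is an arg-min over NaN counts: evaluate the lookup for each candidate offset on a small event sample and keep the offset that leaves the fewest events unmapped. A toy numpy illustration of that selection, with a fake interpolator standing in for the real lookup table:

```python
import numpy as np

pulse_stride = 2
true_index = np.random.default_rng(0).integers(0, pulse_stride, size=1000)
# Recorded indices are shifted by an unknown offset (here: 1).
recorded = (true_index + 1) % pulse_stride


def fake_interp(pulse_inds):
    """Stand-in for the lookup interpolator: NaN when the pulse index is wrong."""
    return np.where(pulse_inds == true_index, 1.0, np.nan)


nan_counts = {
    i: np.isnan(fake_interp((recorded + i) % pulse_stride)).sum()
    for i in range(pulse_stride)
}
assert min(nan_counts, key=nan_counts.get) == 1  # recovers the offset
```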
@@ -399,28 +435,6 @@ def _time_of_flight_data_events(
 ) -> sc.DataArray:
     etos = da.bins.coords["event_time_offset"]
     eto_unit = elem_unit(etos)
-    pulse_period = pulse_period.to(unit=eto_unit)
-    frame_period = pulse_period * pulse_stride
-
-    # TODO: Finding the `tmin` below will not work in the case were data is processed
-    # in chunks, as taking the minimum time in each chunk will lead to inconsistent
-    # pulse indices (this will be the case in live data, or when using the
-    # StreamProcessor). We could instead read it from the first chunk and store it?
-
-    # Compute a pulse index for every event: it is the index of the pulse within a
-    # frame period. When there is no pulse skipping, those are all zero. When there is
-    # pulse skipping, the index ranges from zero to pulse_stride - 1.
-    tmin = da.bins.coords['event_time_zero'].min()
-    pulse_index = (
-        (
-            (da.bins.coords['event_time_zero'] - tmin).to(unit=eto_unit)
-            + 0.5 * pulse_period
-        )
-        % frame_period
-    ) // pulse_period
-    # Apply the pulse_stride_offset
-    pulse_index += pulse_stride_offset
-    pulse_index %= pulse_stride

     # Create 2D interpolator
     interp = _make_tof_interpolator(
@@ -430,7 +444,51 @@ def _time_of_flight_data_events(
     # Operate on events (broadcast distances to all events)
     ltotal = sc.bins_like(etos, ltotal).bins.constituents["data"]
     etos = etos.bins.constituents["data"]
-
+
+    # Compute a pulse index for every event: it is the index of the pulse within a
+    # frame period. When there is no pulse skipping, those are all zero. When there is
+    # pulse skipping, the index ranges from zero to pulse_stride - 1.
+    if pulse_stride == 1:
+        pulse_index = sc.zeros(sizes=etos.sizes)
+    else:
+        etz_unit = 'ns'
+        etz = (
+            da.bins.coords["event_time_zero"]
+            .bins.constituents["data"]
+            .to(unit=etz_unit, copy=False)
+        )
+        pulse_period = pulse_period.to(unit=etz_unit, dtype=int)
+        frame_period = pulse_period * pulse_stride
+        # Define a common reference time using epoch as a base, but making sure that it
+        # is aligned with the pulse_period and the frame_period.
+        # We need to use a global reference time instead of simply taking the minimum
+        # event_time_zero because the events may arrive in chunks, and the first event
+        # may not be the first event of the first pulse for all chunks. This would lead
+        # to inconsistent pulse indices.
+        epoch = sc.datetime(0, unit=etz_unit)
+        diff_to_epoch = (etz.min() - epoch) % pulse_period
+        # Here we offset the reference by half a pulse period to avoid errors from
+        # fluctuations in the event_time_zeros in the data. They are triggered by the
+        # neutron source, and may not always be exactly separated by the pulse period.
+        # While fluctuations will exist, they will be small, and offsetting the times
+        # by half a pulse period is a simple enough fix.
+        reference = epoch + diff_to_epoch - (pulse_period // 2)
+        # Use in-place operations to avoid large allocations
+        pulse_index = etz - reference
+        pulse_index %= frame_period
+        pulse_index //= pulse_period
+
+    # Apply the pulse_stride_offset
+    if pulse_stride_offset is None:
+        pulse_stride_offset = _guess_pulse_stride_offset(
+            pulse_index=pulse_index,
+            ltotal=ltotal,
+            event_time_offset=etos,
+            pulse_stride=pulse_stride,
+            interp=interp,
+        )
+    pulse_index += pulse_stride_offset
+    pulse_index %= pulse_stride

     # Compute time-of-flight for all neutrons using the interpolator
     tofs = sc.array(
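The epoch-aligned reference makes the pulse index depend on the absolute `event_time_zero` only through `etz.min() % pulse_period`, which is stable (up to small trigger jitter, absorbed by the half-period shift) no matter which chunk the events arrive in. The arithmetic in isolation, with plain integers standing in for datetime64 nanosecond values (all numbers below are made up):

```python
import numpy as np

pulse_period = 71_428_571  # ns, ~1/14 Hz
pulse_stride = 2
frame_period = pulse_period * pulse_stride

# Made-up event_time_zero timestamps (ns since epoch) with slight trigger jitter.
etz = np.arange(4) * pulse_period + 10_000_000 + np.array([5, -3, 8, 0])

# Align the reference with the pulse period, then back off by half a period so
# that jitter around the pulse boundary cannot flip an event's pulse index.
reference = (etz.min() % pulse_period) - pulse_period // 2
pulse_index = ((etz - reference) % frame_period) // pulse_period
print(pulse_index)  # [0 1 0 1]
```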
@@ -535,7 +593,7 @@ def default_parameters() -> dict:
     return {
         PulsePeriod: 1.0 / sc.scalar(14.0, unit="Hz"),
         PulseStride: 1,
-        PulseStrideOffset:
+        PulseStrideOffset: None,
         DistanceResolution: sc.scalar(0.1, unit="m"),
         TimeResolution: sc.scalar(250.0, unit='us'),
         LookupTableRelativeErrorThreshold: 0.1,
@@ -546,4 +604,4 @@ def providers() -> tuple[Callable]:
     """
     Providers of the time-of-flight workflow.
     """
-    return (compute_tof_lookup_table,
+    return (compute_tof_lookup_table, time_of_flight_data)
@@ -101,10 +101,10 @@ PulseStride = NewType("PulseStride", int)
|
|
|
101
101
|
Stride of used pulses. Usually 1, but may be a small integer when pulse-skipping.
|
|
102
102
|
"""
|
|
103
103
|
|
|
104
|
-
PulseStrideOffset = NewType("PulseStrideOffset", int)
|
|
104
|
+
PulseStrideOffset = NewType("PulseStrideOffset", int | None)
|
|
105
105
|
"""
|
|
106
106
|
When pulse-skipping, the offset of the first pulse in the stride. This is typically
|
|
107
|
-
zero but can be a small integer < pulse_stride.
|
|
107
|
+
zero but can be a small integer < pulse_stride. If None, a guess is made.
|
|
108
108
|
"""
|
|
109
109
|
|
|
110
110
|
RawData = NewType("RawData", sc.DataArray)
|
|
--- essreduce-25.2.1/tests/live/roi_test.py
+++ essreduce-25.2.3/tests/live/roi_test.py
@@ -64,6 +64,14 @@ def test_select_indices_fails_without_required_coords():
     )


+def test_select_indices_works_with_empty_selection(binned_indices):
+    selected = roi.select_indices_in_intervals(
+        intervals=sc.DataGroup(x=(1, 1)), indices=binned_indices
+    )
+    assert selected.dim == 'index'
+    assert selected.sizes[selected.dim] == 0
+
+
 def test_apply_selection_empty_yields_empty_result():
     selection = sc.array(dims=['index'], values=[], unit=None, dtype='int32')
     data = sc.arange('detector_number', 12, dtype='int32')
--- essreduce-25.2.1/tests/streaming_test.py
+++ essreduce-25.2.3/tests/streaming_test.py
@@ -96,9 +96,11 @@ def test_rolling_accumulator_does_not_modify_pushed_values() -> None:

 DynamicA = NewType('DynamicA', float)
 DynamicB = NewType('DynamicB', float)
+DynamicC = NewType('DynamicC', float)
 StaticA = NewType('StaticA', float)
 AccumA = NewType('AccumA', float)
 AccumB = NewType('AccumB', float)
+AccumC = NewType('AccumC', float)
 Target = NewType('Target', float)


@@ -118,6 +120,10 @@ def make_accum_b(value: DynamicB) -> AccumB:
     return AccumB(value)


+def make_accum_c(value: DynamicC) -> AccumC:
+    return AccumC(value)
+
+
 def make_target(accum_a: AccumA, accum_b: AccumB) -> Target:
     return Target(accum_a / accum_b)

@@ -322,3 +328,103 @@ def test_StreamProcessor_calls_providers_after_accumulators_only_when_finalizing
     assert sc.identical(result[Target], sc.scalar(2 * 6.0 / 15.0))
     # Outputs are not cached.
     assert _make_target.call_count == 3
+
+
+def test_StreamProcessor_does_not_reuse_dynamic_keys() -> None:
+    base_workflow = sciline.Pipeline(
+        (make_accum_a, make_accum_b, make_target), params={StaticA: 2.0}
+    )
+    orig_workflow = base_workflow.copy()
+
+    streaming_wf = streaming.StreamProcessor(
+        base_workflow=base_workflow,
+        dynamic_keys=(DynamicA, DynamicB),
+        target_keys=(Target,),
+        accumulators=(AccumA, AccumB),
+    )
+    result = streaming_wf.add_chunk({DynamicA: sc.scalar(1), DynamicB: sc.scalar(4)})
+    assert sc.identical(result[Target], sc.scalar(2 * 1.0 / 4.0))
+    result = streaming_wf.add_chunk({DynamicA: sc.scalar(2)})  # Only A
+    assert not sc.identical(result[Target], sc.scalar(2 * 3.0 / 8.0))
+    assert sc.identical(result[Target], sc.scalar(2 * 3.0 / 4.0))
+    result = streaming_wf.add_chunk({DynamicB: sc.scalar(5)})  # Only B
+    assert sc.identical(result[Target], sc.scalar(2 * 3.0 / 9.0))
+    result = streaming_wf.add_chunk({DynamicA: sc.scalar(3), DynamicB: sc.scalar(6)})
+    assert sc.identical(result[Target], sc.scalar(2 * 6.0 / 15.0))
+
+    # Consistency check: Run the original workflow with the same inputs, all at once
+    orig_workflow[DynamicA] = sc.scalar(1 + 2 + 3)
+    orig_workflow[DynamicB] = sc.scalar(4 + 5 + 6)
+    expected = orig_workflow.compute(Target)
+    assert sc.identical(expected, result[Target])
+
+
+def test_StreamProcessor_raises_given_partial_update_for_accumulator() -> None:
+    base_workflow = sciline.Pipeline(
+        (make_accum_a, make_accum_b, make_accum_c, make_target), params={StaticA: 2.0}
+    )
+    streaming_wf = streaming.StreamProcessor(
+        base_workflow=base_workflow,
+        dynamic_keys=(DynamicA, DynamicB, DynamicC),
+        target_keys=(Target, AccumC),
+        accumulators=(Target, AccumC),  # Target depends on both A and B
+    )
+    # We can update either (A, B) and/or C...
+    result = streaming_wf.add_chunk({DynamicA: sc.scalar(1), DynamicB: sc.scalar(4)})
+    assert sc.identical(result[Target], sc.scalar(2 * 1.0 / 4.0))
+    assert result[AccumC] is None
+    result = streaming_wf.add_chunk({DynamicC: sc.scalar(11)})
+    assert sc.identical(result[Target], sc.scalar(2 * 1.0 / 4.0))
+    assert sc.identical(result[AccumC], sc.scalar(11))
+    result = streaming_wf.add_chunk({DynamicA: sc.scalar(2), DynamicB: sc.scalar(5)})
+    assert sc.identical(result[Target], sc.scalar(2 * (1.0 / 4.0 + 2.0 / 5.0)))
+    assert sc.identical(result[AccumC], sc.scalar(11))
+    result = streaming_wf.add_chunk({DynamicC: sc.scalar(12)})
+    assert sc.identical(result[Target], sc.scalar(2 * (1.0 / 4.0 + 2.0 / 5.0)))
+    assert sc.identical(result[AccumC], sc.scalar(23))
+    # ... but not just A or B
+    with pytest.raises(
+        ValueError,
+        match=r'{tests.streaming_test.DynamicB} not provided in the current chunk',
+    ):
+        result = streaming_wf.add_chunk({DynamicA: sc.scalar(2)})  # Only A
+    with pytest.raises(
+        ValueError,
+        match=r'{tests.streaming_test.DynamicA} not provided in the current chunk',
+    ):
+        result = streaming_wf.add_chunk({DynamicB: sc.scalar(5)})  # Only B
+
+
+def test_StreamProcessor_raises_when_trying_to_update_non_dynamic_key() -> None:
+    base_workflow = sciline.Pipeline(
+        (make_static_a, make_accum_a, make_accum_b, make_target)
+    )
+    streaming_wf = streaming.StreamProcessor(
+        base_workflow=base_workflow,
+        dynamic_keys=(DynamicA, DynamicB),
+        target_keys=(Target,),
+        accumulators=(AccumA, AccumB),
+    )
+
+    # Regular update ok
+    result = streaming_wf.add_chunk({DynamicA: sc.scalar(1), DynamicB: sc.scalar(4)})
+    assert sc.identical(result[Target], sc.scalar(2 * 1.0 / 4.0))
+
+    # Non-dynamic input key
+    with pytest.raises(
+        ValueError,
+        match=r'Got non-dynamic keys: {tests.streaming_test.StaticA}',
+    ):
+        result = streaming_wf.add_chunk({StaticA: sc.scalar(2)})
+    # Intermediate key depending on dynamic key
+    with pytest.raises(
+        ValueError,
+        match=r'Got non-dynamic keys: {tests.streaming_test.AccumA}',
+    ):
+        result = streaming_wf.add_chunk({AccumA: sc.scalar(2)})
+    # Target key depending on dynamic key
+    with pytest.raises(
+        ValueError,
+        match=r'Got non-dynamic keys: {tests.streaming_test.Target}',
+    ):
+        result = streaming_wf.add_chunk({Target: sc.scalar(2)})