essreduce 25.12.0.tar.gz → 26.1.0.tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {essreduce-25.12.0 → essreduce-26.1.0}/PKG-INFO +1 -1
- {essreduce-25.12.0 → essreduce-26.1.0}/docs/user-guide/tof/dream.ipynb +7 -7
- {essreduce-25.12.0 → essreduce-26.1.0}/docs/user-guide/tof/frame-unwrapping.ipynb +6 -6
- {essreduce-25.12.0 → essreduce-26.1.0}/docs/user-guide/tof/wfm.ipynb +4 -4
- {essreduce-25.12.0 → essreduce-26.1.0}/requirements/base.txt +1 -1
- {essreduce-25.12.0 → essreduce-26.1.0}/requirements/basetest.txt +1 -1
- {essreduce-25.12.0 → essreduce-26.1.0}/src/ess/reduce/nexus/workflow.py +8 -6
- {essreduce-25.12.0 → essreduce-26.1.0}/src/ess/reduce/normalization.py +154 -26
- {essreduce-25.12.0 → essreduce-26.1.0}/src/ess/reduce/time_of_flight/__init__.py +4 -0
- {essreduce-25.12.0 → essreduce-26.1.0}/src/ess/reduce/time_of_flight/eto_to_tof.py +26 -10
- {essreduce-25.12.0 → essreduce-26.1.0}/src/ess/reduce/time_of_flight/lut.py +5 -5
- {essreduce-25.12.0 → essreduce-26.1.0}/src/ess/reduce/time_of_flight/types.py +9 -2
- {essreduce-25.12.0 → essreduce-26.1.0}/src/ess/reduce/time_of_flight/workflow.py +5 -5
- {essreduce-25.12.0 → essreduce-26.1.0}/src/essreduce.egg-info/PKG-INFO +1 -1
- {essreduce-25.12.0 → essreduce-26.1.0}/tests/nexus/workflow_test.py +119 -6
- {essreduce-25.12.0 → essreduce-26.1.0}/tests/normalization_test.py +43 -0
- {essreduce-25.12.0 → essreduce-26.1.0}/tests/time_of_flight/lut_test.py +4 -4
- {essreduce-25.12.0 → essreduce-26.1.0}/tests/time_of_flight/unwrap_test.py +3 -9
- {essreduce-25.12.0 → essreduce-26.1.0}/tests/time_of_flight/wfm_test.py +1 -3
- {essreduce-25.12.0 → essreduce-26.1.0}/tests/time_of_flight/workflow_test.py +76 -11
- {essreduce-25.12.0 → essreduce-26.1.0}/tox.ini +3 -3
- {essreduce-25.12.0 → essreduce-26.1.0}/.copier-answers.ess.yml +0 -0
- {essreduce-25.12.0 → essreduce-26.1.0}/.copier-answers.yml +0 -0
- {essreduce-25.12.0 → essreduce-26.1.0}/.github/ISSUE_TEMPLATE/high-level-requirement.yml +0 -0
- {essreduce-25.12.0 → essreduce-26.1.0}/.github/dependabot.yml +0 -0
- {essreduce-25.12.0 → essreduce-26.1.0}/.github/workflows/ci.yml +0 -0
- {essreduce-25.12.0 → essreduce-26.1.0}/.github/workflows/docs.yml +0 -0
- {essreduce-25.12.0 → essreduce-26.1.0}/.github/workflows/nightly_at_main.yml +0 -0
- {essreduce-25.12.0 → essreduce-26.1.0}/.github/workflows/nightly_at_main_lower_bound.yml +0 -0
- {essreduce-25.12.0 → essreduce-26.1.0}/.github/workflows/nightly_at_release.yml +0 -0
- {essreduce-25.12.0 → essreduce-26.1.0}/.github/workflows/python-version-ci +0 -0
- {essreduce-25.12.0 → essreduce-26.1.0}/.github/workflows/release.yml +0 -0
- {essreduce-25.12.0 → essreduce-26.1.0}/.github/workflows/test.yml +0 -0
- {essreduce-25.12.0 → essreduce-26.1.0}/.github/workflows/unpinned.yml +0 -0
- {essreduce-25.12.0 → essreduce-26.1.0}/.github/workflows/weekly_windows_macos.yml +0 -0
- {essreduce-25.12.0 → essreduce-26.1.0}/.gitignore +0 -0
- {essreduce-25.12.0 → essreduce-26.1.0}/.pre-commit-config.yaml +0 -0
- {essreduce-25.12.0 → essreduce-26.1.0}/.python-version +0 -0
- {essreduce-25.12.0 → essreduce-26.1.0}/CODE_OF_CONDUCT.md +0 -0
- {essreduce-25.12.0 → essreduce-26.1.0}/CONTRIBUTING.md +0 -0
- {essreduce-25.12.0 → essreduce-26.1.0}/LICENSE +0 -0
- {essreduce-25.12.0 → essreduce-26.1.0}/MANIFEST.in +0 -0
- {essreduce-25.12.0 → essreduce-26.1.0}/README.md +0 -0
- {essreduce-25.12.0 → essreduce-26.1.0}/docs/_static/anaconda-icon.js +0 -0
- {essreduce-25.12.0 → essreduce-26.1.0}/docs/_static/favicon.svg +0 -0
- {essreduce-25.12.0 → essreduce-26.1.0}/docs/_static/logo-dark.svg +0 -0
- {essreduce-25.12.0 → essreduce-26.1.0}/docs/_static/logo.svg +0 -0
- {essreduce-25.12.0 → essreduce-26.1.0}/docs/_templates/class-template.rst +0 -0
- {essreduce-25.12.0 → essreduce-26.1.0}/docs/_templates/doc_version.html +0 -0
- {essreduce-25.12.0 → essreduce-26.1.0}/docs/_templates/module-template.rst +0 -0
- {essreduce-25.12.0 → essreduce-26.1.0}/docs/about/index.md +0 -0
- {essreduce-25.12.0 → essreduce-26.1.0}/docs/api-reference/index.md +0 -0
- {essreduce-25.12.0 → essreduce-26.1.0}/docs/conf.py +0 -0
- {essreduce-25.12.0 → essreduce-26.1.0}/docs/developer/coding-conventions.md +0 -0
- {essreduce-25.12.0 → essreduce-26.1.0}/docs/developer/dependency-management.md +0 -0
- {essreduce-25.12.0 → essreduce-26.1.0}/docs/developer/getting-started.md +0 -0
- {essreduce-25.12.0 → essreduce-26.1.0}/docs/developer/gui.ipynb +0 -0
- {essreduce-25.12.0 → essreduce-26.1.0}/docs/developer/index.md +0 -0
- {essreduce-25.12.0 → essreduce-26.1.0}/docs/index.md +0 -0
- {essreduce-25.12.0 → essreduce-26.1.0}/docs/user-guide/index.md +0 -0
- {essreduce-25.12.0 → essreduce-26.1.0}/docs/user-guide/installation.md +0 -0
- {essreduce-25.12.0 → essreduce-26.1.0}/docs/user-guide/reduction-workflow-guidelines.md +0 -0
- {essreduce-25.12.0 → essreduce-26.1.0}/docs/user-guide/tof/index.md +0 -0
- {essreduce-25.12.0 → essreduce-26.1.0}/docs/user-guide/widget.md +0 -0
- {essreduce-25.12.0 → essreduce-26.1.0}/pyproject.toml +0 -0
- {essreduce-25.12.0 → essreduce-26.1.0}/requirements/base.in +0 -0
- {essreduce-25.12.0 → essreduce-26.1.0}/requirements/basetest.in +0 -0
- {essreduce-25.12.0 → essreduce-26.1.0}/requirements/ci.in +0 -0
- {essreduce-25.12.0 → essreduce-26.1.0}/requirements/ci.txt +0 -0
- {essreduce-25.12.0 → essreduce-26.1.0}/requirements/dev.in +0 -0
- {essreduce-25.12.0 → essreduce-26.1.0}/requirements/dev.txt +0 -0
- {essreduce-25.12.0 → essreduce-26.1.0}/requirements/docs.in +0 -0
- {essreduce-25.12.0 → essreduce-26.1.0}/requirements/docs.txt +0 -0
- {essreduce-25.12.0 → essreduce-26.1.0}/requirements/make_base.py +0 -0
- {essreduce-25.12.0 → essreduce-26.1.0}/requirements/mypy.in +0 -0
- {essreduce-25.12.0 → essreduce-26.1.0}/requirements/mypy.txt +0 -0
- {essreduce-25.12.0 → essreduce-26.1.0}/requirements/nightly.in +0 -0
- {essreduce-25.12.0 → essreduce-26.1.0}/requirements/nightly.txt +0 -0
- {essreduce-25.12.0 → essreduce-26.1.0}/requirements/static.in +0 -0
- {essreduce-25.12.0 → essreduce-26.1.0}/requirements/static.txt +0 -0
- {essreduce-25.12.0 → essreduce-26.1.0}/requirements/test.in +0 -0
- {essreduce-25.12.0 → essreduce-26.1.0}/requirements/test.txt +0 -0
- {essreduce-25.12.0 → essreduce-26.1.0}/requirements/wheels.in +0 -0
- {essreduce-25.12.0 → essreduce-26.1.0}/requirements/wheels.txt +0 -0
- {essreduce-25.12.0 → essreduce-26.1.0}/resources/logo.svg +0 -0
- {essreduce-25.12.0 → essreduce-26.1.0}/setup.cfg +0 -0
- {essreduce-25.12.0 → essreduce-26.1.0}/src/ess/reduce/__init__.py +0 -0
- {essreduce-25.12.0 → essreduce-26.1.0}/src/ess/reduce/data/__init__.py +0 -0
- {essreduce-25.12.0 → essreduce-26.1.0}/src/ess/reduce/data/_registry.py +0 -0
- {essreduce-25.12.0 → essreduce-26.1.0}/src/ess/reduce/live/__init__.py +0 -0
- {essreduce-25.12.0 → essreduce-26.1.0}/src/ess/reduce/live/raw.py +0 -0
- {essreduce-25.12.0 → essreduce-26.1.0}/src/ess/reduce/live/roi.py +0 -0
- {essreduce-25.12.0 → essreduce-26.1.0}/src/ess/reduce/live/workflow.py +0 -0
- {essreduce-25.12.0 → essreduce-26.1.0}/src/ess/reduce/logging.py +0 -0
- {essreduce-25.12.0 → essreduce-26.1.0}/src/ess/reduce/nexus/__init__.py +0 -0
- {essreduce-25.12.0 → essreduce-26.1.0}/src/ess/reduce/nexus/_nexus_loader.py +0 -0
- {essreduce-25.12.0 → essreduce-26.1.0}/src/ess/reduce/nexus/json_generator.py +0 -0
- {essreduce-25.12.0 → essreduce-26.1.0}/src/ess/reduce/nexus/json_nexus.py +0 -0
- {essreduce-25.12.0 → essreduce-26.1.0}/src/ess/reduce/nexus/types.py +0 -0
- {essreduce-25.12.0 → essreduce-26.1.0}/src/ess/reduce/parameter.py +0 -0
- {essreduce-25.12.0 → essreduce-26.1.0}/src/ess/reduce/py.typed +0 -0
- {essreduce-25.12.0 → essreduce-26.1.0}/src/ess/reduce/scripts/grow_nexus.py +0 -0
- {essreduce-25.12.0 → essreduce-26.1.0}/src/ess/reduce/streaming.py +0 -0
- {essreduce-25.12.0 → essreduce-26.1.0}/src/ess/reduce/time_of_flight/fakes.py +0 -0
- {essreduce-25.12.0 → essreduce-26.1.0}/src/ess/reduce/time_of_flight/interpolator_numba.py +0 -0
- {essreduce-25.12.0 → essreduce-26.1.0}/src/ess/reduce/time_of_flight/interpolator_scipy.py +0 -0
- {essreduce-25.12.0 → essreduce-26.1.0}/src/ess/reduce/time_of_flight/resample.py +0 -0
- {essreduce-25.12.0 → essreduce-26.1.0}/src/ess/reduce/ui.py +0 -0
- {essreduce-25.12.0 → essreduce-26.1.0}/src/ess/reduce/uncertainty.py +0 -0
- {essreduce-25.12.0 → essreduce-26.1.0}/src/ess/reduce/widgets/__init__.py +0 -0
- {essreduce-25.12.0 → essreduce-26.1.0}/src/ess/reduce/widgets/_base.py +0 -0
- {essreduce-25.12.0 → essreduce-26.1.0}/src/ess/reduce/widgets/_binedges_widget.py +0 -0
- {essreduce-25.12.0 → essreduce-26.1.0}/src/ess/reduce/widgets/_bounds_widget.py +0 -0
- {essreduce-25.12.0 → essreduce-26.1.0}/src/ess/reduce/widgets/_config.py +0 -0
- {essreduce-25.12.0 → essreduce-26.1.0}/src/ess/reduce/widgets/_filename_widget.py +0 -0
- {essreduce-25.12.0 → essreduce-26.1.0}/src/ess/reduce/widgets/_linspace_widget.py +0 -0
- {essreduce-25.12.0 → essreduce-26.1.0}/src/ess/reduce/widgets/_optional_widget.py +0 -0
- {essreduce-25.12.0 → essreduce-26.1.0}/src/ess/reduce/widgets/_spinner.py +0 -0
- {essreduce-25.12.0 → essreduce-26.1.0}/src/ess/reduce/widgets/_string_widget.py +0 -0
- {essreduce-25.12.0 → essreduce-26.1.0}/src/ess/reduce/widgets/_switchable_widget.py +0 -0
- {essreduce-25.12.0 → essreduce-26.1.0}/src/ess/reduce/widgets/_vector_widget.py +0 -0
- {essreduce-25.12.0 → essreduce-26.1.0}/src/ess/reduce/workflow.py +0 -0
- {essreduce-25.12.0 → essreduce-26.1.0}/src/essreduce.egg-info/SOURCES.txt +0 -0
- {essreduce-25.12.0 → essreduce-26.1.0}/src/essreduce.egg-info/dependency_links.txt +0 -0
- {essreduce-25.12.0 → essreduce-26.1.0}/src/essreduce.egg-info/entry_points.txt +0 -0
- {essreduce-25.12.0 → essreduce-26.1.0}/src/essreduce.egg-info/requires.txt +0 -0
- {essreduce-25.12.0 → essreduce-26.1.0}/src/essreduce.egg-info/top_level.txt +0 -0
- {essreduce-25.12.0 → essreduce-26.1.0}/tests/accumulators_test.py +0 -0
- {essreduce-25.12.0 → essreduce-26.1.0}/tests/conftest.py +0 -0
- {essreduce-25.12.0 → essreduce-26.1.0}/tests/live/raw_test.py +0 -0
- {essreduce-25.12.0 → essreduce-26.1.0}/tests/live/roi_test.py +0 -0
- {essreduce-25.12.0 → essreduce-26.1.0}/tests/nexus/json_generator_test.py +0 -0
- {essreduce-25.12.0 → essreduce-26.1.0}/tests/nexus/json_nexus_examples/array_dataset.json +0 -0
- {essreduce-25.12.0 → essreduce-26.1.0}/tests/nexus/json_nexus_examples/dataset.json +0 -0
- {essreduce-25.12.0 → essreduce-26.1.0}/tests/nexus/json_nexus_examples/detector.json +0 -0
- {essreduce-25.12.0 → essreduce-26.1.0}/tests/nexus/json_nexus_examples/entry.json +0 -0
- {essreduce-25.12.0 → essreduce-26.1.0}/tests/nexus/json_nexus_examples/event_data.json +0 -0
- {essreduce-25.12.0 → essreduce-26.1.0}/tests/nexus/json_nexus_examples/instrument.json +0 -0
- {essreduce-25.12.0 → essreduce-26.1.0}/tests/nexus/json_nexus_examples/log.json +0 -0
- {essreduce-25.12.0 → essreduce-26.1.0}/tests/nexus/json_nexus_test.py +0 -0
- {essreduce-25.12.0 → essreduce-26.1.0}/tests/nexus/nexus_loader_test.py +0 -0
- {essreduce-25.12.0 → essreduce-26.1.0}/tests/package_test.py +0 -0
- {essreduce-25.12.0 → essreduce-26.1.0}/tests/scripts/test_grow_nexus.py +0 -0
- {essreduce-25.12.0 → essreduce-26.1.0}/tests/streaming_test.py +0 -0
- {essreduce-25.12.0 → essreduce-26.1.0}/tests/time_of_flight/interpolator_test.py +0 -0
- {essreduce-25.12.0 → essreduce-26.1.0}/tests/time_of_flight/resample_tests.py +0 -0
- {essreduce-25.12.0 → essreduce-26.1.0}/tests/uncertainty_test.py +0 -0
- {essreduce-25.12.0 → essreduce-26.1.0}/tests/widget_test.py +0 -0
- {essreduce-25.12.0 → essreduce-26.1.0}/tools/shrink_nexus.py +0 -0
{essreduce-25.12.0 → essreduce-26.1.0}/docs/user-guide/tof/dream.ipynb

@@ -311,7 +311,7 @@
    "id": "21",
    "metadata": {},
    "source": [
-    "By default, the workflow tries to load a `
+    "By default, the workflow tries to load a `TofLookupTable` from a file.\n",
     "\n",
     "In this notebook, instead of using such a pre-made file,\n",
     "we will build our own lookup table from the chopper information and apply it to the workflow."

@@ -346,7 +346,7 @@
     " sc.scalar(75.5, unit=\"m\"),\n",
     " sc.scalar(78.0, unit=\"m\"),\n",
     ")\n",
-    "lut_wf.visualize(
+    "lut_wf.visualize(TofLookupTable)"
    ]
   },
   {

@@ -411,7 +411,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "table = lut_wf.compute(
+    "table = lut_wf.compute(TofLookupTable)\n",
     "\n",
     "# Overlay mean on the figure above\n",
     "table.array[\"distance\", 13].plot(ax=fig2.ax, color=\"C1\", ls=\"-\", marker=None)"

@@ -453,7 +453,7 @@
    "outputs": [],
    "source": [
     "# Set the computed lookup table onto the original workflow\n",
-    "wf[
+    "wf[TofLookupTable] = table\n",
     "\n",
     "# Compute time-of-flight of neutron events\n",
     "tofs = wf.compute(TofDetector[SampleRun])\n",

@@ -743,7 +743,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "table = lut_wf.compute(
+    "table = lut_wf.compute(TofLookupTable).array\n",
     "table.plot() / (sc.stddevs(table) / sc.values(table)).plot(norm=\"log\")"
    ]
   },

@@ -769,7 +769,7 @@
    "source": [
     "lut_wf[LookupTableRelativeErrorThreshold] = 0.01\n",
     "\n",
-    "table = lut_wf.compute(
+    "table = lut_wf.compute(TofLookupTable)\n",
     "table.plot()"
    ]
   },

@@ -797,7 +797,7 @@
     "wf[RawDetector[SampleRun]] = ess_beamline.get_monitor(\"detector\")[0]\n",
     "wf[DetectorLtotal[SampleRun]] = Ltotal\n",
     "\n",
-    "wf[
+    "wf[TofLookupTable] = table\n",
     "\n",
     "# Compute time-of-flight\n",
     "tofs = wf.compute(TofDetector[SampleRun])\n",
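The seven notebook changes above all follow from renaming the lookup-table key to `TofLookupTable`. A minimal sketch of the resulting usage pattern (not itself part of the diff; `lut_wf` and `wf` are assumed to be the lookup-table and reduction pipelines built earlier in the notebook, and `SampleRun` the usual run-type tag):

from ess.reduce.time_of_flight import TofDetector, TofLookupTable

table = lut_wf.compute(TofLookupTable)     # build the table from the chopper info
wf[TofLookupTable] = table                 # set it on the reduction workflow
tofs = wf.compute(TofDetector[SampleRun])  # compute time-of-flight for the events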
{essreduce-25.12.0 → essreduce-26.1.0}/docs/user-guide/tof/frame-unwrapping.ipynb

@@ -171,7 +171,7 @@
    "id": "9",
    "metadata": {},
    "source": [
-    "By default, the workflow tries to load a `
+    "By default, the workflow tries to load a `TofLookupTable` from a file.\n",
     "\n",
     "In this notebook, instead of using such a pre-made file,\n",
     "we will build our own lookup table from the chopper information and apply it to the workflow.\n",

@@ -227,7 +227,7 @@
     "}\n",
     "lut_wf[SourcePosition] = sc.vector([0, 0, 0], unit=\"m\")\n",
     "\n",
-    "lut_wf.visualize(
+    "lut_wf.visualize(TofLookupTable)"
    ]
   },
   {

@@ -245,7 +245,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "table = lut_wf.compute(
+    "table = lut_wf.compute(TofLookupTable)\n",
     "table.plot()"
    ]
   },

@@ -267,7 +267,7 @@
    "outputs": [],
    "source": [
     "# Set the computed lookup table on the original workflow\n",
-    "wf[
+    "wf[TofLookupTable] = table\n",
     "\n",
     "# Compute neutron tofs\n",
     "tofs = wf.compute(TofDetector[SampleRun])\n",

@@ -432,7 +432,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "table = lut_wf.compute(
+    "table = lut_wf.compute(TofLookupTable)\n",
     "\n",
     "table.plot(figsize=(9, 4))"
    ]

@@ -457,7 +457,7 @@
     "nxevent_data = results.to_nxevent_data()\n",
     "wf[RawDetector[SampleRun]] = nxevent_data\n",
     "wf[DetectorLtotal[SampleRun]] = nxevent_data.coords[\"Ltotal\"]\n",
-    "wf[
+    "wf[TofLookupTable] = table\n",
     "\n",
     "tofs = wf.compute(TofDetector[SampleRun])\n",
     "\n",
{essreduce-25.12.0 → essreduce-26.1.0}/docs/user-guide/tof/wfm.ipynb

@@ -332,7 +332,7 @@
    "id": "20",
    "metadata": {},
    "source": [
-    "By default, the workflow tries to load a `
+    "By default, the workflow tries to load a `TofLookupTable` from a file.\n",
     "\n",
     "In this notebook, instead of using such a pre-made file,\n",
     "we will build our own lookup table from the chopper information and apply it to the workflow."

@@ -365,7 +365,7 @@
     "lut_wf[SourcePosition] = source_position\n",
     "lut_wf[LtotalRange] = Ltotal, Ltotal\n",
     "lut_wf[LookupTableRelativeErrorThreshold] = 0.1\n",
-    "lut_wf.visualize(
+    "lut_wf.visualize(TofLookupTable)"
    ]
   },
   {

@@ -430,7 +430,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "table = lut_wf.compute(
+    "table = lut_wf.compute(TofLookupTable)\n",
     "\n",
     "# Overlay mean on the figure above\n",
     "table.array[\"distance\", 1].plot(ax=fig2.ax, color=\"C1\", ls=\"-\", marker=None)\n",

@@ -463,7 +463,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "wf[
+    "wf[TofLookupTable] = table\n",
     "\n",
     "tofs = wf.compute(TofDetector[SampleRun])\n",
     "tofs"
{essreduce-25.12.0 → essreduce-26.1.0}/src/ess/reduce/nexus/workflow.py

@@ -370,6 +370,8 @@ def get_calibrated_detector(
     ----------
     detector:
         NeXus detector group.
+    transform:
+        Transformation matrix for the detector.
     offset:
         Offset to add to the detector position.
     bank_sizes:

@@ -430,8 +432,8 @@ def assemble_detector_data(
 
 def get_calibrated_monitor(
     monitor: NeXusComponent[MonitorType, RunType],
+    transform: NeXusTransformation[MonitorType, RunType],
     offset: MonitorPositionOffset[RunType, MonitorType],
-    source_position: Position[snx.NXsource, RunType],
 ) -> EmptyMonitor[RunType, MonitorType]:
     """
     Extract the data array corresponding to a monitor's signal field.

@@ -443,16 +445,16 @@ def get_calibrated_monitor(
     ----------
     monitor:
         NeXus monitor group.
+    transform:
+        Transformation matrix for the monitor.
     offset:
         Offset to add to the monitor position.
-    source_position:
-        Position of the neutron source.
     """
-
+    transform_unit = transform.value.unit
     return EmptyMonitor[RunType, MonitorType](
         nexus.extract_signal_data_array(monitor).assign_coords(
-            position=
-
+            position=transform.value * sc.vector([0, 0, 0], unit=transform_unit)
+            + offset.to(unit=transform_unit),
         )
     )
 
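The new `get_calibrated_monitor` derives the monitor position by applying the NeXus transformation to the component's local origin and then adding the position offset, instead of consuming a separate source position. A self-contained sketch of that computation with toy values (an illustration under assumed inputs, not the library code):

import scipp as sc

# Stand-in for transform.value: an affine transform placing the monitor 10 m
# downstream of the local origin.
transform = sc.spatial.affine_transform(
    value=[[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 10.0], [0, 0, 0, 1]],
    unit='m',
)
offset = sc.vector([0.0, 0.0, 0.05], unit='m')  # stand-in for MonitorPositionOffset

# Apply the transform to the local origin, then add the offset in the same unit.
position = transform * sc.vector([0, 0, 0], unit=transform.unit) + offset.to(unit=transform.unit)
print(position)  # [0, 0, 10.05] m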
{essreduce-25.12.0 → essreduce-26.1.0}/src/ess/reduce/normalization.py

@@ -2,7 +2,11 @@
 # Copyright (c) 2025 Scipp contributors (https://github.com/scipp)
 """Normalization routines for neutron data reduction."""
 
+from __future__ import annotations
+
+import enum
 import functools
+import warnings
 
 import scipp as sc
 

@@ -14,6 +18,7 @@ def normalize_by_monitor_histogram(
     *,
     monitor: sc.DataArray,
     uncertainty_broadcast_mode: UncertaintyBroadcastMode,
+    skip_range_check: bool = False,
 ) -> sc.DataArray:
     """Normalize detector data by a normalized histogrammed monitor.
 

@@ -23,10 +28,13 @@
     - For *event* detectors, the monitor values are mapped to the detector
       using :func:`scipp.lookup`. That is, for detector event :math:`d_i`,
       :math:`m_i` is the monitor bin value at the same coordinate.
-    - For *histogram* detectors, the monitor is rebinned using
+    - For *histogram* detectors, the monitor is generally rebinned using the detector
       binning using :func:`scipp.rebin`. Thus, detector value :math:`d_i` and
       monitor value :math:`m_i` correspond to the same bin.
 
+    - In case the detector coordinate does not have a dimension in common with the
+      monitor, :func:`scipp.lookup` is used as in the event case.
+
     In both cases, let :math:`x_i` be the lower bound of monitor bin :math:`i`
     and let :math:`\\Delta x_i = x_{i+1} - x_i` be the width of that bin.
 

@@ -47,6 +55,16 @@
         Must be one-dimensional and have a dimension coordinate, typically "wavelength".
     uncertainty_broadcast_mode:
         Choose how uncertainties of the monitor are broadcast to the sample data.
+    skip_range_check:
+        If false (default), the detector data must be within the range of the monitor
+        coordinate. Set this to true to disable the check.
+        The value of out-of-range bins / events is undefined in that case.
+
+        This is useful when the detector contains data outside the monitor range, and it
+        is difficult or impossible to slice the detector without also removing in-range
+        data. In this case, the caller can mask those data points and skip the range
+        check. ``normalize_by_monitor_histogram`` does not take masks into account when
+        checking ranges as that is expensive to implement in a general case.
 
     Returns
     -------
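A hedged usage sketch of the new `skip_range_check` flag with toy data (it assumes the detector is the first positional argument, as the docstring excerpt above suggests, and that `UncertaintyBroadcastMode` is importable from `ess.reduce.uncertainty`): mask the out-of-range part of the detector yourself, then disable the built-in check; the masked bin's value is undefined, exactly as documented.

import scipp as sc
from ess.reduce.normalization import normalize_by_monitor_histogram
from ess.reduce.uncertainty import UncertaintyBroadcastMode

# Monitor histogram covering 1-5 Å; detector histogram extending to 6 Å.
monitor = sc.DataArray(
    sc.array(dims=['wavelength'], values=[10.0, 20.0, 30.0, 40.0]),
    coords={'wavelength': sc.linspace('wavelength', 1.0, 5.0, num=5, unit='angstrom')},
)
detector = sc.DataArray(
    sc.array(dims=['wavelength'], values=[1.0, 2.0, 3.0, 4.0, 5.0]),
    coords={'wavelength': sc.linspace('wavelength', 1.0, 6.0, num=6, unit='angstrom')},
)

# Mask the detector bins that lie outside the monitor range before normalizing.
centers = sc.midpoints(detector.coords['wavelength'])
lo = monitor.coords['wavelength'].min()
hi = monitor.coords['wavelength'].max()
detector = detector.assign_masks(out_of_range=(centers < lo) | (centers > hi))

normalized = normalize_by_monitor_histogram(
    detector,
    monitor=monitor,
    uncertainty_broadcast_mode=UncertaintyBroadcastMode.drop,
    skip_range_check=True,
)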
@@ -60,22 +78,45 @@
     normalize_by_monitor_integrated:
         Normalize by an integrated monitor.
     """
-
+    if not skip_range_check:
+        _check_monitor_range_contains_detector(monitor=monitor, detector=detector)
 
     dim = monitor.dim
 
-
-
-
-
-
-
-
-
-
-
-
-
+    match _HistogramNormalizationMode.deduce(detector, dim):
+        case _HistogramNormalizationMode.Events:
+            detector = _mask_detector_for_norm(detector=detector, monitor=monitor)
+            norm = _histogram_monitor_term(
+                monitor,
+                dim,
+                broadcast_to=detector,
+                uncertainty_broadcast_mode=uncertainty_broadcast_mode,
+            )
+            if dim in detector.bins.coords:
+                return detector.bins / sc.lookup(norm, dim=dim)
+            else:
+                return detector / norm
+
+        case _HistogramNormalizationMode.BinsCommonDim:
+            monitor = monitor.rebin({dim: detector.coords[dim]})
+            detector = _mask_detector_for_norm(detector=detector, monitor=monitor)
+            norm = _histogram_monitor_term(
+                monitor,
+                dim,
+                broadcast_to=detector,
+                uncertainty_broadcast_mode=uncertainty_broadcast_mode,
+            )
+            return detector / norm
+
+        case _HistogramNormalizationMode.BinsDifferentDim:
+            detector = _mask_detector_for_norm(detector=detector, monitor=monitor)
+            # No broadcast here because there are no common dims, use lookup instead.
+            norm = _histogram_monitor_term(
+                monitor,
+                dim,
+                uncertainty_broadcast_mode=uncertainty_broadcast_mode,
+            )
+            return detector / sc.lookup(norm)[_compute_bin_centers(detector, dim)]
 
 
 def normalize_by_monitor_integrated(

@@ -83,6 +124,7 @@ def normalize_by_monitor_integrated(
     *,
     monitor: sc.DataArray,
     uncertainty_broadcast_mode: UncertaintyBroadcastMode,
+    skip_range_check: bool = False,
 ) -> sc.DataArray:
     """Normalize detector data by an integrated monitor.
 

@@ -113,6 +155,16 @@
         Must be one-dimensional and have a dimension coordinate, typically "wavelength".
     uncertainty_broadcast_mode:
         Choose how uncertainties of the monitor are broadcast to the sample data.
+    skip_range_check:
+        If false (default), the detector data must be within the range of the monitor
+        coordinate. Set this to true to disable the check.
+        The value of out-of-range bins / events is undefined in that case.
+
+        This is useful when the detector contains data outside the monitor range, and it
+        is difficult or impossible to slice the detector without also removing in-range
+        data. In this case, the caller can mask those data points and skip the range
+        check. ``normalize_by_monitor_histogram`` does not take masks into account when
+        checking ranges as that is expensive to implement in a general case.
 
     Returns
     -------

@@ -126,7 +178,8 @@
     normalize_by_monitor_histogram:
         Normalize by a monitor histogram.
     """
-
+    if not skip_range_check:
+        _check_monitor_range_contains_detector(monitor=monitor, detector=detector)
     detector = _mask_detector_for_norm(detector=detector, monitor=monitor)
     norm = monitor.nansum().data
     norm = broadcast_uncertainties(

@@ -149,15 +202,15 @@ def _check_monitor_range_contains_detector(
     # monitor range that is less than the detector bins which is fine for the events,
     # but would be wrong if the detector was subsequently histogrammed.
     if (det_coord := detector.coords.get(dim)) is not None:
-
-        hi = det_coord[dim, 1:].nanmax()
+        ...
     elif (det_coord := detector.bins.coords.get(dim)) is not None:
-
-        hi = det_coord.nanmax()
+        ...
     else:
         raise sc.CoordError(
             f"Missing '{dim}' coordinate in detector for monitor normalization."
         )
+    lo = det_coord.nanmin()
+    hi = det_coord.nanmax()
 
     if monitor.coords[dim].min() > lo or monitor.coords[dim].max() < hi:
         raise ValueError(

@@ -181,13 +234,17 @@ def _mask_detector_for_norm(
     if (monitor_mask := _monitor_mask(monitor)) is None:
         return detector
 
-    if (detector_coord := detector.coords.get(
+    if (detector_coord := detector.coords.get(dim)) is not None:
         # Apply the mask to the bins or a dense detector.
-
-
-
-
-
+        mask_da = sc.DataArray(monitor_mask, coords={dim: monitor.coords[dim]})
+        if dim in detector_coord.dims:
+            # Use rebin to reshape the mask to the detector.
+            mask = mask_da.rebin({dim: detector_coord}).data != sc.scalar(0, unit=None)
+            return detector.assign_masks(_monitor_mask=mask)
+        # else: need to use lookup to apply mask at matching coord elements
+        return detector.assign_masks(
+            _monitor_mask=sc.lookup(mask_da)[_compute_bin_centers(detector, dim)]
+        )
 
     # else: Apply the mask to the events.
     if dim not in detector.bins.coords:

@@ -197,7 +254,7 @@ def _mask_detector_for_norm(
     event_mask = sc.lookup(
         sc.DataArray(monitor_mask, coords={dim: monitor.coords[dim]})
     )[detector.bins.coords[dim]]
-    return detector.bins.assign_masks(
+    return detector.bins.assign_masks(_monitor_mask=event_mask)
 
 
 def _monitor_mask(monitor: sc.DataArray) -> sc.Variable | None:

@@ -213,3 +270,74 @@ def _monitor_mask(monitor: sc.DataArray) -> sc.Variable | None:
     if not masks:
         return None
     return functools.reduce(sc.logical_or, masks)
+
+
+def _histogram_monitor_term(
+    monitor: sc.DataArray,
+    dim: str,
+    *,
+    broadcast_to: sc.DataArray | None = None,
+    uncertainty_broadcast_mode: UncertaintyBroadcastMode,
+) -> sc.DataArray:
+    if not monitor.coords.is_edges(dim, dim):
+        raise sc.CoordError(
+            f"Monitor coordinage {dim} must be bin-edges for normalization."
+        )
+    coord = monitor.coords[dim]
+    delta_w = sc.DataArray(coord[1:] - coord[:-1], masks=monitor.masks)
+    norm = monitor / delta_w
+
+    if broadcast_to is not None:
+        return broadcast_uncertainties(
+            norm, prototype=broadcast_to, mode=uncertainty_broadcast_mode
+        )
+
+    match uncertainty_broadcast_mode:
+        case UncertaintyBroadcastMode.fail:
+            return norm
+        case UncertaintyBroadcastMode.drop:
+            return sc.values(norm)
+        case _:
+            warnings.warn(
+                "Cannot broadcast uncertainties in this case.",
+                UserWarning,
+                stacklevel=3,
+            )
+            return norm
+
+
+class _HistogramNormalizationMode(enum.Enum):
+    Events = enum.auto()
+    """Use an event coordinate to lookup monitor values."""
+    BinsCommonDim = enum.auto()
+    """Use a bin coordinate which contains the monitor dimension.
+
+    The coordinate may be multi-dimensional but one dimension matches
+    the dimension of the monitor.
+    """
+    BinsDifferentDim = enum.auto()
+    """Use a bin coordinate which does not contain the monitor dimension.
+
+    The coordinate may be multi-dimensions, e.g., in the DREAM powder workflow
+    where it has dims (two_theta, dspacing [bin-edges]).
+    """
+
+    @classmethod
+    def deduce(cls, detector: sc.DataArray, dim: str) -> _HistogramNormalizationMode:
+        # Use an event-coord when available:
+        if detector.bins is not None and dim in detector.bins.coords:
+            return _HistogramNormalizationMode.Events
+        # else: use a bin-coord.
+
+        det_coord = detector.coords[dim]
+        if dim in det_coord.dims:
+            return _HistogramNormalizationMode.BinsCommonDim
+        return _HistogramNormalizationMode.BinsDifferentDim
+
+
+def _compute_bin_centers(da: sc.DataArray, name: str) -> sc.Variable:
+    coord = da.coords[name]
+    for dim in coord.dims:
+        if da.coords.is_edges(name, dim):
+            coord = sc.midpoints(coord, dim=dim)
+    return coord
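The `BinsDifferentDim` branch added above covers detectors whose coordinate shares no dimension with the 1-D monitor (for example a 2-D wavelength coordinate on (two_theta, dspacing) bins); the monitor term is then looked up at the detector's bin centers. A small toy illustration of that lookup mechanism (simplified: it looks up the raw monitor rather than the monitor divided by bin width, and the values are arbitrary):

import scipp as sc

monitor = sc.DataArray(
    sc.array(dims=['wavelength'], values=[2.0, 4.0, 8.0]),
    coords={'wavelength': sc.array(dims=['wavelength'], values=[1.0, 2.0, 3.0, 4.0], unit='angstrom')},
)
# Detector histogrammed in (two_theta, dspacing); its wavelength coordinate is
# 2-D and is bin-edges only along 'dspacing'.
detector = sc.DataArray(
    sc.ones(dims=['two_theta', 'dspacing'], shape=[2, 2]),
    coords={
        'wavelength': sc.array(
            dims=['two_theta', 'dspacing'],
            values=[[1.0, 2.0, 3.0], [2.0, 3.0, 4.0]],
            unit='angstrom',
        )
    },
)

# Mimics _compute_bin_centers: midpoints along the edge dimension, then lookup.
centers = sc.midpoints(detector.coords['wavelength'], dim='dspacing')
normalized = detector / sc.lookup(monitor, 'wavelength')[centers]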
{essreduce-25.12.0 → essreduce-26.1.0}/src/ess/reduce/time_of_flight/__init__.py

@@ -30,6 +30,8 @@ from .types import (
     TimeOfFlightLookupTableFilename,
     ToaDetector,
     TofDetector,
+    TofLookupTable,
+    TofLookupTableFilename,
     TofMonitor,
 )
 from .workflow import GenericTofWorkflow

@@ -54,6 +56,8 @@ __all__ = [
     "TimeResolution",
     "ToaDetector",
     "TofDetector",
+    "TofLookupTable",
+    "TofLookupTableFilename",
     "TofLookupTableWorkflow",
     "TofMonitor",
     "providers",
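With the two additions above, the new names can be imported directly from the subpackage. A trivial sketch (the workflow object and the file path are placeholders, not part of the diff):

from ess.reduce.time_of_flight import TofLookupTable, TofLookupTableFilename

# `wf` is assumed to be a GenericTofWorkflow (or other sciline pipeline) built elsewhere.
wf[TofLookupTableFilename] = 'path/to/lookup-table.h5'  # load the table from a file, or
# wf[TofLookupTable] = table                            # set one computed in-session.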
{essreduce-25.12.0 → essreduce-26.1.0}/src/ess/reduce/time_of_flight/eto_to_tof.py

@@ -35,9 +35,9 @@ from .types import (
     DetectorLtotal,
     MonitorLtotal,
     PulseStrideOffset,
-    TimeOfFlightLookupTable,
     ToaDetector,
     TofDetector,
+    TofLookupTable,
     TofMonitor,
 )
 

@@ -96,7 +96,7 @@ class TofInterpolator:
 
 
 def _time_of_flight_data_histogram(
-    da: sc.DataArray, lookup:
+    da: sc.DataArray, lookup: TofLookupTable, ltotal: sc.Variable
 ) -> sc.DataArray:
     # In NeXus, 'time_of_flight' is the canonical name in NXmonitor, but in some files,
     # it may be called 'tof' or 'frame_time'.

@@ -201,7 +201,7 @@ def _guess_pulse_stride_offset(
 
 def _prepare_tof_interpolation_inputs(
     da: sc.DataArray,
-    lookup:
+    lookup: TofLookupTable,
     ltotal: sc.Variable,
     pulse_stride_offset: int | None,
 ) -> dict:

@@ -295,7 +295,7 @@ def _prepare_tof_interpolation_inputs(
 
 def _time_of_flight_data_events(
     da: sc.DataArray,
-    lookup:
+    lookup: TofLookupTable,
     ltotal: sc.Variable,
     pulse_stride_offset: int | None,
 ) -> sc.DataArray:

@@ -317,7 +317,19 @@ def _time_of_flight_data_events(
     parts = da.bins.constituents
     parts["data"] = tofs
     result = da.bins.assign_coords(tof=sc.bins(**parts, validate_indices=False))
-
+    out = result.bins.drop_coords("event_time_offset")
+
+    # The result may still have an 'event_time_zero' dimension (in the case of an
+    # event monitor where events were not grouped by pixel).
+    if "event_time_zero" in out.dims:
+        if ("event_time_zero" in out.coords) and (
+            "event_time_zero" not in out.bins.coords
+        ):
+            out.bins.coords["event_time_zero"] = sc.bins_like(
+                out, out.coords["event_time_zero"]
+            )
+        out = out.bins.concat("event_time_zero")
+    return out
 
 
 def detector_ltotal_from_straight_line_approximation(
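The block added above handles event monitors that are still binned by pulse: the per-pulse `event_time_zero` coordinate is broadcast onto the events with `sc.bins_like` and the pulses are then merged with `bins.concat`. A self-contained toy illustration of those two scipp calls (not the library code; units and values are arbitrary):

import scipp as sc

events = sc.DataArray(
    sc.ones(dims=['event'], shape=[6]),
    coords={'event_time_offset': sc.arange('event', 6, unit='ms', dtype='float64')},
)
da = sc.DataArray(
    sc.bins(
        data=events,
        dim='event',
        begin=sc.array(dims=['event_time_zero'], values=[0, 2, 4], unit=None),
        end=sc.array(dims=['event_time_zero'], values=[2, 4, 6], unit=None),
    ),
    coords={'event_time_zero': sc.arange('event_time_zero', 3, unit='s')},
)

# Fill the per-pulse coordinate into each event, then merge all pulses into one bin.
da.bins.coords['event_time_zero'] = sc.bins_like(da, da.coords['event_time_zero'])
flat = da.bins.concat('event_time_zero')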
@@ -357,6 +369,7 @@ def detector_ltotal_from_straight_line_approximation(
 
 def monitor_ltotal_from_straight_line_approximation(
     monitor_beamline: EmptyMonitor[RunType, MonitorType],
+    source_position: Position[snx.NXsource, RunType],
 ) -> MonitorLtotal[RunType, MonitorType]:
     """
     Compute Ltotal for the monitor.

@@ -369,7 +382,10 @@ def monitor_ltotal_from_straight_line_approximation(
         Beamline data for the monitor that contains the positions necessary to compute
         the straight-line approximation to Ltotal (source and monitor positions).
     """
-    graph =
+    graph = {
+        **scn.conversion.graph.beamline.beamline(scatter=False),
+        'source_position': lambda: source_position,
+    }
     return MonitorLtotal[RunType, MonitorType](
         monitor_beamline.transform_coords(
             "Ltotal", graph=graph, keep_intermediate=False
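The provider now receives the source position explicitly and injects it into the scippneutron conversion graph as a zero-argument callable, so `Ltotal` can be derived for monitor data that only carries a `position` coordinate. A self-contained sketch with toy positions (illustration only):

import scipp as sc
import scippneutron as scn

source_position = sc.vector([0.0, 0.0, 0.0], unit='m')
monitor = sc.DataArray(
    sc.scalar(1.0),
    coords={'position': sc.vector([0.0, 0.0, 30.0], unit='m')},
)

graph = {
    **scn.conversion.graph.beamline.beamline(scatter=False),
    'source_position': lambda: source_position,  # constant provider for the graph
}
with_ltotal = monitor.transform_coords('Ltotal', graph=graph, keep_intermediate=False)
print(with_ltotal.coords['Ltotal'])  # 30 m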
@@ -379,7 +395,7 @@
 
 def _compute_tof_data(
     da: sc.DataArray,
-    lookup:
+    lookup: TofLookupTable,
     ltotal: sc.Variable,
     pulse_stride_offset: int,
 ) -> sc.DataArray:

@@ -397,7 +413,7 @@
 
 def detector_time_of_flight_data(
     detector_data: RawDetector[RunType],
-    lookup:
+    lookup: TofLookupTable,
     ltotal: DetectorLtotal[RunType],
     pulse_stride_offset: PulseStrideOffset,
 ) -> TofDetector[RunType]:

@@ -431,7 +447,7 @@ def detector_time_of_flight_data(
 
 def monitor_time_of_flight_data(
     monitor_data: RawMonitor[RunType, MonitorType],
-    lookup:
+    lookup: TofLookupTable,
     ltotal: MonitorLtotal[RunType, MonitorType],
     pulse_stride_offset: PulseStrideOffset,
 ) -> TofMonitor[RunType, MonitorType]:

@@ -465,7 +481,7 @@ def monitor_time_of_flight_data(
 
 def detector_time_of_arrival_data(
     detector_data: RawDetector[RunType],
-    lookup:
+    lookup: TofLookupTable,
     ltotal: DetectorLtotal[RunType],
     pulse_stride_offset: PulseStrideOffset,
 ) -> ToaDetector[RunType]:
{essreduce-25.12.0 → essreduce-26.1.0}/src/ess/reduce/time_of_flight/lut.py

@@ -13,7 +13,7 @@ import sciline as sl
 import scipp as sc
 
 from ..nexus.types import AnyRun, DiskChoppers
-from .types import
+from .types import TofLookupTable
 
 
 @dataclass

@@ -230,7 +230,7 @@ def make_tof_lookup_table(
     pulse_period: PulsePeriod,
     pulse_stride: PulseStride,
     error_threshold: LookupTableRelativeErrorThreshold,
-) ->
+) -> TofLookupTable:
     """
     Compute a lookup table for time-of-flight as a function of distance and
     time-of-arrival.

@@ -372,7 +372,7 @@ def make_tof_lookup_table(
     # In-place masking for better performance
     _mask_large_uncertainty(table, error_threshold)
 
-    return
+    return TofLookupTable(
         array=table,
         pulse_period=pulse_period,
         pulse_stride=pulse_stride,

@@ -398,13 +398,13 @@ def simulate_chopper_cascade_using_tof(
 ) -> SimulationResults:
     """
     Simulate a pulse of neutrons propagating through a chopper cascade using the
-    ``tof`` package (https://
+    ``tof`` package (https://scipp.github.io/tof).
 
     Parameters
     ----------
     choppers:
         A dict of DiskChopper objects representing the choppers in the beamline. See
-        https://scipp.github.io/scippneutron/user-guide/chopper/processing-nexus-choppers.html
+        https://scipp.github.io/scippneutron/user-guide/chopper/processing-nexus-choppers.html
         for more information.
     source_position:
         A scalar variable with ``dtype=vector3`` that defines the source position.
|