essreduce-24.12.0-py3-none-any.whl → essreduce-25.1.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- ess/reduce/__init__.py +2 -2
- ess/reduce/live/raw.py +14 -1
- ess/reduce/nexus/_nexus_loader.py +38 -2
- ess/reduce/nexus/types.py +25 -0
- ess/reduce/time_of_flight/__init__.py +59 -0
- ess/reduce/time_of_flight/fakes.py +240 -0
- ess/reduce/time_of_flight/simulation.py +74 -0
- ess/reduce/time_of_flight/to_events.py +104 -0
- ess/reduce/time_of_flight/toa_to_tof.py +541 -0
- ess/reduce/time_of_flight/types.py +176 -0
- ess/reduce/ui.py +3 -1
- ess/reduce/widgets/__init__.py +2 -0
- ess/reduce/widgets/_base.py +85 -21
- ess/reduce/widgets/_optional_widget.py +24 -0
- ess/reduce/widgets/_spinner.py +100 -0
- ess/reduce/widgets/_switchable_widget.py +15 -0
- {essreduce-24.12.0.dist-info → essreduce-25.1.0.dist-info}/METADATA +5 -3
- essreduce-25.1.0.dist-info/RECORD +43 -0
- {essreduce-24.12.0.dist-info → essreduce-25.1.0.dist-info}/WHEEL +1 -1
- essreduce-24.12.0.dist-info/RECORD +0 -36
- {essreduce-24.12.0.dist-info → essreduce-25.1.0.dist-info}/LICENSE +0 -0
- {essreduce-24.12.0.dist-info → essreduce-25.1.0.dist-info}/entry_points.txt +0 -0
- {essreduce-24.12.0.dist-info → essreduce-25.1.0.dist-info}/top_level.txt +0 -0
ess/reduce/__init__.py
CHANGED
@@ -4,7 +4,7 @@

 import importlib.metadata

-from . import nexus, uncertainty
+from . import nexus, uncertainty, time_of_flight

 try:
     __version__ = importlib.metadata.version("essreduce")
@@ -13,4 +13,4 @@ except importlib.metadata.PackageNotFoundError:

 del importlib

-__all__ = [
+__all__ = ["nexus", "uncertainty", "time_of_flight"]
ess/reduce/live/raw.py
CHANGED
@@ -111,6 +111,19 @@ class LogicalView:
     Logical view of a multi-dimensional detector.

     Instances can be used as a "projection" function for a detector view.
+
+    Parameters
+    ----------
+    fold:
+        Dimensions to fold. This is useful if the raw data has a single dimension
+        that corresponds to multiple dimensions in the logical view.
+    transpose:
+        Dimensions to transpose. This is useful for reordering dimensions.
+    select:
+        Dimensions with associated index to select from the data. This extracts
+        a slice of the data for each given dimension.
+    flatten:
+        Dimensions to flatten.
     """

     fold: dict[str, int] | None = None
@@ -281,7 +294,7 @@ class RollingDetectorView(Detector):
             noise_replica_count = 0
         else:
             noise_replica_count = 4
-        wf = GenericNeXusWorkflow()
+        wf = GenericNeXusWorkflow(run_types=[SampleRun], monitor_types=[])
         wf[RollingDetectorViewWindow] = window
         if isinstance(projection, LogicalView):
             wf[LogicalView] = projection
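Note on the LogicalView parameters documented above: they map onto plain scipp
array operations. A minimal sketch of the semantics (not part of the diff;
dimension names and sizes are invented):

    import scipp as sc

    # Hypothetical raw data with a single flat 'detector_number' dimension.
    da = sc.DataArray(sc.arange('detector_number', 12.0))
    # fold: split the flat dimension into logical dimensions.
    folded = da.fold(dim='detector_number', sizes={'layer': 2, 'y': 2, 'x': 3})
    # transpose: reorder the logical dimensions.
    transposed = folded.transpose(['layer', 'x', 'y'])
    # select: extract a slice at a fixed index of one dimension.
    selected = transposed['layer', 0]
    # flatten: merge the remaining dimensions back into one.
    flat = selected.copy().flatten(dims=['x', 'y'], to='pixel')
    assert flat.sizes == {'pixel': 6}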
ess/reduce/nexus/_nexus_loader.py
CHANGED

@@ -3,6 +3,7 @@

 """NeXus loaders."""

+import errno
 from collections.abc import Generator, Mapping
 from contextlib import AbstractContextManager, contextmanager, nullcontext
 from dataclasses import dataclass
@@ -91,6 +92,8 @@ def compute_component_position(dg: sc.DataGroup) -> sc.DataGroup:
 def _open_nexus_file(
     file_path: FilePath | NeXusFile | NeXusGroup,
     definitions: Mapping | None | NoNewDefinitionsType = NoNewDefinitions,
+    *,
+    locking: bool | None = None,
 ) -> AbstractContextManager[snx.Group]:
     if isinstance(file_path, getattr(NeXusGroup, '__supertype__', type(None))):
         if (
@@ -101,9 +104,42 @@ def _open_nexus_file(
             "Cannot apply new definitions to open nexus file or nexus group."
         )
         return nullcontext(file_path)
+
+    try:
+        return _open_nexus_file_from_path(file_path, definitions, locking=locking)
+    except OSError as err:
+        if err.errno == errno.EROFS:
+            # Failed to open because the filesystem is read-only.
+            # (According to https://www.ioplex.com/%7Emiallen/errcmpp.html
+            # this error code is universal.)
+            #
+            # On ESS machines, this happens for network filesystems of data that was
+            # ingested into SciCat, including raw data.
+            # In this case, it is safe to open the file without locking because:
+            # - For raw files, they were written on a separate machine and are synced
+            #   with the one running reduction software. So there cannot be concurrent
+            #   write and read accesses to the same file on the same filesystem.
+            #   The ground truth on the filesystem used by the file writer is protected
+            #   and cannot be corrupted by our reader.
+            # - For processed data, the file was copied to the read-only filesystem.
+            #   So the copy we are opening was not written by HDF5 directly and thus
+            #   locking has no effect anyway.
+            #
+            # When running on user machines, disabling locking can potentially corrupt
+            # files. But the risk is minimal because very few users will have read-only
+            # filesystems and do concurrent reads and writes.
+            return _open_nexus_file_from_path(file_path, definitions, locking=False)
+        raise
+
+
+def _open_nexus_file_from_path(
+    file_path: FilePath,
+    definitions: Mapping | None | NoNewDefinitionsType,
+    **kwargs: object,
+) -> AbstractContextManager[snx.Group]:
     if definitions is NoNewDefinitions:
-        return snx.File(file_path)
-    return snx.File(file_path, definitions=definitions)
+        return snx.File(file_path, **kwargs)
+    return snx.File(file_path, definitions=definitions, **kwargs)


 @contextmanager
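Note on the EROFS fallback above: the pattern is self-contained and reusable. A
standalone sketch of the same idea written against plain h5py (assuming h5py >= 3.5,
which added the locking argument; this is not code from the package):

    import errno

    import h5py

    def open_tolerating_readonly_fs(path: str) -> h5py.File:
        """Open an HDF5 file read-only, retrying without locking on EROFS."""
        try:
            return h5py.File(path, 'r')
        except OSError as err:
            if err.errno == errno.EROFS:
                # The filesystem is read-only, so HDF5 cannot create its lock.
                # Retrying with locking disabled is safe when nothing can write
                # to the file through this filesystem.
                return h5py.File(path, 'r', locking=False)
            raise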
ess/reduce/nexus/types.py
CHANGED
@@ -107,6 +107,16 @@ IncidentMonitor = NewType('IncidentMonitor', int)
 """Incident monitor"""
 TransmissionMonitor = NewType('TransmissionMonitor', int)
 """Transmission monitor"""
+FrameMonitor0 = NewType('FrameMonitor', int)
+"""Frame monitor number 0"""
+FrameMonitor1 = NewType('FrameMonitor', int)
+"""Frame monitor number 1"""
+FrameMonitor2 = NewType('FrameMonitor', int)
+"""Frame monitor number 2"""
+FrameMonitor3 = NewType('FrameMonitor', int)
+"""Frame monitor number 3"""
+CaveMonitor = NewType('CaveMonitor', int)
+"""A monitor located in the instrument cave"""
 MonitorType = TypeVar(
     'MonitorType',
     Monitor1,
@@ -117,6 +127,11 @@ MonitorType = TypeVar(
     Monitor6,
     IncidentMonitor,
     TransmissionMonitor,
+    FrameMonitor0,
+    FrameMonitor1,
+    FrameMonitor2,
+    FrameMonitor3,
+    CaveMonitor,
 )
 """TypeVar for specifying what monitor some data belongs to.

@@ -130,6 +145,11 @@ Possible values:
 - :class:`Monitor6`
 - :class:`IncidentMonitor`
 - :class:`TransmissionMonitor`
+- :class:`FrameMonitor0`
+- :class:`FrameMonitor1`
+- :class:`FrameMonitor2`
+- :class:`FrameMonitor3`
+- :class:`CaveMonitor`
 """


@@ -148,6 +168,11 @@ Component = TypeVar(
     Monitor6,
     IncidentMonitor,
     TransmissionMonitor,
+    FrameMonitor0,
+    FrameMonitor1,
+    FrameMonitor2,
+    FrameMonitor3,
+    CaveMonitor,
 )
 UniqueComponent = TypeVar('UniqueComponent', snx.NXsample, snx.NXsource)
 """Components that can be identified by their type as there will only be one."""
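Note on the new monitor types: they are workflow keys, not runtime classes. A hedged
sketch of how they could parametrize GenericNeXusWorkflow, whose run_types and
monitor_types parameters appear in the raw.py hunk above (import paths assumed):

    from ess.reduce.nexus import GenericNeXusWorkflow
    from ess.reduce.nexus.types import CaveMonitor, FrameMonitor0, SampleRun

    # Wire up only the monitors that this hypothetical instrument actually has.
    wf = GenericNeXusWorkflow(
        run_types=[SampleRun], monitor_types=[FrameMonitor0, CaveMonitor]
    )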
ess/reduce/time_of_flight/__init__.py
ADDED

@@ -0,0 +1,59 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright (c) 2023 Scipp contributors (https://github.com/scipp)
+
+"""
+Utilities for computing real neutron time-of-flight from chopper settings and
+neutron time-of-arrival at the detectors.
+"""
+
+from .toa_to_tof import default_parameters, resample_tof_data, providers, TofWorkflow
+from .simulation import simulate_beamline
+from .types import (
+    DistanceResolution,
+    FrameFoldedTimeOfArrival,
+    FramePeriod,
+    LookupTableRelativeErrorThreshold,
+    Ltotal,
+    LtotalRange,
+    MaskedTimeOfFlightLookupTable,
+    PivotTimeAtDetector,
+    PulsePeriod,
+    PulseStride,
+    PulseStrideOffset,
+    RawData,
+    ResampledTofData,
+    SimulationResults,
+    TimeOfArrivalMinusPivotTimeModuloPeriod,
+    TimeOfFlightLookupTable,
+    TofData,
+    UnwrappedTimeOfArrival,
+    UnwrappedTimeOfArrivalMinusPivotTime,
+)
+
+
+__all__ = [
+    "DistanceResolution",
+    "FrameFoldedTimeOfArrival",
+    "FramePeriod",
+    "LookupTableRelativeErrorThreshold",
+    "Ltotal",
+    "LtotalRange",
+    "MaskedTimeOfFlightLookupTable",
+    "PivotTimeAtDetector",
+    "PulsePeriod",
+    "PulseStride",
+    "PulseStrideOffset",
+    "RawData",
+    "ResampledTofData",
+    "SimulationResults",
+    "TimeOfArrivalMinusPivotTimeModuloPeriod",
+    "TimeOfFlightLookupTable",
+    "TofData",
+    "TofWorkflow",
+    "UnwrappedTimeOfArrival",
+    "UnwrappedTimeOfArrivalMinusPivotTime",
+    "default_parameters",
+    "providers",
+    "resample_tof_data",
+    "simulate_beamline",
+]
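Together with the change to ess/reduce/__init__.py in the first hunk of this diff,
this makes the subpackage importable from the top level:

    from ess.reduce import time_of_flight

    # Every name re-exported above is available as an attribute:
    print(sorted(time_of_flight.__all__))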
ess/reduce/time_of_flight/fakes.py
ADDED

@@ -0,0 +1,240 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright (c) 2025 Scipp contributors (https://github.com/scipp)
+"""
+A fake time-of-flight neutron beamline for documentation and testing.
+
+This provides detector event data in a structure as typically provided in a NeXus file,
+with event_time_offset and event_time_zero information.
+"""
+
+from collections.abc import Callable
+
+import numpy as np
+import scipp as sc
+from scippneutron.chopper import DiskChopper
+
+
+class FakeBeamline:
+    def __init__(
+        self,
+        choppers: dict[str, DiskChopper],
+        monitors: dict[str, sc.Variable],
+        run_length: sc.Variable,
+        events_per_pulse: int = 200000,
+        source: Callable | None = None,
+    ):
+        import math
+
+        import tof as tof_pkg
+        from tof.facilities.ess_pulse import pulse
+
+        self.frequency = pulse.frequency
+        self.npulses = math.ceil((run_length * self.frequency).to(unit="").value)
+        self.events_per_pulse = events_per_pulse
+
+        # Create a source
+        if source is None:
+            self.source = tof_pkg.Source(
+                facility="ess", neutrons=self.events_per_pulse, pulses=self.npulses
+            )
+        else:
+            self.source = source(pulses=self.npulses)
+
+        # Convert the choppers to tof.Chopper
+        self.choppers = [
+            tof_pkg.Chopper(
+                frequency=abs(ch.frequency),
+                direction=tof_pkg.AntiClockwise
+                if (ch.frequency.value > 0.0)
+                else tof_pkg.Clockwise,
+                open=ch.slit_begin,
+                close=ch.slit_end,
+                phase=abs(ch.phase),
+                distance=ch.axle_position.fields.z,
+                name=name,
+            )
+            for name, ch in choppers.items()
+        ]
+
+        # Add detectors
+        self.monitors = [
+            tof_pkg.Detector(distance=distance, name=key)
+            for key, distance in monitors.items()
+        ]
+
+        # Propagate the neutrons
+        self.model = tof_pkg.Model(
+            source=self.source, choppers=self.choppers, detectors=self.monitors
+        )
+        self.model_result = self.model.run()
+
+    def get_monitor(self, name: str) -> sc.DataGroup:
+        # Create some fake pulse time zero
+        start = sc.datetime("2024-01-01T12:00:00.000000")
+        period = sc.reciprocal(self.frequency)
+
+        detector = self.model_result.detectors[name]
+        raw_data = detector.data.flatten(to="event")
+        # Select only the neutrons that make it to the detector
+        raw_data = raw_data[~raw_data.masks["blocked_by_others"]].copy()
+        raw_data.coords["Ltotal"] = detector.distance
+
+        # Format the data in a way that resembles data loaded from NeXus
+        event_data = raw_data.copy(deep=False)
+        dt = period.to(unit="us")
+        event_time_zero = (dt * (event_data.coords["toa"] // dt)).to(dtype=int) + start
+        raw_data.coords["event_time_zero"] = event_time_zero
+        event_data.coords["event_time_zero"] = event_time_zero
+        event_data.coords["event_time_offset"] = (
+            event_data.coords.pop("toa").to(unit="s") % period
+        )
+        del event_data.coords["tof"]
+        del event_data.coords["speed"]
+        del event_data.coords["time"]
+        del event_data.coords["wavelength"]
+
+        return (
+            event_data.group("event_time_zero").rename_dims(event_time_zero="pulse"),
+            raw_data.group("event_time_zero").rename_dims(event_time_zero="pulse"),
+        )
+
+
+wfm1_chopper = DiskChopper(
+    frequency=sc.scalar(-70.0, unit="Hz"),
+    beam_position=sc.scalar(0.0, unit="deg"),
+    phase=sc.scalar(-47.10, unit="deg"),
+    axle_position=sc.vector(value=[0, 0, 6.6], unit="m"),
+    slit_begin=sc.array(
+        dims=["cutout"],
+        values=np.array([83.71, 140.49, 193.26, 242.32, 287.91, 330.3]) + 15.0,
+        unit="deg",
+    ),
+    slit_end=sc.array(
+        dims=["cutout"],
+        values=np.array([94.7, 155.79, 212.56, 265.33, 314.37, 360.0]) + 15.0,
+        unit="deg",
+    ),
+    slit_height=sc.scalar(10.0, unit="cm"),
+    radius=sc.scalar(30.0, unit="cm"),
+)
+
+wfm2_chopper = DiskChopper(
+    frequency=sc.scalar(-70.0, unit="Hz"),
+    beam_position=sc.scalar(0.0, unit="deg"),
+    phase=sc.scalar(-76.76, unit="deg"),
+    axle_position=sc.vector(value=[0, 0, 7.1], unit="m"),
+    slit_begin=sc.array(
+        dims=["cutout"],
+        values=np.array([65.04, 126.1, 182.88, 235.67, 284.73, 330.32]) + 15.0,
+        unit="deg",
+    ),
+    slit_end=sc.array(
+        dims=["cutout"],
+        values=np.array([76.03, 141.4, 202.18, 254.97, 307.74, 360.0]) + 15.0,
+        unit="deg",
+    ),
+    slit_height=sc.scalar(10.0, unit="cm"),
+    radius=sc.scalar(30.0, unit="cm"),
+)
+
+foc1_chopper = DiskChopper(
+    frequency=sc.scalar(-56.0, unit="Hz"),
+    beam_position=sc.scalar(0.0, unit="deg"),
+    phase=sc.scalar(-62.40, unit="deg"),
+    axle_position=sc.vector(value=[0, 0, 8.8], unit="m"),
+    slit_begin=sc.array(
+        dims=["cutout"],
+        values=np.array([74.6, 139.6, 194.3, 245.3, 294.8, 347.2]),
+        unit="deg",
+    ),
+    slit_end=sc.array(
+        dims=["cutout"],
+        values=np.array([95.2, 162.8, 216.1, 263.1, 310.5, 371.6]),
+        unit="deg",
+    ),
+    slit_height=sc.scalar(10.0, unit="cm"),
+    radius=sc.scalar(30.0, unit="cm"),
+)
+
+foc2_chopper = DiskChopper(
+    frequency=sc.scalar(-28.0, unit="Hz"),
+    beam_position=sc.scalar(0.0, unit="deg"),
+    phase=sc.scalar(-12.27, unit="deg"),
+    axle_position=sc.vector(value=[0, 0, 15.9], unit="m"),
+    slit_begin=sc.array(
+        dims=["cutout"],
+        values=np.array([98.0, 154.0, 206.8, 255.0, 299.0, 344.65]),
+        unit="deg",
+    ),
+    slit_end=sc.array(
+        dims=["cutout"],
+        values=np.array([134.6, 190.06, 237.01, 280.88, 323.56, 373.76]),
+        unit="deg",
+    ),
+    slit_height=sc.scalar(10.0, unit="cm"),
+    radius=sc.scalar(30.0, unit="cm"),
+)
+
+pol_chopper = DiskChopper(
+    frequency=sc.scalar(-14.0, unit="Hz"),
+    beam_position=sc.scalar(0.0, unit="deg"),
+    phase=sc.scalar(0.0, unit="deg"),
+    axle_position=sc.vector(value=[0, 0, 17.0], unit="m"),
+    slit_begin=sc.array(
+        dims=["cutout"],
+        values=np.array([40.0]),
+        unit="deg",
+    ),
+    slit_end=sc.array(
+        dims=["cutout"],
+        values=np.array([240.0]),
+        unit="deg",
+    ),
+    slit_height=sc.scalar(10.0, unit="cm"),
+    radius=sc.scalar(30.0, unit="cm"),
+)
+
+pulse_skipping = DiskChopper(
+    frequency=sc.scalar(-7.0, unit="Hz"),
+    beam_position=sc.scalar(0.0, unit="deg"),
+    phase=sc.scalar(0.0, unit="deg"),
+    axle_position=sc.vector(value=[0, 0, 30.0], unit="m"),
+    slit_begin=sc.array(
+        dims=["cutout"],
+        values=np.array([40.0]),
+        unit="deg",
+    ),
+    slit_end=sc.array(
+        dims=["cutout"],
+        values=np.array([140.0]),
+        unit="deg",
+    ),
+    slit_height=sc.scalar(10.0, unit="cm"),
+    radius=sc.scalar(30.0, unit="cm"),
+)
+
+
+def wfm_choppers():
+    return {
+        "wfm1": wfm1_chopper,
+        "wfm2": wfm2_chopper,
+        "foc1": foc1_chopper,
+        "foc2": foc2_chopper,
+        "pol": pol_chopper,
+    }
+
+
+def psc_choppers():
+    return {
+        name: DiskChopper(
+            frequency=ch.frequency,
+            beam_position=ch.beam_position,
+            phase=ch.phase,
+            axle_position=ch.axle_position,
+            slit_begin=ch.slit_begin[0:1],
+            slit_end=ch.slit_end[0:1],
+            slit_height=ch.slit_height[0:1],
+            radius=ch.radius,
+        )
+        for name, ch in wfm_choppers().items()
+    }
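A sketch of driving the fake beamline (values are illustrative; FakeBeamline requires
the optional tof package at runtime):

    import scipp as sc
    from ess.reduce.time_of_flight import fakes

    beamline = fakes.FakeBeamline(
        choppers=fakes.psc_choppers(),
        monitors={"detector": sc.scalar(26.0, unit="m")},
        run_length=sc.scalar(1.0, unit="s"),
        events_per_pulse=100_000,
    )
    # The first element mimics NeXus data (event_time_zero/event_time_offset);
    # the second keeps the true tof and wavelength for reference.
    nexus_events, reference = beamline.get_monitor("detector")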
ess/reduce/time_of_flight/simulation.py
ADDED

@@ -0,0 +1,74 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright (c) 2025 Scipp contributors (https://github.com/scipp)
+from collections.abc import Mapping
+
+import scipp as sc
+from scippneutron.chopper import DiskChopper
+
+from .types import SimulationResults
+
+
+def simulate_beamline(
+    choppers: Mapping[str, DiskChopper],
+    neutrons: int = 1_000_000,
+    seed: int | None = None,
+    facility: str = 'ess',
+) -> SimulationResults:
+    """
+    Simulate a pulse of neutrons propagating through a chopper cascade using the
+    ``tof`` package (https://tof.readthedocs.io).
+
+    Parameters
+    ----------
+    choppers:
+        A dict of DiskChopper objects representing the choppers in the beamline. See
+        https://scipp.github.io/scippneutron/user-guide/chopper/processing-nexus-choppers.html#Build-DiskChopper
+        for more information.
+    neutrons:
+        Number of neutrons to simulate.
+    seed:
+        Seed for the random number generator used in the simulation.
+    facility:
+        Facility where the experiment is performed.
+    """
+    import tof
+
+    tof_choppers = [
+        tof.Chopper(
+            frequency=abs(ch.frequency),
+            direction=tof.AntiClockwise
+            if (ch.frequency.value > 0.0)
+            else tof.Clockwise,
+            open=ch.slit_begin,
+            close=ch.slit_end,
+            phase=abs(ch.phase),
+            distance=ch.axle_position.fields.z,
+            name=name,
+        )
+        for name, ch in choppers.items()
+    ]
+    source = tof.Source(facility=facility, neutrons=neutrons, seed=seed)
+    if not tof_choppers:
+        events = source.data.squeeze()
+        return SimulationResults(
+            time_of_arrival=events.coords["time"],
+            speed=events.coords["speed"],
+            wavelength=events.coords["wavelength"],
+            weight=events.data,
+            distance=0.0 * sc.units.m,
+        )
+    model = tof.Model(source=source, choppers=tof_choppers)
+    results = model.run()
+    # Find name of the furthest chopper in tof_choppers
+    furthest_chopper = max(tof_choppers, key=lambda c: c.distance)
+    events = results[furthest_chopper.name].data.squeeze()
+    events = events[
+        ~(events.masks["blocked_by_others"] | events.masks["blocked_by_me"])
+    ]
+    return SimulationResults(
+        time_of_arrival=events.coords["toa"],
+        speed=events.coords["speed"],
+        wavelength=events.coords["wavelength"],
+        weight=events.data,
+        distance=furthest_chopper.distance,
+    )
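For example (illustrative chopper set and neutron count; assumes SimulationResults
exposes its fields as attributes):

    from ess.reduce.time_of_flight import simulate_beamline
    from ess.reduce.time_of_flight.fakes import psc_choppers

    # Monte Carlo run through the chopper cascade via the `tof` package.
    results = simulate_beamline(choppers=psc_choppers(), neutrons=100_000, seed=1234)
    print(results.time_of_arrival.min(), results.time_of_arrival.max())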
ess/reduce/time_of_flight/to_events.py
ADDED

@@ -0,0 +1,104 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright (c) 2025 Scipp contributors (https://github.com/scipp)
+
+from functools import reduce
+
+import numpy as np
+import scipp as sc
+
+
+def to_events(
+    da: sc.DataArray, event_dim: str, events_per_bin: int = 500
+) -> sc.DataArray:
+    """
+    Convert a histogrammed data array to an event list.
+    The generated events have a uniform distribution within each bin.
+    Each dimension with a bin-edge coordinate is converted to an event coordinate.
+    The contract is that if we re-histogram the event list with the same bin edges,
+    we should get the original counts back.
+    Masks on non-bin-edge dimensions are preserved.
+    If there are masks on bin-edge dimensions, the masked values are zeroed out in the
+    original data before the conversion to events.
+
+    Parameters
+    ----------
+    da:
+        DataArray to convert to events.
+    event_dim:
+        Name of the new event dimension.
+    events_per_bin:
+        Number of events to generate per bin.
+    """
+    if da.bins is not None:
+        raise ValueError("Cannot convert a binned DataArray to events.")
+    rng = np.random.default_rng()
+    event_coords = {}
+    edge_dims = []
+    midp_dims = []
+    # Separate bin-edge and midpoints coords
+    for dim in da.dims:
+        if da.coords.is_edges(dim):
+            edge_dims.append(dim)
+        else:
+            midp_dims.append(dim)
+
+    edge_sizes = {dim: da.sizes[dim] for dim in edge_dims}
+    for dim in edge_dims:
+        coord = da.coords[dim]
+        low = sc.broadcast(coord[dim, :-1], sizes=edge_sizes).values
+        high = sc.broadcast(coord[dim, 1:], sizes=edge_sizes).values
+
+        # The numpy.random.uniform function below does not support NaNs, so we need to
+        # replace them with zeros, and then replace them back after the random numbers
+        # have been generated.
+        nans = np.isnan(low) | np.isnan(high)
+        low = np.where(nans, 0.0, low)
+        high = np.where(nans, 0.0, high)
+
+        # In each bin, we generate a number of events with a uniform distribution.
+        events = rng.uniform(
+            low, high, size=(events_per_bin, *list(edge_sizes.values()))
+        )
+        events[..., nans] = np.nan
+        event_coords[dim] = sc.array(
+            dims=[event_dim, *edge_dims], values=events, unit=coord.unit
+        )
+
+    # Find and apply masks that are on a bin-edge dimension
+    event_masks = {}
+    other_masks = {}
+    edge_dims_set = set(edge_dims)
+    for key, mask in da.masks.items():
+        if set(mask.dims) & edge_dims_set:
+            event_masks[key] = mask
+        else:
+            other_masks[key] = mask
+
+    data = da.data
+    if event_masks:
+        inv_mask = (~reduce(lambda a, b: a | b, event_masks.values())).to(dtype=int)
+        inv_mask.unit = ''
+        data = data * inv_mask
+
+    # Create the data counts, which are the original counts divided by the number of
+    # events per bin
+    sizes = {event_dim: events_per_bin} | da.sizes
+    val = sc.broadcast(sc.values(data) / float(events_per_bin), sizes=sizes)
+    kwargs = {'dims': sizes.keys(), 'values': val.values, 'unit': data.unit}
+    if data.variances is not None:
+        # Note here that all the events are correlated.
+        # If we later histogram the events with different edges than the original
+        # histogram, then neighboring bins will be correlated, and the error obtained
+        # will be too small. It is however not clear what can be done to improve this.
+        kwargs['variances'] = sc.broadcast(
+            sc.variances(data) / float(events_per_bin), sizes=sizes
+        ).values
+    new_data = sc.array(**kwargs)
+
+    new = sc.DataArray(data=new_data, coords=event_coords)
+    new = new.transpose((*midp_dims, *edge_dims, event_dim)).flatten(
+        dims=[*edge_dims, event_dim], to=event_dim
+    )
+    return new.assign_coords(
+        {dim: da.coords[dim].copy() for dim in midp_dims}
+    ).assign_masks({key: mask.copy() for key, mask in other_masks.items()})
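The re-histogramming contract stated in the docstring can be checked directly. A small
sketch with invented bin edges and counts:

    import scipp as sc
    from ess.reduce.time_of_flight.to_events import to_events

    # 1-D histogram with a bin-edge 'tof' coordinate (3 bins, 4 edges).
    hist = sc.DataArray(
        sc.array(dims=['tof'], values=[10.0, 20.0, 30.0], unit='counts'),
        coords={'tof': sc.linspace('tof', 0.0, 3.0, num=4, unit='us')},
    )
    events = to_events(hist, event_dim='event', events_per_bin=100)
    # Re-histogramming with the original edges recovers the original counts.
    assert sc.allclose(events.hist(tof=hist.coords['tof']).data, hist.data)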