essreduce-25.4.1-py3-none-any.whl → essreduce-25.5.1-py3-none-any.whl
- ess/reduce/nexus/types.py +0 -7
- ess/reduce/nexus/workflow.py +2 -40
- ess/reduce/time_of_flight/__init__.py +25 -10
- ess/reduce/time_of_flight/eto_to_tof.py +245 -152
- ess/reduce/time_of_flight/fakes.py +2 -2
- ess/reduce/time_of_flight/interpolator_numba.py +32 -61
- ess/reduce/time_of_flight/interpolator_scipy.py +13 -14
- ess/reduce/time_of_flight/simulation.py +25 -1
- ess/reduce/time_of_flight/types.py +63 -27
- ess/reduce/time_of_flight/workflow.py +94 -0
- ess/reduce/utils.py +36 -0
- ess/reduce/workflow.py +4 -2
- {essreduce-25.4.1.dist-info → essreduce-25.5.1.dist-info}/METADATA +2 -2
- {essreduce-25.4.1.dist-info → essreduce-25.5.1.dist-info}/RECORD +18 -16
- {essreduce-25.4.1.dist-info → essreduce-25.5.1.dist-info}/WHEEL +1 -1
- {essreduce-25.4.1.dist-info → essreduce-25.5.1.dist-info}/entry_points.txt +0 -0
- {essreduce-25.4.1.dist-info → essreduce-25.5.1.dist-info}/licenses/LICENSE +0 -0
- {essreduce-25.4.1.dist-info → essreduce-25.5.1.dist-info}/top_level.txt +0 -0
@@ -11,28 +11,39 @@ from collections.abc import Callable
 
 import numpy as np
 import scipp as sc
-
+import scippneutron as scn
 from scippneutron._utils import elem_unit
 
 try:
     from .interpolator_numba import Interpolator as InterpolatorImpl
 except ImportError:
     from .interpolator_scipy import Interpolator as InterpolatorImpl
+
+from ..nexus.types import (
+    CalibratedBeamline,
+    CalibratedMonitor,
+    DetectorData,
+    MonitorData,
+    MonitorType,
+    RunType,
+)
 from .to_events import to_events
 from .types import (
+    DetectorLtotal,
+    DetectorTofData,
     DistanceResolution,
     LookupTableRelativeErrorThreshold,
-    Ltotal,
     LtotalRange,
+    MonitorLtotal,
+    MonitorTofData,
     PulsePeriod,
     PulseStride,
     PulseStrideOffset,
-
-
+    ResampledDetectorTofData,
+    ResampledMonitorTofData,
     SimulationResults,
     TimeOfFlightLookupTable,
     TimeResolution,
-    TofData,
 )
 
 
@@ -84,7 +95,7 @@ def _compute_mean_tof_in_distance_range(
     frame_period:
         Period of the source pulses, i.e., time between consecutive pulse starts.
     time_bins_half_width:
-        Half
+        Half width of the time bins in the event_time_offset axis.
     """
     simulation_distance = simulation.distance.to(unit=distance_unit)
     distances = sc.midpoints(distance_bins)
@@ -104,8 +115,7 @@ def _compute_mean_tof_in_distance_range(
         },
     ).flatten(to="event")
 
-    # Add the event_time_offset coordinate to the
-    # frame period. The table will later be folded to the pulse period.
+    # Add the event_time_offset coordinate, wrapped to the frame_period
     data.coords['event_time_offset'] = data.coords['toa'] % frame_period
 
     # Because we staggered the mesh by half a bin width, we want the values above
@@ -134,51 +144,6 @@ def _compute_mean_tof_in_distance_range(
     return mean_tof
 
 
-def _fold_table_to_pulse_period(
-    table: sc.DataArray, pulse_period: sc.Variable, pulse_stride: int
-) -> sc.DataArray:
-    """
-    Fold the lookup table to the pulse period. We make sure the left and right edges of
-    the table wrap around the ``event_time_offset`` dimension.
-
-    Parameters
-    ----------
-    table:
-        Lookup table with time-of-flight as a function of distance and time-of-arrival.
-    pulse_period:
-        Period of the source pulses, i.e., time between consecutive pulse starts.
-    pulse_stride:
-        Stride of used pulses. Usually 1, but may be a small integer when
-        pulse-skipping.
-    """
-    size = table.sizes['event_time_offset']
-    if (size % pulse_stride) != 0:
-        raise ValueError(
-            "TimeOfFlightLookupTable: the number of time bins must be a multiple of "
-            f"the pulse stride, but got {size} time bins and a pulse stride of "
-            f"{pulse_stride}."
-        )
-
-    size = size // pulse_stride
-    out = sc.concat([table, table['event_time_offset', 0]], dim='event_time_offset')
-    out = sc.concat(
-        [
-            out['event_time_offset', (i * size) : (i + 1) * size + 1]
-            for i in range(pulse_stride)
-        ],
-        dim='pulse',
-    )
-    return out.assign_coords(
-        event_time_offset=sc.concat(
-            [
-                table.coords['event_time_offset']['event_time_offset', :size],
-                pulse_period,
-            ],
-            'event_time_offset',
-        )
-    )
-
-
 def compute_tof_lookup_table(
     simulation: SimulationResults,
     ltotal_range: LtotalRange,
@@ -212,6 +177,43 @@ def compute_tof_lookup_table(
     error_threshold:
         Threshold for the relative standard deviation (coefficient of variation) of the
         projected time-of-flight above which values are masked.
+
+    Notes
+    -----
+
+    Below are some details about the binning and wrapping around frame period in the
+    time dimension.
+
+    We have some simulated ``toa`` (events) from a Tof/McStas simulation.
+    Those are absolute ``toa``, unwrapped.
+    First we compute the usual ``event_time_offset = toa % frame_period``.
+
+    Now, we want to ensure periodic boundaries. If we make a bin centered around 0,
+    and a bin centered around 71ms: the first bin will use events between 0 and
+    ``0.5 * dt`` (where ``dt`` is the bin width).
+    The last bin will use events between ``frame_period - 0.5*dt`` and
+    ``frame_period + 0.5 * dt``. So when we compute the mean inside those two bins,
+    they will not yield the same results.
+    It is as if the first bin is missing the events it should have between
+    ``-0.5 * dt`` and 0 (because of the modulo we computed above).
+
+    To fix this, we do not make a last bin around 71ms (the bins stop at
+    ``frame_period - 0.5*dt``). Instead, we compute modulo a second time,
+    but this time using ``event_time_offset %= (frame_period - 0.5*dt)``.
+    (we cannot directly do ``event_time_offset = toa % (frame_period - 0.5*dt)`` in a
+    single step because it would introduce a gradual shift,
+    as the pulse number increases).
+
+    This second modulo effectively takes all the events that would have gone in the
+    last bin (between ``frame_period - 0.5*dt`` and ``frame_period``) and puts them in
+    the first bin. Instead of placing them between ``-0.5*dt`` and 0,
+    it places them between 0 and ``0.5*dt``, but this does not really matter,
+    because we then take the mean inside the first bin.
+    Whether the events are on the left or right side of zero does not matter.
+
+    Finally, we make a copy of the left edge, and append it to the right of the table,
+    thus ensuring that the values on the right edge are strictly the same as on the
+    left edge.
     """
     distance_unit = "m"
     time_unit = simulation.time_of_arrival.unit
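The double wrap described in the added Notes can be sketched with plain NumPy. The numbers below (71 ms frame period, 1 ms bin width, four events) are illustrative only and are not taken from the package:

```python
import numpy as np

# Illustrative values: a 71 ms frame period and 1 ms event_time_offset bins.
frame_period = 71.0  # ms
dt = 1.0             # ms

# Absolute, unwrapped time-of-arrival of four simulated events (ms).
toa = np.array([0.2, 70.8, 71.3, 3 * 71.0 + 70.9])

# First wrap: the usual event_time_offset.
eto = toa % frame_period                  # -> approx. [0.2, 70.8, 0.3, 70.9]

# Second wrap: events in the last half-bin move into the first bin.
eto = eto % (frame_period - 0.5 * dt)     # -> approx. [0.2, 0.3, 0.3, 0.4]

# A single-step modulo with the shorter period would drift with the pulse number:
single = toa % (frame_period - 0.5 * dt)  # last event -> approx. 1.9 instead of 0.4
```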
@@ -276,16 +278,31 @@ def compute_tof_lookup_table(
     table.coords["distance"] = sc.midpoints(table.coords["distance"])
     table.coords["event_time_offset"] = sc.midpoints(table.coords["event_time_offset"])
 
-
-
+    # Copy the left edge to the right to create periodic boundary conditions
+    table = sc.DataArray(
+        data=sc.concat(
+            [table.data, table.data['event_time_offset', 0]], dim='event_time_offset'
+        ),
+        coords={
+            "distance": table.coords["distance"],
+            "event_time_offset": sc.concat(
+                [table.coords["event_time_offset"], frame_period],
+                dim='event_time_offset',
+            ),
+            "pulse_period": pulse_period,
+            "pulse_stride": sc.scalar(pulse_stride, unit=None),
+            "distance_resolution": table.coords["distance"][1]
+            - table.coords["distance"][0],
+            "time_resolution": table.coords["event_time_offset"][1]
+            - table.coords["event_time_offset"][0],
+            "error_threshold": sc.scalar(error_threshold),
+        },
     )
 
     # In-place masking for better performance
     _mask_large_uncertainty(table, error_threshold)
 
-    return TimeOfFlightLookupTable(
-        table.transpose(('pulse', 'distance', 'event_time_offset'))
-    )
+    return TimeOfFlightLookupTable(table)
 
 
 class TofInterpolator:
@@ -293,22 +310,6 @@ class TofInterpolator:
         self._distance_unit = distance_unit
         self._time_unit = time_unit
 
-        # In the pulse dimension, it could be that for a given event_time_offset and
-        # distance, a tof value is finite in one pulse and NaN in the other.
-        # When using the bilinear interpolation, even if the value of the requested
-        # point is exactly 0 or 1 (in the case of pulse_stride=2), the interpolator
-        # will still use all 4 corners surrounding the point. This means that if one of
-        # the corners is NaN, the result will be NaN.
-        # Here, we use a trick where we duplicate the lookup values in the 'pulse'
-        # dimension so that the interpolator has values on bin edges for that dimension.
-        # The interpolator raises an error if axes coordinates are not strictly
-        # monotonic, so we cannot use e.g. [-0.5, 0.5, 0.5, 1.5] in the case of
-        # pulse_stride=2. Instead we use [-0.25, 0.25, 0.75, 1.25].
-        base_grid = np.arange(float(lookup.sizes["pulse"]))
-        self._pulse_edges = np.sort(
-            np.concatenate([base_grid - 0.25, base_grid + 0.25])
-        )
-
         self._time_edges = (
             lookup.coords["event_time_offset"]
             .to(unit=self._time_unit, copy=False)
@@ -321,23 +322,16 @@ class TofInterpolator:
         self._interpolator = InterpolatorImpl(
             time_edges=self._time_edges,
             distance_edges=self._distance_edges,
-
-            values=np.repeat(
-                lookup.data.to(unit=self._time_unit, copy=False).values, 2, axis=0
-            ),
+            values=lookup.data.to(unit=self._time_unit, copy=False).values,
         )
 
     def __call__(
         self,
-        pulse_index: sc.Variable,
         ltotal: sc.Variable,
         event_time_offset: sc.Variable,
+        pulse_period: sc.Variable,
+        pulse_index: sc.Variable | None = None,
     ) -> sc.Variable:
-        if pulse_index.unit not in ("", None):
-            raise sc.UnitError(
-                "pulse_index must have unit dimensionless or None, "
-                f"but got unit: {pulse_index.unit}."
-            )
         if ltotal.unit != self._distance_unit:
             raise sc.UnitError(
                 f"ltotal must have unit: {self._distance_unit}, "
@@ -349,31 +343,30 @@ class TofInterpolator:
                 f"but got unit: {event_time_offset.unit}."
             )
         out_dims = event_time_offset.dims
-        pulse_index = pulse_index.values
         ltotal = ltotal.values
         event_time_offset = event_time_offset.values
 
         return sc.array(
             dims=out_dims,
             values=self._interpolator(
-                times=event_time_offset,
+                times=event_time_offset,
+                distances=ltotal,
+                pulse_index=pulse_index.values if pulse_index is not None else None,
+                pulse_period=pulse_period.value,
             ),
             unit=self._time_unit,
         )
 
 
 def _time_of_flight_data_histogram(
-    da: sc.DataArray,
-    lookup: sc.DataArray,
-    ltotal: sc.Variable,
-    pulse_period: sc.Variable,
+    da: sc.DataArray, lookup: sc.DataArray, ltotal: sc.Variable
 ) -> sc.DataArray:
     # In NeXus, 'time_of_flight' is the canonical name in NXmonitor, but in some files,
     # it may be called 'tof'.
     key = next(iter(set(da.coords.keys()) & {"time_of_flight", "tof"}))
     raw_eto = da.coords[key].to(dtype=float, copy=False)
     eto_unit = raw_eto.unit
-    pulse_period = pulse_period.to(unit=eto_unit)
+    pulse_period = lookup.coords["pulse_period"].to(unit=eto_unit)
 
     # In histogram mode, because there is a wrap around at the end of the pulse, we
     # need to insert a bin edge at that exact location to avoid having the last bin
@@ -386,31 +379,14 @@ def _time_of_flight_data_histogram(
     rebinned = da.rebin({key: new_bins})
     etos = rebinned.coords[key]
 
-    # In histogram mode, the lookup table cannot have a pulse dimension because we
-    # cannot know in the histogrammed data which pulse the events belong to.
-    # So we merge the pulse dimension in the lookup table. A quick way to do this
-    # is to take the mean of the data along the pulse dimension (there should
-    # only be regions that are NaN in one pulse and finite in the other).
-    merged = lookup.data.nanmean('pulse')
-    dim = merged.dims[0]
-    lookup = sc.DataArray(
-        data=merged.fold(dim=dim, sizes={'pulse': 1, dim: merged.sizes[dim]}),
-        coords={
-            'pulse': sc.arange('pulse', 1.0),
-            'distance': lookup.coords['distance'],
-            'event_time_offset': lookup.coords['event_time_offset'],
-        },
-    )
-    pulse_index = sc.zeros(sizes=etos.sizes)
-
     # Create linear interpolator
     interp = TofInterpolator(lookup, distance_unit=ltotal.unit, time_unit=eto_unit)
 
     # Compute time-of-flight of the bin edges using the interpolator
     tofs = interp(
-        pulse_index=pulse_index,
         ltotal=ltotal.broadcast(sizes=etos.sizes),
         event_time_offset=etos,
+        pulse_period=pulse_period,
     )
 
     return rebinned.assign_coords(tof=tofs)
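The wrap-around handling in `_time_of_flight_data_histogram` (insert a bin edge exactly at the pulse period, then rebin) can be illustrated with a small scipp sketch; the toy histogram and the 71.4 ms pulse period below are made up for illustration:

```python
import numpy as np
import scipp as sc

# Toy histogram: 100 time_of_flight bins from 0 to 80 ms, one count per bin.
edges = np.linspace(0.0, 80.0, 101)
da = sc.DataArray(
    sc.ones(dims=['time_of_flight'], shape=[100], unit='counts'),
    coords={
        'time_of_flight': sc.array(dims=['time_of_flight'], values=edges, unit='ms')
    },
)

# Add an edge at the pulse period so that no bin straddles the wrap-around point,
# then rebin onto the new edges (the straddling bin's counts are split).
pulse_period = 71.4  # ms, illustrative
new_edges = sc.array(
    dims=['time_of_flight'],
    values=np.sort(np.append(edges, pulse_period)),
    unit='ms',
)
rebinned = da.rebin({'time_of_flight': new_edges})
```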
@@ -420,6 +396,7 @@ def _guess_pulse_stride_offset(
     pulse_index: sc.Variable,
     ltotal: sc.Variable,
     event_time_offset: sc.Variable,
+    pulse_period: sc.Variable,
     pulse_stride: int,
     interp: TofInterpolator,
 ) -> int:
@@ -446,6 +423,8 @@ def _guess_pulse_stride_offset(
         Total length of the flight path from the source to the detector for each event.
     event_time_offset:
         Time of arrival of the neutron at the detector for each event.
+    pulse_period:
+        Period of the source pulses, i.e., time between consecutive pulse starts.
     pulse_stride:
         Stride of used pulses.
     interp:
@@ -469,7 +448,12 @@ def _guess_pulse_stride_offset(
     )
     for i in range(pulse_stride):
         pulse_inds = (pulse_index + i) % pulse_stride
-        tofs[i] = interp(
+        tofs[i] = interp(
+            ltotal=ltotal,
+            event_time_offset=etos,
+            pulse_index=pulse_inds,
+            pulse_period=pulse_period,
+        )
     # Find the entry in the list with the least number of nan values
     return sorted(tofs, key=lambda x: sc.isnan(tofs[x]).sum())[0]
 
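The heuristic in `_guess_pulse_stride_offset` (try every candidate offset and keep the one that produces the fewest NaN time-of-flight values) can be sketched without scipp; the fake interpolator and the numbers below are purely illustrative:

```python
import numpy as np

pulse_stride = 2
pulse_index = np.array([0, 1, 0, 1])       # derived from event_time_zero
etos = np.array([10.0, 60.0, 12.0, 58.0])  # event_time_offset in ms


def fake_interp(pulse_index, etos):
    # Stand-in for the real lookup: pretend pulse 0 is only valid below 50 ms
    # and pulse 1 only at or above 50 ms; invalid lookups return NaN.
    valid = np.where(pulse_index == 0, etos < 50.0, etos >= 50.0)
    return np.where(valid, etos, np.nan)


nan_counts = {
    offset: np.isnan(fake_interp((pulse_index + offset) % pulse_stride, etos)).sum()
    for offset in range(pulse_stride)
}
best_offset = min(nan_counts, key=nan_counts.get)  # -> 0 for this toy data
```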
@@ -478,8 +462,6 @@ def _time_of_flight_data_events(
     da: sc.DataArray,
     lookup: sc.DataArray,
     ltotal: sc.Variable,
-    pulse_period: sc.Variable,
-    pulse_stride: int,
     pulse_stride_offset: int,
 ) -> sc.DataArray:
     etos = da.bins.coords["event_time_offset"].to(dtype=float, copy=False)
@@ -492,20 +474,21 @@ def _time_of_flight_data_events(
     ltotal = sc.bins_like(etos, ltotal).bins.constituents["data"]
     etos = etos.bins.constituents["data"]
 
-
-
-
-
-
-
+    pulse_index = None
+    pulse_period = lookup.coords["pulse_period"].to(unit=eto_unit)
+    pulse_stride = lookup.coords["pulse_stride"].value
+
+    if pulse_stride > 1:
+        # Compute a pulse index for every event: it is the index of the pulse within a
+        # frame period. The index ranges from zero to pulse_stride - 1.
         etz_unit = 'ns'
         etz = (
             da.bins.coords["event_time_zero"]
             .bins.constituents["data"]
             .to(unit=etz_unit, copy=False)
         )
-
-        frame_period =
+        pulse_period_ns = pulse_period.to(unit=etz_unit, dtype=int)
+        frame_period = pulse_period_ns * pulse_stride
         # Define a common reference time using epoch as a base, but making sure that it
         # is aligned with the pulse_period and the frame_period.
         # We need to use a global reference time instead of simply taking the minimum
@@ -513,17 +496,17 @@ def _time_of_flight_data_events(
         # may not be the first event of the first pulse for all chunks. This would lead
         # to inconsistent pulse indices.
         epoch = sc.datetime(0, unit=etz_unit)
-        diff_to_epoch = (etz.min() - epoch) %
+        diff_to_epoch = (etz.min() - epoch) % pulse_period_ns
         # Here we offset the reference by half a pulse period to avoid errors from
         # fluctuations in the event_time_zeros in the data. They are triggered by the
         # neutron source, and may not always be exactly separated by the pulse period.
         # While fluctuations will exist, they will be small, and offsetting the times
         # by half a pulse period is a simple enough fix.
-        reference = epoch + diff_to_epoch - (
+        reference = epoch + diff_to_epoch - (pulse_period_ns // 2)
         # Use in-place operations to avoid large allocations
         pulse_index = etz - reference
         pulse_index %= frame_period
-        pulse_index //=
+        pulse_index //= pulse_period_ns
 
         # Apply the pulse_stride_offset
         if pulse_stride_offset is None:
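The pulse-index arithmetic above (a reference aligned with the pulse period, shifted by half a period, followed by a modulo and a floor division) can be followed with a small integer example; the nanosecond values are made up and the epoch is taken as zero for brevity:

```python
import numpy as np

pulse_period_ns = 71_000_000               # ~71 ms, illustrative
pulse_stride = 2
frame_period = pulse_period_ns * pulse_stride

# event_time_zero of three events, in ns since epoch (made-up values).
etz = np.array([5, 6, 8]) * pulse_period_ns + 1234

# Reference aligned with the pulse period, offset by half a period so that small
# jitter in event_time_zero cannot flip an event into the neighbouring pulse.
diff_to_epoch = etz.min() % pulse_period_ns
reference = diff_to_epoch - pulse_period_ns // 2

pulse_index = (etz - reference) % frame_period // pulse_period_ns
# -> [1, 0, 0]: index of each pulse within its frame, in [0, pulse_stride).
```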
@@ -531,6 +514,7 @@ def _time_of_flight_data_events(
                 pulse_index=pulse_index,
                 ltotal=ltotal,
                 event_time_offset=etos,
+                pulse_period=pulse_period,
                 pulse_stride=pulse_stride,
                 interp=interp,
             )
@@ -538,21 +522,86 @@ def _time_of_flight_data_events(
         pulse_index %= pulse_stride
 
     # Compute time-of-flight for all neutrons using the interpolator
-    tofs = interp(
+    tofs = interp(
+        ltotal=ltotal,
+        event_time_offset=etos,
+        pulse_index=pulse_index,
+        pulse_period=pulse_period,
+    )
 
     parts = da.bins.constituents
     parts["data"] = tofs
-    return da.bins.assign_coords(tof=
+    return da.bins.assign_coords(tof=sc.bins(**parts, validate_indices=False))
+
+
+def detector_ltotal_from_straight_line_approximation(
+    detector_beamline: CalibratedBeamline[RunType],
+) -> DetectorLtotal[RunType]:
+    """
+    Compute Ltotal for the detector pixels.
+    This is a naive straight-line approximation to Ltotal based on basic component
+    positions.
+
+    Parameters
+    ----------
+    detector_beamline:
+        Beamline data for the detector that contains the positions necessary to compute
+        the straight-line approximation to Ltotal (source, sample, and detector
+        positions).
+    """
+    graph = scn.conversion.graph.beamline.beamline(scatter=True)
+    return DetectorLtotal[RunType](
+        detector_beamline.transform_coords(
+            "Ltotal", graph=graph, keep_intermediate=False
+        ).coords["Ltotal"]
+    )
+
+
+def monitor_ltotal_from_straight_line_approximation(
+    monitor_beamline: CalibratedMonitor[RunType, MonitorType],
+) -> MonitorLtotal[RunType, MonitorType]:
+    """
+    Compute Ltotal for the monitor.
+    This is a naive straight-line approximation to Ltotal based on basic component
+    positions.
+
+    Parameters
+    ----------
+    monitor_beamline:
+        Beamline data for the monitor that contains the positions necessary to compute
+        the straight-line approximation to Ltotal (source and monitor positions).
+    """
+    graph = scn.conversion.graph.beamline.beamline(scatter=False)
+    return MonitorLtotal[RunType, MonitorType](
+        monitor_beamline.transform_coords(
+            "Ltotal", graph=graph, keep_intermediate=False
+        ).coords["Ltotal"]
+    )
 
 
-def
-    da:
+def _compute_tof_data(
+    da: sc.DataArray,
+    lookup: sc.DataArray,
+    ltotal: sc.Variable,
+    pulse_stride_offset: int,
+) -> sc.DataArray:
+    if da.bins is None:
+        return _time_of_flight_data_histogram(da=da, lookup=lookup, ltotal=ltotal)
+    else:
+        return _time_of_flight_data_events(
+            da=da,
+            lookup=lookup,
+            ltotal=ltotal,
+            pulse_stride_offset=pulse_stride_offset,
+        )
+
+
+def detector_time_of_flight_data(
+    detector_data: DetectorData[RunType],
     lookup: TimeOfFlightLookupTable,
-    ltotal:
-    pulse_period: PulsePeriod,
-    pulse_stride: PulseStride,
+    ltotal: DetectorLtotal[RunType],
     pulse_stride_offset: PulseStrideOffset,
-) ->
+) -> DetectorTofData[RunType]:
     """
     Convert the time-of-arrival data to time-of-flight data using a lookup table.
     The output data will have a time-of-flight coordinate.
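The straight-line providers added above delegate to scippneutron's beamline graph; geometrically, the quantity they produce is just a sum of distances. A minimal scipp sketch with made-up positions (the real providers read the positions from the NeXus geometry):

```python
import scipp as sc

# Hypothetical component positions.
source = sc.vector(value=[0.0, 0.0, -76.0], unit='m')
sample = sc.vector(value=[0.0, 0.0, 0.0], unit='m')
detector = sc.vector(value=[0.0, 1.5, 4.0], unit='m')
monitor = sc.vector(value=[0.0, 0.0, -20.0], unit='m')

# Detector case (scatter=True): Ltotal = L1 + L2.
ltotal_detector = sc.norm(sample - source) + sc.norm(detector - sample)

# Monitor case (scatter=False): Ltotal is the direct source-to-monitor distance.
ltotal_monitor = sc.norm(monitor - source)
```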
@@ -567,33 +616,55 @@ def time_of_flight_data(
         arrival.
     ltotal:
         Total length of the flight path from the source to the detector.
-    pulse_period:
-        Period of the source pulses, i.e., time between consecutive pulse starts.
-    pulse_stride:
-        Stride of used pulses. Usually 1, but may be a small integer when
-        pulse-skipping.
     pulse_stride_offset:
         When pulse-skipping, the offset of the first pulse in the stride. This is
         typically zero but can be a small integer < pulse_stride.
     """
-
-
-
-
+    return DetectorTofData[RunType](
+        _compute_tof_data(
+            da=detector_data,
+            lookup=lookup,
+            ltotal=ltotal,
+            pulse_stride_offset=pulse_stride_offset,
         )
-
-
-
+    )
+
+
+def monitor_time_of_flight_data(
+    monitor_data: MonitorData[RunType, MonitorType],
+    lookup: TimeOfFlightLookupTable,
+    ltotal: MonitorLtotal[RunType, MonitorType],
+    pulse_stride_offset: PulseStrideOffset,
+) -> MonitorTofData[RunType, MonitorType]:
+    """
+    Convert the time-of-arrival data to time-of-flight data using a lookup table.
+    The output data will have a time-of-flight coordinate.
+
+    Parameters
+    ----------
+    da:
+        Raw monitor data loaded from a NeXus file, e.g., NXmonitor containing
+        NXevent_data.
+    lookup:
+        Lookup table giving time-of-flight as a function of distance and time of
+        arrival.
+    ltotal:
+        Total length of the flight path from the source to the monitor.
+    pulse_stride_offset:
+        When pulse-skipping, the offset of the first pulse in the stride. This is
+        typically zero but can be a small integer < pulse_stride.
+    """
+    return MonitorTofData[RunType, MonitorType](
+        _compute_tof_data(
+            da=monitor_data,
             lookup=lookup,
             ltotal=ltotal,
-            pulse_period=pulse_period,
-            pulse_stride=pulse_stride,
             pulse_stride_offset=pulse_stride_offset,
         )
-
+    )
 
 
-def
+def _resample_tof_data(da: sc.DataArray) -> sc.DataArray:
     """
     Histogrammed data that has been converted to `tof` will typically have
     unsorted bin edges (due to either wrapping of `time_of_flight` or wavelength
@@ -626,13 +697,29 @@ def resample_tof_data(da: TofData) -> ResampledTofData:
     coord = da.coords["tof"]
     bin_width = (coord[dim, 1:] - coord[dim, :-1]).nanmedian()
     rehist = events.hist(tof=bin_width)
-    return
-
-        {key: var for key, var in da.coords.items() if dim not in var.dims}
-    )
+    return rehist.assign_coords(
+        {key: var for key, var in da.coords.items() if dim not in var.dims}
     )
 
 
+def resample_detector_time_of_flight_data(
+    da: DetectorTofData[RunType],
+) -> ResampledDetectorTofData[RunType]:
+    """
+    Resample the detector time-of-flight data to ensure that the bin edges are sorted.
+    """
+    return ResampledDetectorTofData(_resample_tof_data(da))
+
+
+def resample_monitor_time_of_flight_data(
+    da: MonitorTofData[RunType, MonitorType],
+) -> ResampledMonitorTofData[RunType, MonitorType]:
+    """
+    Resample the monitor time-of-flight data to ensure that the bin edges are sorted.
+    """
+    return ResampledMonitorTofData(_resample_tof_data(da))
+
+
 def default_parameters() -> dict:
     """
     Default parameters of the time-of-flight workflow.
@@ -651,4 +738,10 @@ def providers() -> tuple[Callable]:
     """
     Providers of the time-of-flight workflow.
     """
-    return (
+    return (
+        compute_tof_lookup_table,
+        detector_time_of_flight_data,
+        monitor_time_of_flight_data,
+        detector_ltotal_from_straight_line_approximation,
+        monitor_ltotal_from_straight_line_approximation,
+    )
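The providers tuple is intended to be assembled into a sciline pipeline; the release also adds a dedicated ess/reduce/time_of_flight/workflow.py for this, so the following is only a hypothetical minimal sketch:

```python
import sciline

from ess.reduce.time_of_flight import eto_to_tof

# Build a pipeline from this module's providers and default parameters. Inputs such
# as detector data, simulation results and the Ltotal range must still be set on the
# pipeline before requesting time-of-flight data.
pipeline = sciline.Pipeline(
    providers=eto_to_tof.providers(), params=eto_to_tof.default_parameters()
)
```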
@@ -28,9 +28,9 @@ class FakeBeamline:
         import math
 
         import tof as tof_pkg
-        from tof.facilities.ess_pulse import
+        from tof.facilities.ess_pulse import frequency as ess_frequency
 
-        self.frequency =
+        self.frequency = ess_frequency
         self.npulses = math.ceil((run_length * self.frequency).to(unit="").value)
         self.events_per_pulse = events_per_pulse
         if source_position is None: