essreduce 24.11.3-py3-none-any.whl → 25.1.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their respective public registries.
New file: to_events.py (name given by the relative import in the workflow module below)
@@ -0,0 +1,104 @@
+ # SPDX-License-Identifier: BSD-3-Clause
+ # Copyright (c) 2025 Scipp contributors (https://github.com/scipp)
+
+ from functools import reduce
+
+ import numpy as np
+ import scipp as sc
+
+
+ def to_events(
+     da: sc.DataArray, event_dim: str, events_per_bin: int = 500
+ ) -> sc.DataArray:
+     """
+     Convert a histogrammed data array to an event list.
+     The generated events have a uniform distribution within each bin.
+     Each dimension with a bin-edge coordinate is converted to an event coordinate.
+     The contract is that if we re-histogram the event list with the same bin edges,
+     we should get the original counts back.
+     Masks on non-bin-edge dimensions are preserved.
+     If there are masks on bin-edge dimensions, the masked values are zeroed out in the
+     original data before the conversion to events.
+
+     Parameters
+     ----------
+     da:
+         DataArray to convert to events.
+     event_dim:
+         Name of the new event dimension.
+     events_per_bin:
+         Number of events to generate per bin.
+     """
+     if da.bins is not None:
+         raise ValueError("Cannot convert a binned DataArray to events.")
+     rng = np.random.default_rng()
+     event_coords = {}
+     edge_dims = []
+     midp_dims = []
+     # Separate bin-edge and midpoint coords
+     for dim in da.dims:
+         if da.coords.is_edges(dim):
+             edge_dims.append(dim)
+         else:
+             midp_dims.append(dim)
+
+     edge_sizes = {dim: da.sizes[dim] for dim in edge_dims}
+     for dim in edge_dims:
+         coord = da.coords[dim]
+         low = sc.broadcast(coord[dim, :-1], sizes=edge_sizes).values
+         high = sc.broadcast(coord[dim, 1:], sizes=edge_sizes).values
+
+         # The numpy.random.uniform function below does not support NaNs, so we
+         # replace them with zeros here and re-insert the NaNs once the random
+         # numbers have been generated.
+         nans = np.isnan(low) | np.isnan(high)
+         low = np.where(nans, 0.0, low)
+         high = np.where(nans, 0.0, high)
+
+         # In each bin, we generate a number of events with a uniform distribution.
+         events = rng.uniform(
+             low, high, size=(events_per_bin, *list(edge_sizes.values()))
+         )
+         events[..., nans] = np.nan
+         event_coords[dim] = sc.array(
+             dims=[event_dim, *edge_dims], values=events, unit=coord.unit
+         )
+
+     # Find and apply masks that are on a bin-edge dimension
+     event_masks = {}
+     other_masks = {}
+     edge_dims_set = set(edge_dims)
+     for key, mask in da.masks.items():
+         if set(mask.dims) & edge_dims_set:
+             event_masks[key] = mask
+         else:
+             other_masks[key] = mask
+
+     data = da.data
+     if event_masks:
+         inv_mask = (~reduce(lambda a, b: a | b, event_masks.values())).to(dtype=int)
+         inv_mask.unit = ''
+         data = data * inv_mask
+
+     # Create the data counts, which are the original counts divided by the number of
+     # events per bin
+     sizes = {event_dim: events_per_bin} | da.sizes
+     val = sc.broadcast(sc.values(data) / float(events_per_bin), sizes=sizes)
+     kwargs = {'dims': sizes.keys(), 'values': val.values, 'unit': data.unit}
+     if data.variances is not None:
+         # Note here that all the events are correlated.
+         # If we later histogram the events with different edges than the original
+         # histogram, then neighboring bins will be correlated, and the error obtained
+         # will be too small. It is however not clear what can be done to improve this.
+         kwargs['variances'] = sc.broadcast(
+             sc.variances(data) / float(events_per_bin), sizes=sizes
+         ).values
+     new_data = sc.array(**kwargs)
+
+     new = sc.DataArray(data=new_data, coords=event_coords)
+     new = new.transpose((*midp_dims, *edge_dims, event_dim)).flatten(
+         dims=[*edge_dims, event_dim], to=event_dim
+     )
+     return new.assign_coords(
+         {dim: da.coords[dim].copy() for dim in midp_dims}
+     ).assign_masks({key: mask.copy() for key, mask in other_masks.items()})
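
As an illustration of the round-trip contract described in the docstring, a minimal sketch with toy values (the histogram below is an assumption, not part of the package):

import scipp as sc

# A small 1-D histogram with a bin-edge 'tof' coordinate (toy values).
hist = sc.DataArray(
    data=sc.array(dims=["tof"], values=[4.0, 6.0], unit="counts"),
    coords={"tof": sc.array(dims=["tof"], values=[0.0, 1.0, 2.0], unit="us")},
)
events = to_events(hist, "event", events_per_bin=100)
# Re-histogramming with the original edges recovers the original counts.
rehist = events.hist(tof=hist.coords["tof"])
assert sc.allclose(rehist.data, hist.data)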
New file: the time-of-flight workflow module (file path not shown in this diff)
@@ -0,0 +1,541 @@
+ # SPDX-License-Identifier: BSD-3-Clause
+ # Copyright (c) 2025 Scipp contributors (https://github.com/scipp)
+ """
+ Time-of-flight workflow for unwrapping the time of arrival of the neutron at the
+ detector.
+ This workflow is used to convert raw detector data with event_time_zero and
+ event_time_offset coordinates to data with a time-of-flight coordinate.
+ """
+
+ from collections.abc import Callable
+
+ import numpy as np
+ import scipp as sc
+ from scipp._scipp.core import _bins_no_validate
+ from scippneutron._utils import elem_unit
+
+ from .to_events import to_events
+ from .types import (
+     DistanceResolution,
+     FastestNeutron,
+     FrameFoldedTimeOfArrival,
+     FramePeriod,
+     LookupTableRelativeErrorThreshold,
+     Ltotal,
+     LtotalRange,
+     MaskedTimeOfFlightLookupTable,
+     PivotTimeAtDetector,
+     PulsePeriod,
+     PulseStride,
+     PulseStrideOffset,
+     RawData,
+     ResampledTofData,
+     SimulationResults,
+     TimeOfArrivalMinusPivotTimeModuloPeriod,
+     TimeOfArrivalResolution,
+     TimeOfFlightLookupTable,
+     TofData,
+     UnwrappedTimeOfArrival,
+     UnwrappedTimeOfArrivalMinusPivotTime,
+ )
+
+
+ def frame_period(pulse_period: PulsePeriod, pulse_stride: PulseStride) -> FramePeriod:
+     """
+     Return the period of a frame, which is defined as the pulse period times the pulse
+     stride.
+
+     Parameters
+     ----------
+     pulse_period:
+         Period of the source pulses, i.e., time between consecutive pulse starts.
+     pulse_stride:
+         Stride of used pulses. Usually 1, but may be a small integer when
+         pulse-skipping.
+     """
+     return FramePeriod(pulse_period * pulse_stride)
+
+
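For orientation, a minimal sketch of the arithmetic, assuming the function above is importable (values are for illustration only):

import scipp as sc

pulse_period = 1.0 / sc.scalar(14.0, unit="Hz")  # ~71.4 ms between pulse starts
frame = frame_period(pulse_period, 2)  # one frame spans two pulses when skipping
print(frame.to(unit="ms"))  # ~142.9 ms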
+ def extract_ltotal(da: RawData) -> Ltotal:
+     """
+     Extract the total length of the flight path from source to detector from the
+     detector data.
+
+     Parameters
+     ----------
+     da:
+         Raw detector data loaded from a NeXus file, e.g., NXdetector containing
+         NXevent_data.
+     """
+     return Ltotal(da.coords["Ltotal"])
+
+
+ def compute_tof_lookup_table(
+     simulation: SimulationResults,
+     ltotal_range: LtotalRange,
+     distance_resolution: DistanceResolution,
+     toa_resolution: TimeOfArrivalResolution,
+ ) -> TimeOfFlightLookupTable:
+     """
+     Compute a lookup table for time-of-flight as a function of distance and
+     time-of-arrival.
+
+     Parameters
+     ----------
+     simulation:
+         Results of a time-of-flight simulation used to create a lookup table.
+         The results should be a flat table with columns for time-of-arrival, speed,
+         wavelength, and weight.
+     ltotal_range:
+         Range of total flight path lengths from the source to the detector.
+     distance_resolution:
+         Resolution of the distance axis in the lookup table.
+     toa_resolution:
+         Resolution of the time-of-arrival axis in the lookup table.
+     """
+     distance_unit = "m"
+     res = distance_resolution.to(unit=distance_unit)
+     simulation_distance = simulation.distance.to(unit=distance_unit)
+
+     min_dist, max_dist = (
+         x.to(unit=distance_unit) - simulation_distance for x in ltotal_range
+     )
+     # We need to bin the data below to compute the weighted mean of the
+     # time-of-flight. This results in data with bin edges.
+     # However, the 2d interpolator expects bin centers.
+     # We want to give the 2d interpolator a table that covers the requested range,
+     # hence we need to extend the range by at least half a resolution in each direction.
+     # Then, we make the choice that the resolution in distance is the quantity that
+     # should be preserved. Because the difference between min and max distance is
+     # not necessarily an integer multiple of the resolution, we need to add a pad to
+     # ensure that the last bin is not cut off. We want the upper edge to be higher than
+     # the maximum distance, hence we pad with an additional 1.5 x resolution (2 x total).
+     pad = 2.0 * res
+     dist_edges = sc.array(
+         dims=["distance"],
+         values=np.arange((min_dist - pad).value, (max_dist + pad).value, res.value),
+         unit=distance_unit,
+     )
+     distances = sc.midpoints(dist_edges)
+
+     time_unit = simulation.time_of_arrival.unit
+     toas = simulation.time_of_arrival + (distances / simulation.speed).to(
+         unit=time_unit, copy=False
+     )
+
+     # Compute time-of-flight for all neutrons
+     wavs = sc.broadcast(simulation.wavelength.to(unit="m"), sizes=toas.sizes).flatten(
+         to="event"
+     )
+     dist = sc.broadcast(distances + simulation_distance, sizes=toas.sizes).flatten(
+         to="event"
+     )
+     tofs = dist * sc.constants.m_n
+     tofs *= wavs
+     tofs /= sc.constants.h
+
+     data = sc.DataArray(
+         data=sc.broadcast(simulation.weight, sizes=toas.sizes).flatten(to="event"),
+         coords={
+             "toa": toas.flatten(to="event"),
+             "tof": tofs.to(unit=time_unit, copy=False),
+             "distance": dist,
+         },
+     )
+
+     binned = data.bin(distance=dist_edges + simulation_distance, toa=toa_resolution)
+     # Weighted mean of tof inside each bin
+     mean_tof = (
+         binned.bins.data * binned.bins.coords["tof"]
+     ).bins.sum() / binned.bins.sum()
+     # Compute the variance of the tofs to track regions with large uncertainty
+     variance = (
+         binned.bins.data * (binned.bins.coords["tof"] - mean_tof) ** 2
+     ).bins.sum() / binned.bins.sum()
+
+     mean_tof.variances = variance.values
+
+     # Convert coordinates to midpoints
+     mean_tof.coords["toa"] = sc.midpoints(mean_tof.coords["toa"])
+     mean_tof.coords["distance"] = sc.midpoints(mean_tof.coords["distance"])
+
+     return TimeOfFlightLookupTable(mean_tof)
+
+
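To make the padding above concrete, a small worked sketch (all numbers assumed): for ltotal_range = (75 m, 80 m), a simulation distance of 60 m, and a 0.1 m resolution, the padded edges extend 0.2 m past both requested bounds, so the bin midpoints cover the full [15 m, 20 m] relative range.

import numpy as np
import scipp as sc

res = sc.scalar(0.1, unit="m")
min_dist = sc.scalar(15.0, unit="m")  # 75 m minus 60 m simulation distance
max_dist = sc.scalar(20.0, unit="m")  # 80 m minus 60 m simulation distance
pad = 2.0 * res
edges = np.arange((min_dist - pad).value, (max_dist + pad).value, res.value)
# edges run from 14.8 to ~20.1 in 0.1 steps; their midpoints span [15, 20].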
+ def masked_tof_lookup_table(
+     tof_lookup: TimeOfFlightLookupTable,
+     error_threshold: LookupTableRelativeErrorThreshold,
+ ) -> MaskedTimeOfFlightLookupTable:
+     """
+     Mask regions of the lookup table where the relative error of the projected
+     time-of-flight is larger than a given threshold.
+
+     Parameters
+     ----------
+     tof_lookup:
+         Lookup table giving time-of-flight as a function of distance and
+         time-of-arrival.
+     error_threshold:
+         Threshold for the relative error of the projected time-of-flight above which
+         regions are masked.
+     """
+     relative_error = sc.stddevs(tof_lookup.data) / sc.values(tof_lookup.data)
+     mask = relative_error > sc.scalar(error_threshold)
+     out = tof_lookup.copy()
+     # Use numpy for indexing as table is 2D
+     out.values[mask.values] = np.nan
+     return MaskedTimeOfFlightLookupTable(out)
+
+
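The masking itself is plain relative-error thresholding; a toy sketch of the mechanics (values assumed for illustration):

import numpy as np
import scipp as sc

table = sc.DataArray(
    sc.array(dims=["toa"], values=[10.0, 20.0], variances=[1.0, 25.0], unit="us")
)
rel_err = sc.stddevs(table.data) / sc.values(table.data)  # [0.1, 0.25]
masked = table.copy()
masked.values[(rel_err > sc.scalar(0.2)).values] = np.nan  # second bin masked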
+ def find_fastest_neutron(simulation: SimulationResults) -> FastestNeutron:
+     """
+     Find the fastest neutron in the simulation results.
+     """
+     ind = np.argmax(simulation.speed.values)
+     return FastestNeutron(
+         time_of_arrival=simulation.time_of_arrival[ind],
+         speed=simulation.speed[ind],
+         distance=simulation.distance,
+     )
+
+
+ def pivot_time_at_detector(
+     fastest_neutron: FastestNeutron, ltotal: Ltotal
+ ) -> PivotTimeAtDetector:
+     """
+     Compute the pivot time at the detector, i.e., the time of the start of the frame at
+     the detector.
+     The assumption here is that the fastest neutron in the simulation results is the one
+     that arrives at the detector first.
+     There is an edge case where a slightly slower neutron that leaves the source earlier
+     could arrive at the detector first, but this is rare in practice, and the
+     difference in arrival times is likely to be small.
+
+     Parameters
+     ----------
+     fastest_neutron:
+         Properties of the fastest neutron in the simulation results.
+     ltotal:
+         Total length of the flight path from the source to the detector.
+     """
+     dist = ltotal - fastest_neutron.distance.to(unit=ltotal.unit)
+     toa = fastest_neutron.time_of_arrival + (dist / fastest_neutron.speed).to(
+         unit=fastest_neutron.time_of_arrival.unit, copy=False
+     )
+     return PivotTimeAtDetector(toa)
+
+
+ def unwrapped_time_of_arrival(
+     da: RawData, offset: PulseStrideOffset, pulse_period: PulsePeriod
+ ) -> UnwrappedTimeOfArrival:
+     """
+     Compute the unwrapped time of arrival of the neutron at the detector.
+     For event data, this is essentially ``event_time_offset + event_time_zero``.
+
+     Parameters
+     ----------
+     da:
+         Raw detector data loaded from a NeXus file, e.g., NXdetector containing
+         NXevent_data.
+     offset:
+         Integer offset of the first pulse in the stride (typically zero unless we are
+         using pulse-skipping and the events do not begin with the first pulse in the
+         stride).
+     pulse_period:
+         Period of the source pulses, i.e., time between consecutive pulse starts.
+     """
+     if da.bins is None:
+         # 'time_of_flight' is the canonical name in NXmonitor, but in some files, it
+         # may be called 'tof'.
+         key = next(iter(set(da.coords.keys()) & {"time_of_flight", "tof"}))
+         toa = da.coords[key]
+     else:
+         # To unwrap the time of arrival, we want to add the event_time_zero to the
+         # event_time_offset. However, we do not really care about the exact datetimes,
+         # we just want to know the offsets with respect to the start of the run.
+         # Hence we use the smallest event_time_zero as the time origin.
+         time_zero = da.coords["event_time_zero"] - da.coords["event_time_zero"].min()
+         coord = da.bins.coords["event_time_offset"]
+         unit = elem_unit(coord)
+         toa = (
+             coord
+             + time_zero.to(dtype=float, unit=unit, copy=False)
+             - (offset * pulse_period).to(unit=unit, copy=False)
+         )
+     return UnwrappedTimeOfArrival(toa)
+
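A minimal sketch of the unwrapping arithmetic on dense toy values (all numbers assumed): with no pulse-skipping (offset = 0), the unwrapped arrival time is simply the offset within the pulse plus the pulse's start time relative to the run start.

import scipp as sc

event_time_zero = sc.array(dims=["pulse"], values=[0.0, 71.4], unit="ms")
event_time_offset = sc.array(dims=["pulse"], values=[10.0, 5.0], unit="ms")
toa = event_time_offset + event_time_zero  # [10.0, 76.4] ms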
+
+ def unwrapped_time_of_arrival_minus_frame_pivot_time(
+     toa: UnwrappedTimeOfArrival, pivot_time: PivotTimeAtDetector
+ ) -> UnwrappedTimeOfArrivalMinusPivotTime:
+     """
+     Compute the time of arrival of the neutron at the detector, unwrapped at the pulse
+     period, minus the start time of the frame.
+     We subtract the start time of the frame so that we can use a modulo operation to
+     wrap the time of arrival at the frame period in the case of pulse-skipping.
+
+     Parameters
+     ----------
+     toa:
+         Time of arrival of the neutron at the detector, unwrapped at the pulse period.
+     pivot_time:
+         Pivot time at the detector, i.e., the time of the start of the frame at the
+         detector.
+     """
+     # Order of operations is chosen to preserve the dimension order
+     return UnwrappedTimeOfArrivalMinusPivotTime(
+         -pivot_time.to(unit=elem_unit(toa), copy=False) + toa
+     )
+
+
+ def time_of_arrival_minus_pivot_time_modulo_period(
+     toa_minus_pivot_time: UnwrappedTimeOfArrivalMinusPivotTime,
+     frame_period: FramePeriod,
+ ) -> TimeOfArrivalMinusPivotTimeModuloPeriod:
+     """
+     Compute the time of arrival of the neutron at the detector, unwrapped at the pulse
+     period, minus the start time of the frame, modulo the frame period.
+
+     Parameters
+     ----------
+     toa_minus_pivot_time:
+         Time of arrival of the neutron at the detector, unwrapped at the pulse period,
+         minus the start time of the frame.
+     frame_period:
+         Period of the frame, i.e., time between the start of two consecutive frames.
+     """
+     return TimeOfArrivalMinusPivotTimeModuloPeriod(
+         toa_minus_pivot_time
+         % frame_period.to(unit=elem_unit(toa_minus_pivot_time), copy=False)
+     )
+
+
+ def time_of_arrival_folded_by_frame(
+     toa: TimeOfArrivalMinusPivotTimeModuloPeriod,
+     pivot_time: PivotTimeAtDetector,
+ ) -> FrameFoldedTimeOfArrival:
+     """
+     The time of arrival of the neutron at the detector, folded by the frame period.
+
+     Parameters
+     ----------
+     toa:
+         Time of arrival of the neutron at the detector, unwrapped at the pulse period,
+         minus the start time of the frame, modulo the frame period.
+     pivot_time:
+         Pivot time at the detector, i.e., the time of the start of the frame at the
+         detector.
+     """
+     return FrameFoldedTimeOfArrival(
+         toa + pivot_time.to(unit=elem_unit(toa), copy=False)
+     )
+
+
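Putting the three steps above together on assumed numbers: with a 142.9 ms frame period (14 Hz source, pulse stride 2) and a 30 ms pivot time at the detector, an unwrapped arrival time of 250 ms folds back into the frame as follows.

import scipp as sc

period = sc.scalar(142.9, unit="ms")  # 14 Hz source, pulse stride 2
pivot = sc.scalar(30.0, unit="ms")    # assumed pivot time at the detector
toa = sc.scalar(250.0, unit="ms")     # unwrapped arrival time

shifted = toa - pivot       # 220.0 ms
wrapped = shifted % period  # 77.1 ms, now within a single frame
folded = wrapped + pivot    # 107.1 ms, the frame-folded time of arrival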
+ def time_of_flight_data(
+     da: RawData,
+     lookup: MaskedTimeOfFlightLookupTable,
+     ltotal: Ltotal,
+     toas: FrameFoldedTimeOfArrival,
+ ) -> TofData:
+     """
+     Convert the time-of-arrival data to time-of-flight data using a lookup table.
+     The output data will have a time-of-flight coordinate.
+
+     Parameters
+     ----------
+     da:
+         Raw detector data loaded from a NeXus file, e.g., NXdetector containing
+         NXevent_data.
+     lookup:
+         Lookup table giving time-of-flight as a function of distance and time of
+         arrival.
+     ltotal:
+         Total length of the flight path from the source to the detector.
+     toas:
+         Time of arrival of the neutron at the detector, folded by the frame period.
+     """
+     from scipy.interpolate import RegularGridInterpolator
+
+     # TODO: to make use of multi-threading, we could write our own interpolator.
+     # This should be simple enough as we are making the bins linspace, so computing
+     # bin indices is fast.
+     f = RegularGridInterpolator(
+         (
+             lookup.coords["toa"].to(unit=elem_unit(toas), copy=False).values,
+             lookup.coords["distance"].to(unit=ltotal.unit, copy=False).values,
+         ),
+         lookup.data.to(unit=elem_unit(toas), copy=False).values.T,
+         method="linear",
+         bounds_error=False,
+     )
+
+     if da.bins is not None:
+         ltotal = sc.bins_like(toas, ltotal).bins.constituents["data"]
+         toas = toas.bins.constituents["data"]
+
+     tofs = sc.array(
+         dims=toas.dims, values=f((toas.values, ltotal.values)), unit=elem_unit(toas)
+     )
+
+     if da.bins is not None:
+         parts = da.bins.constituents
+         parts["data"] = tofs
+         out = da.bins.assign_coords(tof=_bins_no_validate(**parts))
+     else:
+         out = da.assign_coords(tof=tofs)
+
+     return TofData(out)
+
+
+ def resample_tof_data(da: TofData) -> ResampledTofData:
+     """
+     Histogrammed data that has been converted to `tof` will typically have
+     unsorted bin edges (due to either wrapping of `time_of_flight` or wavelength
+     overlap between subframes).
+     This function re-histograms the data to ensure that the bin edges are sorted.
+     It makes use of the ``to_events`` helper, which generates a number of events in each
+     bin with a uniform distribution. The new events are then histogrammed using a set of
+     sorted bin edges.
+
+     WARNING:
+     This function is highly experimental, has limitations, and should be used with
+     caution. It is a workaround for the fact that rebinning data with unsorted bin
+     edges is not supported in scipp.
+     As such, this function is not part of the default set of providers, and needs to be
+     inserted manually into the workflow.
+
+     Parameters
+     ----------
+     da:
+         Histogrammed data with the time-of-flight coordinate.
+     """
+     dim = next(iter(set(da.dims) & {"time_of_flight", "tof"}))
+     events = to_events(da.rename_dims({dim: "tof"}), "event")
+
+     # Define a new bin width, close to the original bin width.
+     # TODO: this could be a workflow parameter
+     coord = da.coords[dim]
+     bin_width = (coord[dim, 1:] - coord[dim, :-1]).nanmedian()
+     rehist = events.hist(tof=bin_width)
+     for key, var in da.coords.items():
+         if dim not in var.dims:
+             rehist.coords[key] = var
+     return ResampledTofData(rehist)
+
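Because ``resample_tof_data`` is not a default provider, it must be inserted by hand. A minimal sketch with sciline, assuming a pipeline built from the ``providers()`` and ``default_parameters()`` defined just below:

import sciline as sl

pipeline = sl.Pipeline(providers(), params=default_parameters())
pipeline.insert(resample_tof_data)  # adds the ResampledTofData step
# ResampledTofData can now be computed once RawData etc. have been set.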
+
+ def default_parameters() -> dict:
+     """
+     Default parameters of the time-of-flight workflow.
+     """
+     return {
+         PulsePeriod: 1.0 / sc.scalar(14.0, unit="Hz"),
+         PulseStride: 1,
+         PulseStrideOffset: 0,
+         DistanceResolution: sc.scalar(0.1, unit="m"),
+         TimeOfArrivalResolution: 500,
+         LookupTableRelativeErrorThreshold: 0.1,
+     }
+
+
+ def providers() -> tuple[Callable, ...]:
+     """
+     Providers of the time-of-flight workflow.
+     """
+     return (
+         compute_tof_lookup_table,
+         extract_ltotal,
+         find_fastest_neutron,
+         frame_period,
+         masked_tof_lookup_table,
+         pivot_time_at_detector,
+         time_of_arrival_folded_by_frame,
+         time_of_arrival_minus_pivot_time_modulo_period,
+         time_of_flight_data,
+         unwrapped_time_of_arrival,
+         unwrapped_time_of_arrival_minus_frame_pivot_time,
+     )
+
+
458
+
459
+ class TofWorkflow:
460
+ """
461
+ Helper class to build a time-of-flight workflow and cache the expensive part of the
462
+ computation: running the simulation and building the lookup table.
463
+
464
+ Parameters
465
+ ----------
466
+ simulated_neutrons:
467
+ Results of a time-of-flight simulation used to create a lookup table.
468
+ The results should be a flat table with columns for time-of-arrival, speed,
469
+ wavelength, and weight.
470
+ ltotal_range:
471
+ Range of total flight path lengths from the source to the detector.
472
+ This is used to create the lookup table to compute the neutron
473
+ time-of-flight.
474
+ Note that the resulting table will extend slightly beyond this range, as the
475
+ supplied range is not necessarily a multiple of the distance resolution.
476
+ pulse_stride:
477
+ Stride of used pulses. Usually 1, but may be a small integer when
478
+ pulse-skipping.
479
+ pulse_stride_offset:
480
+ Integer offset of the first pulse in the stride (typically zero unless we
481
+ are using pulse-skipping and the events do not begin with the first pulse in
482
+ the stride).
483
+ distance_resolution:
484
+ Resolution of the distance axis in the lookup table.
485
+ Should be a single scalar value with a unit of length.
486
+ This is typically of the order of 1-10 cm.
487
+ toa_resolution:
488
+ Resolution of the time of arrival axis in the lookup table.
489
+ Can be an integer (number of bins) or a sc.Variable (bin width).
490
+ error_threshold:
491
+ Threshold for the variance of the projected time-of-flight above which
492
+ regions are masked.
493
+ """
494
+
495
+ def __init__(
496
+ self,
497
+ simulated_neutrons: SimulationResults,
498
+ ltotal_range: LtotalRange,
499
+ pulse_stride: PulseStride | None = None,
500
+ pulse_stride_offset: PulseStrideOffset | None = None,
501
+ distance_resolution: DistanceResolution | None = None,
502
+ toa_resolution: TimeOfArrivalResolution | None = None,
503
+ error_threshold: LookupTableRelativeErrorThreshold | None = None,
504
+ ):
505
+ import sciline as sl
506
+
507
+ self.pipeline = sl.Pipeline(providers())
508
+ self.pipeline[SimulationResults] = simulated_neutrons
509
+ self.pipeline[LtotalRange] = ltotal_range
510
+
511
+ params = default_parameters()
512
+ self.pipeline[PulsePeriod] = params[PulsePeriod]
513
+ self.pipeline[PulseStride] = pulse_stride or params[PulseStride]
514
+ self.pipeline[PulseStrideOffset] = (
515
+ pulse_stride_offset or params[PulseStrideOffset]
516
+ )
517
+ self.pipeline[DistanceResolution] = (
518
+ distance_resolution or params[DistanceResolution]
519
+ )
520
+ self.pipeline[TimeOfArrivalResolution] = (
521
+ toa_resolution or params[TimeOfArrivalResolution]
522
+ )
523
+ self.pipeline[LookupTableRelativeErrorThreshold] = (
524
+ error_threshold or params[LookupTableRelativeErrorThreshold]
525
+ )
526
+
527
+ def cache_results(
528
+ self,
529
+ results=(SimulationResults, MaskedTimeOfFlightLookupTable, FastestNeutron),
530
+ ) -> None:
531
+ """
532
+ Cache a list of (usually expensive to compute) intermediate results of the
533
+ time-of-flight workflow.
534
+
535
+ Parameters
536
+ ----------
537
+ results:
538
+ List of results to cache.
539
+ """
540
+ for t in results:
541
+ self.pipeline[t] = self.pipeline.compute(t)
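
Finally, an end-to-end sketch of the helper class; ``simulated_neutrons`` and ``detector_data`` are assumed placeholder inputs (e.g., from a source simulation and a NeXus file):

import scipp as sc

wf = TofWorkflow(
    simulated_neutrons=simulated_neutrons,
    ltotal_range=(sc.scalar(75.0, unit="m"), sc.scalar(80.0, unit="m")),
)
wf.cache_results()  # run the simulation-derived steps once and store them

wf.pipeline[RawData] = detector_data
tof_data = wf.pipeline.compute(TofData)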