essreduce-25.5.2-py3-none-any.whl → essreduce-25.7.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -6,57 +6,54 @@ Utilities for computing real neutron time-of-flight from chopper settings and
  neutron time-of-arrival at the detectors.
  """

- from .eto_to_tof import (
-     default_parameters,
-     providers,
-     resample_detector_time_of_flight_data,
-     resample_monitor_time_of_flight_data,
+ from ..nexus.types import DiskChoppers
+ from .eto_to_tof import providers
+ from .lut import (
+     DistanceResolution,
+     LookupTableRelativeErrorThreshold,
+     LtotalRange,
+     NumberOfSimulatedNeutrons,
+     PulsePeriod,
+     PulseStride,
+     SimulationResults,
+     SimulationSeed,
+     SourcePosition,
+     TimeResolution,
+     TofLookupTableWorkflow,
+     simulate_chopper_cascade_using_tof,
  )
- from .simulation import simulate_beamline
- from .to_events import to_events
  from .types import (
      DetectorLtotal,
      DetectorTofData,
-     DistanceResolution,
-     LookupTableRelativeErrorThreshold,
-     LtotalRange,
      MonitorLtotal,
      MonitorTofData,
-     PulsePeriod,
-     PulseStride,
      PulseStrideOffset,
-     ResampledDetectorTofData,
-     ResampledMonitorTofData,
-     SimulationResults,
      TimeOfFlightLookupTable,
      TimeOfFlightLookupTableFilename,
-     TimeResolution,
  )
- from .workflow import GenericTofWorkflow, TofLutProvider
+ from .workflow import GenericTofWorkflow

  __all__ = [
      "DetectorLtotal",
      "DetectorTofData",
+     "DiskChoppers",
      "DistanceResolution",
      "GenericTofWorkflow",
      "LookupTableRelativeErrorThreshold",
      "LtotalRange",
      "MonitorLtotal",
      "MonitorTofData",
+     "NumberOfSimulatedNeutrons",
      "PulsePeriod",
      "PulseStride",
      "PulseStrideOffset",
-     "ResampledDetectorTofData",
-     "ResampledMonitorTofData",
      "SimulationResults",
+     "SimulationSeed",
+     "SourcePosition",
      "TimeOfFlightLookupTable",
      "TimeOfFlightLookupTableFilename",
      "TimeResolution",
-     "TofLutProvider",
-     "default_parameters",
+     "TofLookupTableWorkflow",
      "providers",
-     "resample_detector_time_of_flight_data",
-     "resample_monitor_time_of_flight_data",
-     "simulate_beamline",
-     "to_events",
+     "simulate_chopper_cascade_using_tof",
  ]
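For orientation, a minimal usage sketch against the reshaped 25.7.0 public API declared by the new __all__ above. The module path ess.reduce.time_of_flight is an assumption inferred from the relative imports; only names present in the new __all__ are used.

    from ess.reduce.time_of_flight import (  # module path assumed, not shown in this diff
        GenericTofWorkflow,
        TofLookupTableWorkflow,
        simulate_chopper_cascade_using_tof,
    )

    # Names dropped from the public API in 25.7.0 (imports of these now fail):
    # default_parameters, simulate_beamline, to_events, TofLutProvider,
    # ResampledDetectorTofData, ResampledMonitorTofData,
    # resample_detector_time_of_flight_data, resample_monitor_time_of_flight_data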
@@ -27,284 +27,17 @@ from ..nexus.types import (
      MonitorType,
      RunType,
  )
- from .to_events import to_events
+ from .resample import rebin_strictly_increasing
  from .types import (
      DetectorLtotal,
      DetectorTofData,
-     DistanceResolution,
-     LookupTableRelativeErrorThreshold,
-     LtotalRange,
      MonitorLtotal,
      MonitorTofData,
-     PulsePeriod,
-     PulseStride,
      PulseStrideOffset,
-     ResampledDetectorTofData,
-     ResampledMonitorTofData,
-     SimulationResults,
      TimeOfFlightLookupTable,
-     TimeResolution,
  )


- def _mask_large_uncertainty(table: sc.DataArray, error_threshold: float):
-     """
-     Mask regions with large uncertainty with NaNs.
-     The values are modified in place in the input table.
-
-     Parameters
-     ----------
-     table:
-         Lookup table with time-of-flight as a function of distance and time-of-arrival.
-     error_threshold:
-         Threshold for the relative standard deviation (coefficient of variation) of the
-         projected time-of-flight above which values are masked.
-     """
-     # Finally, mask regions with large uncertainty with NaNs.
-     relative_error = sc.stddevs(table.data) / sc.values(table.data)
-     mask = relative_error > sc.scalar(error_threshold)
-     # Use numpy for indexing as table is 2D
-     table.values[mask.values] = np.nan
-
-
- def _compute_mean_tof_in_distance_range(
-     simulation: SimulationResults,
-     distance_bins: sc.Variable,
-     time_bins: sc.Variable,
-     distance_unit: str,
-     time_unit: str,
-     frame_period: sc.Variable,
-     time_bins_half_width: sc.Variable,
- ) -> sc.DataArray:
-     """
-     Compute the mean time-of-flight inside event_time_offset bins for a given range of
-     distances.
-
-     Parameters
-     ----------
-     simulation:
-         Results of a time-of-flight simulation used to create a lookup table.
-     distance_bins:
-         Bin edges for the distance axis in the lookup table.
-     time_bins:
-         Bin edges for the event_time_offset axis in the lookup table.
-     distance_unit:
-         Unit of the distance axis.
-     time_unit:
-         Unit of the event_time_offset axis.
-     frame_period:
-         Period of the source pulses, i.e., time between consecutive pulse starts.
-     time_bins_half_width:
-         Half width of the time bins in the event_time_offset axis.
-     """
-     simulation_distance = simulation.distance.to(unit=distance_unit)
-     distances = sc.midpoints(distance_bins)
-     # Compute arrival and flight times for all neutrons
-     toas = simulation.time_of_arrival + (distances / simulation.speed).to(
-         unit=time_unit, copy=False
-     )
-     dist = distances + simulation_distance
-     tofs = dist * (sc.constants.m_n / sc.constants.h) * simulation.wavelength
-
-     data = sc.DataArray(
-         data=sc.broadcast(simulation.weight, sizes=toas.sizes),
-         coords={
-             "toa": toas,
-             "tof": tofs.to(unit=time_unit, copy=False),
-             "distance": dist,
-         },
-     ).flatten(to="event")
-
-     # Add the event_time_offset coordinate, wrapped to the frame_period
-     data.coords['event_time_offset'] = data.coords['toa'] % frame_period
-
-     # Because we staggered the mesh by half a bin width, we want the values above
-     # the last bin edge to wrap around to the first bin.
-     # Technically, those values should end up between -0.5*bin_width and 0, but
-     # a simple modulo also works here because even if they end up between 0 and
-     # 0.5*bin_width, we are (below) computing the mean between -0.5*bin_width and
-     # 0.5*bin_width and it yields the same result.
-     # data.coords['event_time_offset'] %= pulse_period - time_bins_half_width
-     data.coords['event_time_offset'] %= frame_period - time_bins_half_width
-
-     binned = data.bin(
-         distance=distance_bins + simulation_distance, event_time_offset=time_bins
-     )
-
-     # Weighted mean of tof inside each bin
-     mean_tof = (
-         binned.bins.data * binned.bins.coords["tof"]
-     ).bins.sum() / binned.bins.sum()
-     # Compute the variance of the tofs to track regions with large uncertainty
-     variance = (
-         binned.bins.data * (binned.bins.coords["tof"] - mean_tof) ** 2
-     ).bins.sum() / binned.bins.sum()
-
-     mean_tof.variances = variance.values
-     return mean_tof
-
-
- def compute_tof_lookup_table(
-     simulation: SimulationResults,
-     ltotal_range: LtotalRange,
-     distance_resolution: DistanceResolution,
-     time_resolution: TimeResolution,
-     pulse_period: PulsePeriod,
-     pulse_stride: PulseStride,
-     error_threshold: LookupTableRelativeErrorThreshold,
- ) -> TimeOfFlightLookupTable:
-     """
-     Compute a lookup table for time-of-flight as a function of distance and
-     time-of-arrival.
-
-     Parameters
-     ----------
-     simulation:
-         Results of a time-of-flight simulation used to create a lookup table.
-         The results should be a flat table with columns for time-of-arrival, speed,
-         wavelength, and weight.
-     ltotal_range:
-         Range of total flight path lengths from the source to the detector.
-     distance_resolution:
-         Resolution of the distance axis in the lookup table.
-     time_resolution:
-         Resolution of the time-of-arrival axis in the lookup table. Must be an integer.
-     pulse_period:
-         Period of the source pulses, i.e., time between consecutive pulse starts.
-     pulse_stride:
-         Stride of used pulses. Usually 1, but may be a small integer when
-         pulse-skipping.
-     error_threshold:
-         Threshold for the relative standard deviation (coefficient of variation) of the
-         projected time-of-flight above which values are masked.
-
-     Notes
-     -----
-
-     Below are some details about the binning and wrapping around frame period in the
-     time dimension.
-
-     We have some simulated ``toa`` (events) from a Tof/McStas simulation.
-     Those are absolute ``toa``, unwrapped.
-     First we compute the usual ``event_time_offset = toa % frame_period``.
-
-     Now, we want to ensure periodic boundaries. If we make a bin centered around 0,
-     and a bin centered around 71ms: the first bin will use events between 0 and
-     ``0.5 * dt`` (where ``dt`` is the bin width).
-     The last bin will use events between ``frame_period - 0.5*dt`` and
-     ``frame_period + 0.5 * dt``. So when we compute the mean inside those two bins,
-     they will not yield the same results.
-     It is as if the first bin is missing the events it should have between
-     ``-0.5 * dt`` and 0 (because of the modulo we computed above).
-
-     To fix this, we do not make a last bin around 71ms (the bins stop at
-     ``frame_period - 0.5*dt``). Instead, we compute modulo a second time,
-     but this time using ``event_time_offset %= (frame_period - 0.5*dt)``.
-     (we cannot directly do ``event_time_offset = toa % (frame_period - 0.5*dt)`` in a
-     single step because it would introduce a gradual shift,
-     as the pulse number increases).
-
-     This second modulo effectively takes all the events that would have gone in the
-     last bin (between ``frame_period - 0.5*dt`` and ``frame_period``) and puts them in
-     the first bin. Instead of placing them between ``-0.5*dt`` and 0,
-     it places them between 0 and ``0.5*dt``, but this does not really matter,
-     because we then take the mean inside the first bin.
-     Whether the events are on the left or right side of zero does not matter.
-
-     Finally, we make a copy of the left edge, and append it to the right of the table,
-     thus ensuring that the values on the right edge are strictly the same as on the
-     left edge.
-     """
-     distance_unit = "m"
-     time_unit = simulation.time_of_arrival.unit
-     res = distance_resolution.to(unit=distance_unit)
-     pulse_period = pulse_period.to(unit=time_unit)
-     frame_period = pulse_period * pulse_stride
-
-     min_dist, max_dist = (
-         x.to(unit=distance_unit) - simulation.distance.to(unit=distance_unit)
-         for x in ltotal_range
-     )
-     # We need to bin the data below, to compute the weighted mean of the wavelength.
-     # This results in data with bin edges.
-     # However, the 2d interpolator expects bin centers.
-     # We want to give the 2d interpolator a table that covers the requested range,
-     # hence we need to extend the range by at least half a resolution in each direction.
-     # Then, we make the choice that the resolution in distance is the quantity that
-     # should be preserved. Because the difference between min and max distance is
-     # not necessarily an integer multiple of the resolution, we need to add a pad to
-     # ensure that the last bin is not cut off. We want the upper edge to be higher than
-     # the maximum distance, hence we pad with an additional 1.5 x resolution.
-     pad = 2.0 * res
-     distance_bins = sc.arange('distance', min_dist - pad, max_dist + pad, res)
-
-     # Create some time bins for event_time_offset.
-     # We want our final table to strictly cover the range [0, frame_period].
-     # However, binning the data associates mean values inside the bins to the bin
-     # centers. Instead, we stagger the mesh by half a bin width so we are computing
-     # values for the final mesh edges (the bilinear interpolation needs values on the
-     # edges/corners).
-     nbins = int(frame_period / time_resolution.to(unit=time_unit)) + 1
-     time_bins = sc.linspace(
-         'event_time_offset', 0.0, frame_period.value, nbins + 1, unit=pulse_period.unit
-     )
-     time_bins_half_width = 0.5 * (time_bins[1] - time_bins[0])
-     time_bins -= time_bins_half_width
-
-     # To avoid a too large RAM usage, we compute the table in chunks, and piece them
-     # together at the end.
-     ndist = len(distance_bins) - 1
-     max_size = 2e7
-     total_size = ndist * len(simulation.time_of_arrival)
-     nchunks = total_size / max_size
-     chunk_size = int(ndist / nchunks) + 1
-     pieces = []
-     for i in range(int(nchunks) + 1):
-         dist_edges = distance_bins[i * chunk_size : (i + 1) * chunk_size + 1]
-
-         pieces.append(
-             _compute_mean_tof_in_distance_range(
-                 simulation=simulation,
-                 distance_bins=dist_edges,
-                 time_bins=time_bins,
-                 distance_unit=distance_unit,
-                 time_unit=time_unit,
-                 frame_period=frame_period,
-                 time_bins_half_width=time_bins_half_width,
-             )
-         )
-
-     table = sc.concat(pieces, 'distance')
-     table.coords["distance"] = sc.midpoints(table.coords["distance"])
-     table.coords["event_time_offset"] = sc.midpoints(table.coords["event_time_offset"])
-
-     # Copy the left edge to the right to create periodic boundary conditions
-     table = sc.DataArray(
-         data=sc.concat(
-             [table.data, table.data['event_time_offset', 0]], dim='event_time_offset'
-         ),
-         coords={
-             "distance": table.coords["distance"],
-             "event_time_offset": sc.concat(
-                 [table.coords["event_time_offset"], frame_period],
-                 dim='event_time_offset',
-             ),
-             "pulse_period": pulse_period,
-             "pulse_stride": sc.scalar(pulse_stride, unit=None),
-             "distance_resolution": table.coords["distance"][1]
-             - table.coords["distance"][0],
-             "time_resolution": table.coords["event_time_offset"][1]
-             - table.coords["event_time_offset"][0],
-             "error_threshold": sc.scalar(error_threshold),
-         },
-     )
-
-     # In-place masking for better performance
-     _mask_large_uncertainty(table, error_threshold)
-
-     return TimeOfFlightLookupTable(table)
-
-
  class TofInterpolator:
      def __init__(self, lookup: sc.DataArray, distance_unit: str, time_unit: str):
          self._distance_unit = distance_unit
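As a worked example of the kinematic relation used by the removed lookup-table code above (tof = Ltotal * m_n * wavelength / h), here is a small sketch; the 160 m flight path and 1.8 Å wavelength are illustrative values, not taken from the package:

    import scipp as sc

    wavelength = sc.scalar(1.8, unit='angstrom')   # illustrative value
    ltotal = sc.scalar(160.0, unit='m')            # illustrative flight path
    tof = (ltotal * (sc.constants.m_n / sc.constants.h) * wavelength).to(unit='ms')
    # ~72.8 ms, i.e. on the order of one 14 Hz frame period (~71.4 ms), which is why
    # the removed code wraps event_time_offset to the frame period before averaging.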
@@ -362,8 +95,8 @@ def _time_of_flight_data_histogram(
      da: sc.DataArray, lookup: sc.DataArray, ltotal: sc.Variable
  ) -> sc.DataArray:
      # In NeXus, 'time_of_flight' is the canonical name in NXmonitor, but in some files,
-     # it may be called 'tof'.
-     key = next(iter(set(da.coords.keys()) & {"time_of_flight", "tof"}))
+     # it may be called 'tof' or 'frame_time'.
+     key = next(iter(set(da.coords.keys()) & {"time_of_flight", "tof", "frame_time"}))
      raw_eto = da.coords[key].to(dtype=float, copy=False)
      eto_unit = raw_eto.unit
      pulse_period = lookup.coords["pulse_period"].to(unit=eto_unit)
@@ -389,7 +122,9 @@ def _time_of_flight_data_histogram(
          pulse_period=pulse_period,
      )

-     return rebinned.assign_coords(tof=tofs)
+     return rebinned.assign_coords(tof=tofs).drop_coords(
+         list({key} & {"time_of_flight", "frame_time"})
+     )


  def _guess_pulse_stride_offset(
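A note on the drop_coords call added above: the intersection {key} & {"time_of_flight", "frame_time"} deliberately omits 'tof', so the freshly assigned 'tof' coordinate is kept while a raw 'time_of_flight' or 'frame_time' input coordinate is removed. The set logic, in plain Python:

    for key in ("tof", "time_of_flight", "frame_time"):
        print(key, list({key} & {"time_of_flight", "frame_time"}))
    # tof []                             -> nothing dropped, the new 'tof' coord survives
    # time_of_flight ['time_of_flight']  -> raw NeXus coordinate is dropped
    # frame_time ['frame_time']          -> likewise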
@@ -531,7 +266,8 @@ def _time_of_flight_data_events(

      parts = da.bins.constituents
      parts["data"] = tofs
-     return da.bins.assign_coords(tof=sc.bins(**parts, validate_indices=False))
+     result = da.bins.assign_coords(tof=sc.bins(**parts, validate_indices=False))
+     return result.bins.drop_coords("event_time_offset")


  def detector_ltotal_from_straight_line_approximation(
@@ -586,7 +322,8 @@ def _compute_tof_data(
      pulse_stride_offset: int,
  ) -> sc.DataArray:
      if da.bins is None:
-         return _time_of_flight_data_histogram(da=da, lookup=lookup, ltotal=ltotal)
+         data = _time_of_flight_data_histogram(da=da, lookup=lookup, ltotal=ltotal)
+         return rebin_strictly_increasing(data, dim='tof')
      else:
          return _time_of_flight_data_events(
              da=da,
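The rebin_strictly_increasing call added above addresses the situation described in the docstring of the removed _resample_tof_data further down: after conversion, the tof bin edges of histogrammed data are generally not monotonic because of frame wrapping. A toy illustration with invented numbers:

    import numpy as np

    tof_edges = np.array([60.0, 65.0, 70.0, 4.0, 9.0])  # ms, edges wrapped across a frame
    print(np.all(np.diff(tof_edges) > 0))  # False: unusable as histogram edges without rebinning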
@@ -664,82 +401,11 @@ def monitor_time_of_flight_data(
      )


- def _resample_tof_data(da: sc.DataArray) -> sc.DataArray:
-     """
-     Histogrammed data that has been converted to `tof` will typically have
-     unsorted bin edges (due to either wrapping of `time_of_flight` or wavelength
-     overlap between subframes).
-     This function re-histograms the data to ensure that the bin edges are sorted.
-     It makes use of the ``to_events`` helper which generates a number of events in each
-     bin with a uniform distribution. The new events are then histogrammed using a set of
-     sorted bin edges.
-
-     WARNING:
-     This function is highly experimental, has limitations and should be used with
-     caution. It is a workaround to the issue that rebinning data with unsorted bin
-     edges is not supported in scipp.
-     As such, this function is not part of the default set of providers, and needs to be
-     inserted manually into the workflow.
-
-     Parameters
-     ----------
-     da:
-         Histogrammed data with the time-of-flight coordinate.
-     """
-     dim = next(iter(set(da.dims) & {"time_of_flight", "tof"}))
-     data = da.rename_dims({dim: "tof"}).drop_coords(
-         [name for name in da.coords if name != "tof"]
-     )
-     events = to_events(data, "event")
-
-     # Define a new bin width, close to the original bin width.
-     # TODO: this could be a workflow parameter
-     coord = da.coords["tof"]
-     bin_width = (coord[dim, 1:] - coord[dim, :-1]).nanmedian()
-     rehist = events.hist(tof=bin_width)
-     return rehist.assign_coords(
-         {key: var for key, var in da.coords.items() if dim not in var.dims}
-     )
-
-
- def resample_detector_time_of_flight_data(
-     da: DetectorTofData[RunType],
- ) -> ResampledDetectorTofData[RunType]:
-     """
-     Resample the detector time-of-flight data to ensure that the bin edges are sorted.
-     """
-     return ResampledDetectorTofData(_resample_tof_data(da))
-
-
- def resample_monitor_time_of_flight_data(
-     da: MonitorTofData[RunType, MonitorType],
- ) -> ResampledMonitorTofData[RunType, MonitorType]:
-     """
-     Resample the monitor time-of-flight data to ensure that the bin edges are sorted.
-     """
-     return ResampledMonitorTofData(_resample_tof_data(da))
-
-
- def default_parameters() -> dict:
-     """
-     Default parameters of the time-of-flight workflow.
-     """
-     return {
-         PulsePeriod: 1.0 / sc.scalar(14.0, unit="Hz"),
-         PulseStride: 1,
-         PulseStrideOffset: None,
-         DistanceResolution: sc.scalar(0.1, unit="m"),
-         TimeResolution: sc.scalar(250.0, unit='us'),
-         LookupTableRelativeErrorThreshold: 0.1,
-     }
-
-
  def providers() -> tuple[Callable]:
      """
      Providers of the time-of-flight workflow.
      """
      return (
-         compute_tof_lookup_table,
          detector_time_of_flight_data,
          monitor_time_of_flight_data,
          detector_ltotal_from_straight_line_approximation,
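A quick arithmetic check on the defaults removed above: the default PulsePeriod of 1/14 Hz corresponds to the roughly 71 ms frame period referred to in the removed Notes section (sketch only; values taken from the removed default_parameters):

    import scipp as sc

    pulse_period = 1.0 / sc.scalar(14.0, unit='Hz')  # value from the removed default_parameters
    print(pulse_period.to(unit='ms'))                # ≈ 71.43 ms, the '71ms' of the Notes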