essreduce 25.4.0-py3-none-any.whl → 25.5.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -11,6 +11,7 @@ from collections.abc import Callable
 
 import numpy as np
 import scipp as sc
+import scippneutron as scn
 from scipp._scipp.core import _bins_no_validate
 from scippneutron._utils import elem_unit
 
@@ -18,21 +19,32 @@ try:
     from .interpolator_numba import Interpolator as InterpolatorImpl
 except ImportError:
     from .interpolator_scipy import Interpolator as InterpolatorImpl
+
+from ..nexus.types import (
+    CalibratedBeamline,
+    CalibratedMonitor,
+    DetectorData,
+    MonitorData,
+    MonitorType,
+    RunType,
+)
 from .to_events import to_events
 from .types import (
+    DetectorLtotal,
+    DetectorTofData,
     DistanceResolution,
     LookupTableRelativeErrorThreshold,
-    Ltotal,
     LtotalRange,
+    MonitorLtotal,
+    MonitorTofData,
     PulsePeriod,
     PulseStride,
     PulseStrideOffset,
-    RawData,
-    ResampledTofData,
+    ResampledDetectorTofData,
+    ResampledMonitorTofData,
     SimulationResults,
     TimeOfFlightLookupTable,
     TimeResolution,
-    TofData,
 )
 
 
@@ -84,7 +96,7 @@ def _compute_mean_tof_in_distance_range(
     frame_period:
         Period of the source pulses, i.e., time between consecutive pulse starts.
     time_bins_half_width:
-        Half the width of the time bins.
+        Half width of the time bins in the event_time_offset axis.
     """
     simulation_distance = simulation.distance.to(unit=distance_unit)
     distances = sc.midpoints(distance_bins)
@@ -104,8 +116,7 @@ def _compute_mean_tof_in_distance_range(
         },
     ).flatten(to="event")
 
-    # Add the event_time_offset coordinate to the data. We first operate on the
-    # frame period. The table will later be folded to the pulse period.
+    # Add the event_time_offset coordinate, wrapped to the frame_period
     data.coords['event_time_offset'] = data.coords['toa'] % frame_period
 
     # Because we staggered the mesh by half a bin width, we want the values above
@@ -134,51 +145,6 @@ def _compute_mean_tof_in_distance_range(
     return mean_tof
 
 
-def _fold_table_to_pulse_period(
-    table: sc.DataArray, pulse_period: sc.Variable, pulse_stride: int
-) -> sc.DataArray:
-    """
-    Fold the lookup table to the pulse period. We make sure the left and right edges of
-    the table wrap around the ``event_time_offset`` dimension.
-
-    Parameters
-    ----------
-    table:
-        Lookup table with time-of-flight as a function of distance and time-of-arrival.
-    pulse_period:
-        Period of the source pulses, i.e., time between consecutive pulse starts.
-    pulse_stride:
-        Stride of used pulses. Usually 1, but may be a small integer when
-        pulse-skipping.
-    """
-    size = table.sizes['event_time_offset']
-    if (size % pulse_stride) != 0:
-        raise ValueError(
-            "TimeOfFlightLookupTable: the number of time bins must be a multiple of "
-            f"the pulse stride, but got {size} time bins and a pulse stride of "
-            f"{pulse_stride}."
-        )
-
-    size = size // pulse_stride
-    out = sc.concat([table, table['event_time_offset', 0]], dim='event_time_offset')
-    out = sc.concat(
-        [
-            out['event_time_offset', (i * size) : (i + 1) * size + 1]
-            for i in range(pulse_stride)
-        ],
-        dim='pulse',
-    )
-    return out.assign_coords(
-        event_time_offset=sc.concat(
-            [
-                table.coords['event_time_offset']['event_time_offset', :size],
-                pulse_period,
-            ],
-            'event_time_offset',
-        )
-    )
-
-
 def compute_tof_lookup_table(
     simulation: SimulationResults,
     ltotal_range: LtotalRange,
@@ -212,6 +178,43 @@ def compute_tof_lookup_table(
     error_threshold:
         Threshold for the relative standard deviation (coefficient of variation) of the
         projected time-of-flight above which values are masked.
+
+    Notes
+    -----
+
+    Below are some details about the binning and wrapping around frame period in the
+    time dimension.
+
+    We have some simulated ``toa`` (events) from a Tof/McStas simulation.
+    Those are absolute ``toa``, unwrapped.
+    First we compute the usual ``event_time_offset = toa % frame_period``.
+
+    Now, we want to ensure periodic boundaries. If we make a bin centered around 0,
+    and a bin centered around 71ms: the first bin will use events between 0 and
+    ``0.5 * dt`` (where ``dt`` is the bin width).
+    The last bin will use events between ``frame_period - 0.5*dt`` and
+    ``frame_period + 0.5 * dt``. So when we compute the mean inside those two bins,
+    they will not yield the same results.
+    It is as if the first bin is missing the events it should have between
+    ``-0.5 * dt`` and 0 (because of the modulo we computed above).
+
+    To fix this, we do not make a last bin around 71ms (the bins stop at
+    ``frame_period - 0.5*dt``). Instead, we compute modulo a second time,
+    but this time using ``event_time_offset %= (frame_period - 0.5*dt)``.
+    (we cannot directly do ``event_time_offset = toa % (frame_period - 0.5*dt)`` in a
+    single step because it would introduce a gradual shift,
+    as the pulse number increases).
+
+    This second modulo effectively takes all the events that would have gone in the
+    last bin (between ``frame_period - 0.5*dt`` and ``frame_period``) and puts them in
+    the first bin. Instead of placing them between ``-0.5*dt`` and 0,
+    it places them between 0 and ``0.5*dt``, but this does not really matter,
+    because we then take the mean inside the first bin.
+    Whether the events are on the left or right side of zero does not matter.
+
+    Finally, we make a copy of the left edge, and append it to the right of the table,
+    thus ensuring that the values on the right edge are strictly the same as on the
+    left edge.
     """
     distance_unit = "m"
     time_unit = simulation.time_of_arrival.unit
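The double-modulo wrapping described in the ``Notes`` section added above is easier to follow with concrete numbers. The following is a small illustrative sketch, not part of the package, assuming a 71 ms frame period and a 1 ms bin width:

```python
import numpy as np

frame_period = 71.0  # ms, assumed (the 71 ms used as an example in the docstring)
dt = 1.0             # ms, assumed time-bin width

# Absolute (unwrapped) times of arrival from a hypothetical simulation.
toa = np.array([0.2, 70.8, 141.7, 212.9])

# First modulo: the usual event_time_offset in [0, frame_period).
eto = toa % frame_period            # -> [0.2, 70.8, 70.7, 70.9]

# Second modulo: events in the trailing half-bin [frame_period - 0.5*dt, frame_period)
# are folded back next to the events near zero.
eto %= frame_period - 0.5 * dt
print(eto)                          # -> approximately [0.2, 0.3, 0.2, 0.4]
```

After averaging within each bin, the first and last entries of the table then describe the same arrival times, which is what makes copying the left edge to the right edge (done further down in this diff) a valid periodic boundary.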
@@ -276,16 +279,31 @@ def compute_tof_lookup_table(
     table.coords["distance"] = sc.midpoints(table.coords["distance"])
     table.coords["event_time_offset"] = sc.midpoints(table.coords["event_time_offset"])
 
-    table = _fold_table_to_pulse_period(
-        table=table, pulse_period=pulse_period, pulse_stride=pulse_stride
+    # Copy the left edge to the right to create periodic boundary conditions
+    table = sc.DataArray(
+        data=sc.concat(
+            [table.data, table.data['event_time_offset', 0]], dim='event_time_offset'
+        ),
+        coords={
+            "distance": table.coords["distance"],
+            "event_time_offset": sc.concat(
+                [table.coords["event_time_offset"], frame_period],
+                dim='event_time_offset',
+            ),
+            "pulse_period": pulse_period,
+            "pulse_stride": sc.scalar(pulse_stride, unit=None),
+            "distance_resolution": table.coords["distance"][1]
+            - table.coords["distance"][0],
+            "time_resolution": table.coords["event_time_offset"][1]
+            - table.coords["event_time_offset"][0],
+            "error_threshold": sc.scalar(error_threshold),
+        },
     )
 
     # In-place masking for better performance
     _mask_large_uncertainty(table, error_threshold)
 
-    return TimeOfFlightLookupTable(
-        table.transpose(('pulse', 'distance', 'event_time_offset'))
-    )
+    return TimeOfFlightLookupTable(table)
 
 
 class TofInterpolator:
@@ -293,22 +311,6 @@ class TofInterpolator:
         self._distance_unit = distance_unit
         self._time_unit = time_unit
 
-        # In the pulse dimension, it could be that for a given event_time_offset and
-        # distance, a tof value is finite in one pulse and NaN in the other.
-        # When using the bilinear interpolation, even if the value of the requested
-        # point is exactly 0 or 1 (in the case of pulse_stride=2), the interpolator
-        # will still use all 4 corners surrounding the point. This means that if one of
-        # the corners is NaN, the result will be NaN.
-        # Here, we use a trick where we duplicate the lookup values in the 'pulse'
-        # dimension so that the interpolator has values on bin edges for that dimension.
-        # The interpolator raises an error if axes coordinates are not strictly
-        # monotonic, so we cannot use e.g. [-0.5, 0.5, 0.5, 1.5] in the case of
-        # pulse_stride=2. Instead we use [-0.25, 0.25, 0.75, 1.25].
-        base_grid = np.arange(float(lookup.sizes["pulse"]))
-        self._pulse_edges = np.sort(
-            np.concatenate([base_grid - 0.25, base_grid + 0.25])
-        )
-
         self._time_edges = (
             lookup.coords["event_time_offset"]
             .to(unit=self._time_unit, copy=False)
@@ -321,23 +323,16 @@ class TofInterpolator:
         self._interpolator = InterpolatorImpl(
             time_edges=self._time_edges,
             distance_edges=self._distance_edges,
-            pulse_edges=self._pulse_edges,
-            values=np.repeat(
-                lookup.data.to(unit=self._time_unit, copy=False).values, 2, axis=0
-            ),
+            values=lookup.data.to(unit=self._time_unit, copy=False).values,
         )
 
     def __call__(
         self,
-        pulse_index: sc.Variable,
         ltotal: sc.Variable,
         event_time_offset: sc.Variable,
+        pulse_period: sc.Variable,
+        pulse_index: sc.Variable | None = None,
     ) -> sc.Variable:
-        if pulse_index.unit not in ("", None):
-            raise sc.UnitError(
-                "pulse_index must have unit dimensionless or None, "
-                f"but got unit: {pulse_index.unit}."
-            )
         if ltotal.unit != self._distance_unit:
             raise sc.UnitError(
                 f"ltotal must have unit: {self._distance_unit}, "
@@ -349,31 +344,30 @@ class TofInterpolator:
                 f"but got unit: {event_time_offset.unit}."
             )
         out_dims = event_time_offset.dims
-        pulse_index = pulse_index.values
         ltotal = ltotal.values
        event_time_offset = event_time_offset.values
 
         return sc.array(
             dims=out_dims,
             values=self._interpolator(
-                times=event_time_offset, distances=ltotal, pulse_indices=pulse_index
+                times=event_time_offset,
+                distances=ltotal,
+                pulse_index=pulse_index.values if pulse_index is not None else None,
+                pulse_period=pulse_period.value,
             ),
             unit=self._time_unit,
         )
 
 
 def _time_of_flight_data_histogram(
-    da: sc.DataArray,
-    lookup: sc.DataArray,
-    ltotal: sc.Variable,
-    pulse_period: sc.Variable,
+    da: sc.DataArray, lookup: sc.DataArray, ltotal: sc.Variable
 ) -> sc.DataArray:
     # In NeXus, 'time_of_flight' is the canonical name in NXmonitor, but in some files,
     # it may be called 'tof'.
     key = next(iter(set(da.coords.keys()) & {"time_of_flight", "tof"}))
     raw_eto = da.coords[key].to(dtype=float, copy=False)
     eto_unit = raw_eto.unit
-    pulse_period = pulse_period.to(unit=eto_unit)
+    pulse_period = lookup.coords["pulse_period"].to(unit=eto_unit)
 
     # In histogram mode, because there is a wrap around at the end of the pulse, we
     # need to insert a bin edge at that exact location to avoid having the last bin
@@ -386,31 +380,14 @@ def _time_of_flight_data_histogram(
     rebinned = da.rebin({key: new_bins})
     etos = rebinned.coords[key]
 
-    # In histogram mode, the lookup table cannot have a pulse dimension because we
-    # cannot know in the histogrammed data which pulse the events belong to.
-    # So we merge the pulse dimension in the lookup table. A quick way to do this
-    # is to take the mean of the data along the pulse dimension (there should
-    # only be regions that are NaN in one pulse and finite in the other).
-    merged = lookup.data.nanmean('pulse')
-    dim = merged.dims[0]
-    lookup = sc.DataArray(
-        data=merged.fold(dim=dim, sizes={'pulse': 1, dim: merged.sizes[dim]}),
-        coords={
-            'pulse': sc.arange('pulse', 1.0),
-            'distance': lookup.coords['distance'],
-            'event_time_offset': lookup.coords['event_time_offset'],
-        },
-    )
-    pulse_index = sc.zeros(sizes=etos.sizes)
-
     # Create linear interpolator
     interp = TofInterpolator(lookup, distance_unit=ltotal.unit, time_unit=eto_unit)
 
     # Compute time-of-flight of the bin edges using the interpolator
     tofs = interp(
-        pulse_index=pulse_index,
         ltotal=ltotal.broadcast(sizes=etos.sizes),
         event_time_offset=etos,
+        pulse_period=pulse_period,
     )
 
     return rebinned.assign_coords(tof=tofs)
@@ -420,6 +397,7 @@ def _guess_pulse_stride_offset(
     pulse_index: sc.Variable,
     ltotal: sc.Variable,
     event_time_offset: sc.Variable,
+    pulse_period: sc.Variable,
     pulse_stride: int,
     interp: TofInterpolator,
 ) -> int:
@@ -446,6 +424,8 @@ def _guess_pulse_stride_offset(
         Total length of the flight path from the source to the detector for each event.
     event_time_offset:
         Time of arrival of the neutron at the detector for each event.
+    pulse_period:
+        Period of the source pulses, i.e., time between consecutive pulse starts.
     pulse_stride:
         Stride of used pulses.
     interp:
@@ -469,7 +449,12 @@ def _guess_pulse_stride_offset(
     )
     for i in range(pulse_stride):
         pulse_inds = (pulse_index + i) % pulse_stride
-        tofs[i] = interp(pulse_index=pulse_inds, ltotal=ltotal, event_time_offset=etos)
+        tofs[i] = interp(
+            ltotal=ltotal,
+            event_time_offset=etos,
+            pulse_index=pulse_inds,
+            pulse_period=pulse_period,
+        )
     # Find the entry in the list with the least number of nan values
     return sorted(tofs, key=lambda x: sc.isnan(tofs[x]).sum())[0]
 
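The hunk above is the reworked offset guess: each candidate offset is tried, and the one whose shifted pulse indices produce the fewest NaN time-of-flight values is returned. A self-contained toy version of that selection, using plain NumPy and made-up lookup values instead of the package's interpolator:

```python
import numpy as np

# Toy lookup[pulse, time_bin]: with pulse_stride = 2, pulse 0 only covers early
# arrival times and pulse 1 only late ones; everything else is NaN.
lookup = np.array(
    [
        [10.0, 11.0, np.nan, np.nan],  # pulse 0
        [np.nan, np.nan, 20.0, 21.0],  # pulse 1
    ]
)

raw_pulse_index = np.array([0, 0, 1, 1])  # pulse index derived from event_time_zero
time_bin = np.array([2, 3, 0, 1])         # arrival-time bin of each event
pulse_stride = 2

nan_counts = {}
for offset in range(pulse_stride):
    shifted = (raw_pulse_index + offset) % pulse_stride
    tofs = lookup[shifted, time_bin]
    nan_counts[offset] = int(np.isnan(tofs).sum())

best = min(nan_counts, key=nan_counts.get)
print(best, nan_counts)  # -> 1 {0: 4, 1: 0}: offset 1 aligns all events with valid values
```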
@@ -478,8 +463,6 @@ def _time_of_flight_data_events(
     da: sc.DataArray,
     lookup: sc.DataArray,
     ltotal: sc.Variable,
-    pulse_period: sc.Variable,
-    pulse_stride: int,
     pulse_stride_offset: int,
 ) -> sc.DataArray:
     etos = da.bins.coords["event_time_offset"].to(dtype=float, copy=False)
@@ -492,20 +475,21 @@ def _time_of_flight_data_events(
     ltotal = sc.bins_like(etos, ltotal).bins.constituents["data"]
     etos = etos.bins.constituents["data"]
 
-    # Compute a pulse index for every event: it is the index of the pulse within a
-    # frame period. When there is no pulse skipping, those are all zero. When there is
-    # pulse skipping, the index ranges from zero to pulse_stride - 1.
-    if pulse_stride == 1:
-        pulse_index = sc.zeros(sizes=etos.sizes)
-    else:
+    pulse_index = None
+    pulse_period = lookup.coords["pulse_period"].to(unit=eto_unit)
+    pulse_stride = lookup.coords["pulse_stride"].value
+
+    if pulse_stride > 1:
+        # Compute a pulse index for every event: it is the index of the pulse within a
+        # frame period. The index ranges from zero to pulse_stride - 1.
         etz_unit = 'ns'
         etz = (
             da.bins.coords["event_time_zero"]
             .bins.constituents["data"]
             .to(unit=etz_unit, copy=False)
         )
-        pulse_period = pulse_period.to(unit=etz_unit, dtype=int)
-        frame_period = pulse_period * pulse_stride
+        pulse_period_ns = pulse_period.to(unit=etz_unit, dtype=int)
+        frame_period = pulse_period_ns * pulse_stride
         # Define a common reference time using epoch as a base, but making sure that it
         # is aligned with the pulse_period and the frame_period.
         # We need to use a global reference time instead of simply taking the minimum
@@ -513,17 +497,17 @@ def _time_of_flight_data_events(
         # may not be the first event of the first pulse for all chunks. This would lead
         # to inconsistent pulse indices.
         epoch = sc.datetime(0, unit=etz_unit)
-        diff_to_epoch = (etz.min() - epoch) % pulse_period
+        diff_to_epoch = (etz.min() - epoch) % pulse_period_ns
         # Here we offset the reference by half a pulse period to avoid errors from
         # fluctuations in the event_time_zeros in the data. They are triggered by the
         # neutron source, and may not always be exactly separated by the pulse period.
         # While fluctuations will exist, they will be small, and offsetting the times
         # by half a pulse period is a simple enough fix.
-        reference = epoch + diff_to_epoch - (pulse_period // 2)
+        reference = epoch + diff_to_epoch - (pulse_period_ns // 2)
         # Use in-place operations to avoid large allocations
         pulse_index = etz - reference
         pulse_index %= frame_period
-        pulse_index //= pulse_period
+        pulse_index //= pulse_period_ns
 
         # Apply the pulse_stride_offset
         if pulse_stride_offset is None:
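The reference-time construction above (aligning with the epoch, then shifting by half a pulse period) is what turns ``event_time_zero`` into a per-event pulse index. A minimal sketch of the same arithmetic on made-up nanosecond timestamps, assuming ``pulse_stride = 2`` and an ESS-like pulse period of roughly 71.4 ms:

```python
import scipp as sc

pulse_period = sc.scalar(71_428_571, unit='ns')  # assumed, roughly 1/14 s
pulse_stride = 2
frame_period = pulse_period * pulse_stride

# Made-up event_time_zero timestamps for three consecutive pulses.
etz = sc.datetimes(
    dims=['event'],
    values=[1_000_000_000, 1_071_428_571, 1_142_857_142],
    unit='ns',
)

epoch = sc.datetime(0, unit='ns')
# Align the reference with the pulse period, then shift it by half a period so that
# small jitter in event_time_zero cannot push an event into the neighbouring pulse.
diff_to_epoch = (etz.min() - epoch) % pulse_period
reference = epoch + diff_to_epoch - (pulse_period // 2)

pulse_index = ((etz - reference) % frame_period) // pulse_period
print(pulse_index.values)  # -> [0 1 0] for these made-up timestamps
```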
@@ -531,6 +515,7 @@ def _time_of_flight_data_events(
                 pulse_index=pulse_index,
                 ltotal=ltotal,
                 event_time_offset=etos,
+                pulse_period=pulse_period,
                 pulse_stride=pulse_stride,
                 interp=interp,
             )
@@ -538,21 +523,86 @@ def _time_of_flight_data_events(
         pulse_index %= pulse_stride
 
     # Compute time-of-flight for all neutrons using the interpolator
-    tofs = interp(pulse_index=pulse_index, ltotal=ltotal, event_time_offset=etos)
+    tofs = interp(
+        ltotal=ltotal,
+        event_time_offset=etos,
+        pulse_index=pulse_index,
+        pulse_period=pulse_period,
+    )
 
     parts = da.bins.constituents
     parts["data"] = tofs
     return da.bins.assign_coords(tof=_bins_no_validate(**parts))
 
 
-def time_of_flight_data(
-    da: RawData,
+def detector_ltotal_from_straight_line_approximation(
+    detector_beamline: CalibratedBeamline[RunType],
+) -> DetectorLtotal[RunType]:
+    """
+    Compute Ltotal for the detector pixels.
+    This is a naive straight-line approximation to Ltotal based on basic component
+    positions.
+
+    Parameters
+    ----------
+    detector_beamline:
+        Beamline data for the detector that contains the positions necessary to compute
+        the straight-line approximation to Ltotal (source, sample, and detector
+        positions).
+    """
+    graph = scn.conversion.graph.beamline.beamline(scatter=True)
+    return DetectorLtotal[RunType](
+        detector_beamline.transform_coords(
+            "Ltotal", graph=graph, keep_intermediate=False
+        ).coords["Ltotal"]
+    )
+
+
+def monitor_ltotal_from_straight_line_approximation(
+    monitor_beamline: CalibratedMonitor[RunType, MonitorType],
+) -> MonitorLtotal[RunType, MonitorType]:
+    """
+    Compute Ltotal for the monitor.
+    This is a naive straight-line approximation to Ltotal based on basic component
+    positions.
+
+    Parameters
+    ----------
+    monitor_beamline:
+        Beamline data for the monitor that contains the positions necessary to compute
+        the straight-line approximation to Ltotal (source and monitor positions).
+    """
+    graph = scn.conversion.graph.beamline.beamline(scatter=False)
+    return MonitorLtotal[RunType, MonitorType](
+        monitor_beamline.transform_coords(
+            "Ltotal", graph=graph, keep_intermediate=False
+        ).coords["Ltotal"]
+    )
+
+
+def _compute_tof_data(
+    da: sc.DataArray,
+    lookup: sc.DataArray,
+    ltotal: sc.Variable,
+    pulse_stride_offset: int,
+) -> sc.DataArray:
+    if da.bins is None:
+        return _time_of_flight_data_histogram(da=da, lookup=lookup, ltotal=ltotal)
+    else:
+        return _time_of_flight_data_events(
+            da=da,
+            lookup=lookup,
+            ltotal=ltotal,
+            pulse_stride_offset=pulse_stride_offset,
+        )
+
+
+def detector_time_of_flight_data(
+    detector_data: DetectorData[RunType],
     lookup: TimeOfFlightLookupTable,
-    ltotal: Ltotal,
-    pulse_period: PulsePeriod,
-    pulse_stride: PulseStride,
+    ltotal: DetectorLtotal[RunType],
     pulse_stride_offset: PulseStrideOffset,
-) -> TofData:
+) -> DetectorTofData[RunType]:
     """
     Convert the time-of-arrival data to time-of-flight data using a lookup table.
     The output data will have a time-of-flight coordinate.
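The new ``*_ltotal_from_straight_line_approximation`` providers above delegate to scippneutron's beamline conversion graph; the quantity they produce is the straight-line path length through the instrument. A hedged sketch of the equivalent computation done by hand with scipp, using made-up component positions rather than the package's domain types:

```python
import scipp as sc

# Assumed positions; in the providers these come from the calibrated beamline
# or monitor data loaded from NeXus.
source = sc.vector([0.0, 0.0, -76.0], unit='m')
sample = sc.vector([0.0, 0.0, 0.0], unit='m')
monitor = sc.vector([0.0, 0.0, -20.0], unit='m')
detector = sc.vectors(
    dims=['detector_number'],
    values=[[0.0, 1.0, 4.0], [0.0, -1.0, 4.0]],
    unit='m',
)

# Scattering case (detectors): Ltotal = L1 + L2.
detector_ltotal = sc.norm(sample - source) + sc.norm(detector - sample)

# Non-scattering case (monitors): Ltotal is the source-to-monitor distance.
monitor_ltotal = sc.norm(monitor - source)

print(detector_ltotal.values, monitor_ltotal.value)
```

For monitors the graph is built with ``scatter=False``, so no sample position enters the calculation.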
@@ -567,33 +617,55 @@ def time_of_flight_data(
         arrival.
     ltotal:
         Total length of the flight path from the source to the detector.
-    pulse_period:
-        Period of the source pulses, i.e., time between consecutive pulse starts.
-    pulse_stride:
-        Stride of used pulses. Usually 1, but may be a small integer when
-        pulse-skipping.
     pulse_stride_offset:
         When pulse-skipping, the offset of the first pulse in the stride. This is
         typically zero but can be a small integer < pulse_stride.
     """
-
-    if da.bins is None:
-        out = _time_of_flight_data_histogram(
-            da=da, lookup=lookup, ltotal=ltotal, pulse_period=pulse_period
+    return DetectorTofData[RunType](
+        _compute_tof_data(
+            da=detector_data,
+            lookup=lookup,
+            ltotal=ltotal,
+            pulse_stride_offset=pulse_stride_offset,
         )
-    else:
-        out = _time_of_flight_data_events(
-            da=da,
+    )
+
+
+def monitor_time_of_flight_data(
+    monitor_data: MonitorData[RunType, MonitorType],
+    lookup: TimeOfFlightLookupTable,
+    ltotal: MonitorLtotal[RunType, MonitorType],
+    pulse_stride_offset: PulseStrideOffset,
+) -> MonitorTofData[RunType, MonitorType]:
+    """
+    Convert the time-of-arrival data to time-of-flight data using a lookup table.
+    The output data will have a time-of-flight coordinate.
+
+    Parameters
+    ----------
+    da:
+        Raw monitor data loaded from a NeXus file, e.g., NXmonitor containing
+        NXevent_data.
+    lookup:
+        Lookup table giving time-of-flight as a function of distance and time of
+        arrival.
+    ltotal:
+        Total length of the flight path from the source to the monitor.
+    pulse_stride_offset:
+        When pulse-skipping, the offset of the first pulse in the stride. This is
+        typically zero but can be a small integer < pulse_stride.
+    """
+    return MonitorTofData[RunType, MonitorType](
+        _compute_tof_data(
+            da=monitor_data,
             lookup=lookup,
             ltotal=ltotal,
-            pulse_period=pulse_period,
-            pulse_stride=pulse_stride,
             pulse_stride_offset=pulse_stride_offset,
         )
-    return TofData(out)
+    )
 
 
-def resample_tof_data(da: TofData) -> ResampledTofData:
+def _resample_tof_data(da: sc.DataArray) -> sc.DataArray:
     """
     Histogrammed data that has been converted to `tof` will typically have
     unsorted bin edges (due to either wrapping of `time_of_flight` or wavelength
@@ -626,13 +698,29 @@ def resample_tof_data(da: TofData) -> ResampledTofData:
     coord = da.coords["tof"]
     bin_width = (coord[dim, 1:] - coord[dim, :-1]).nanmedian()
     rehist = events.hist(tof=bin_width)
-    return ResampledTofData(
-        rehist.assign_coords(
-            {key: var for key, var in da.coords.items() if dim not in var.dims}
-        )
+    return rehist.assign_coords(
+        {key: var for key, var in da.coords.items() if dim not in var.dims}
     )
 
 
+def resample_detector_time_of_flight_data(
+    da: DetectorTofData[RunType],
+) -> ResampledDetectorTofData[RunType]:
+    """
+    Resample the detector time-of-flight data to ensure that the bin edges are sorted.
+    """
+    return ResampledDetectorTofData(_resample_tof_data(da))
+
+
+def resample_monitor_time_of_flight_data(
+    da: MonitorTofData[RunType, MonitorType],
+) -> ResampledMonitorTofData[RunType, MonitorType]:
+    """
+    Resample the monitor time-of-flight data to ensure that the bin edges are sorted.
+    """
+    return ResampledMonitorTofData(_resample_tof_data(da))
+
+
 def default_parameters() -> dict:
     """
     Default parameters of the time-of-flight workflow.
@@ -651,4 +739,10 @@ def providers() -> tuple[Callable]:
     """
     Providers of the time-of-flight workflow.
     """
-    return (compute_tof_lookup_table, time_of_flight_data)
+    return (
+        compute_tof_lookup_table,
+        detector_time_of_flight_data,
+        monitor_time_of_flight_data,
+        detector_ltotal_from_straight_line_approximation,
+        monitor_ltotal_from_straight_line_approximation,
+    )
@@ -28,9 +28,9 @@ class FakeBeamline:
         import math
 
         import tof as tof_pkg
-        from tof.facilities.ess_pulse import pulse
+        from tof.facilities.ess_pulse import frequency as ess_frequency
 
-        self.frequency = pulse.frequency
+        self.frequency = ess_frequency
         self.npulses = math.ceil((run_length * self.frequency).to(unit="").value)
         self.events_per_pulse = events_per_pulse
         if source_position is None: