essreduce-25.2.2-py3-none-any.whl → essreduce-25.2.4-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
ess/reduce/live/raw.py CHANGED
@@ -639,7 +639,7 @@ def position_with_noisy_replicas(
     # "Paint" the short array of noise on top of the (replicated) position data.
     noise = sc.concat(
         [position_noise] * ceil(size / position_noise.size), dim=noise_dim
-    )[:size].fold(dim=noise_dim, sizes={'replica': replicas, position.dim: -1})
+    )[:size].fold(dim=noise_dim, sizes={'replica': replicas, **position.sizes})
     return sc.concat([position, noise + position], dim='replica')
 
 
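The one-line fix above generalizes the fold: the old `position.dim: -1` assumed a one-dimensional position variable, whereas `**position.sizes` preserves every dimension of `position`. A minimal standalone sketch (not from the package; the shapes are made up):

```python
import scipp as sc

# Hypothetical 2-D detector positions; `position.dim` would raise here,
# since `.dim` is only defined for 1-D variables.
position = sc.zeros(sizes={'y': 4, 'x': 3}, unit='m')
replicas = 2
flat = sc.zeros(sizes={'noise': replicas * 4 * 3}, unit='m')

# Folding with **position.sizes keeps the full shape of `position`.
folded = flat.fold(dim='noise', sizes={'replica': replicas, **position.sizes})
assert folded.sizes == {'replica': 2, 'y': 4, 'x': 3}
```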
ess/reduce/nexus/_nexus_loader.py CHANGED
@@ -8,14 +8,16 @@ from collections.abc import Generator, Mapping
 from contextlib import AbstractContextManager, contextmanager, nullcontext
 from dataclasses import dataclass
 from math import prod
-from typing import cast
+from typing import TypeVar, cast
 
 import scipp as sc
 import scippnexus as snx
 
 from ..logging import get_logger
 from .types import (
+    Beamline,
     FilePath,
+    Measurement,
     NeXusAllLocationSpec,
     NeXusEntryName,
     NeXusFile,
@@ -27,6 +29,8 @@ from .types import (
 class NoNewDefinitionsType: ...
 
 
+_Model = TypeVar('_Model', Beamline, Measurement)
+
 NoNewDefinitions = NoNewDefinitionsType()
 
 
@@ -74,6 +78,18 @@ def load_all_components(
     return components
 
 
+def load_metadata(
+    file_path: FilePath | NeXusFile | NeXusGroup,
+    model: type[_Model],
+    *,
+    entry_name: NeXusEntryName | None = None,
+    definitions: Mapping | NoNewDefinitionsType = NoNewDefinitions,
+) -> _Model:
+    with _open_nexus_file(file_path, definitions=definitions) as f:
+        entry = _unique_child_group(f, snx.NXentry, entry_name)
+        return model.from_nexus_entry(entry)
+
+
 def compute_component_position(dg: sc.DataGroup) -> sc.DataGroup:
     # In some downstream packages we use some of the Nexus components which attempt
     # to compute positions without having actual Nexus data defining depends_on chains.
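The new `load_metadata` opens the file, locates the unique `NXentry`, and delegates to the model's `from_nexus_entry`. A hedged usage sketch (the file name is a placeholder; the private-module import mirrors the `from . import _nexus_loader as nexus` alias used by `workflow.py` below):

```python
from pathlib import Path

from ess.reduce.nexus import _nexus_loader as nexus
from ess.reduce.nexus.types import Beamline, Measurement

path = Path('experiment.nxs')  # placeholder path, not shipped with the package
beamline = nexus.load_metadata(path, Beamline)        # instrument/facility info
measurement = nexus.load_metadata(path, Measurement)  # title, run number, ...
print(beamline, measurement)
```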
ess/reduce/nexus/types.py CHANGED
@@ -7,6 +7,7 @@ from typing import Any, BinaryIO, Generic, NewType, TypeVar
 import sciline
 import scipp as sc
 import scippnexus as snx
+from scippneutron import metadata as scn_meta
 
 FilePath = NewType('FilePath', Path)
 """Full path to a NeXus file on disk."""
@@ -177,6 +178,13 @@ Component = TypeVar(
 UniqueComponent = TypeVar('UniqueComponent', snx.NXsample, snx.NXsource)
 """Components that can be identified by their type as there will only be one."""
 
+Beamline = scn_meta.Beamline
+"""Beamline metadata."""
+Measurement = scn_meta.Measurement
+"""Measurement metadata."""
+Source = scn_meta.Source
+"""Neutron source metadata."""
+
 
 class NeXusName(sciline.Scope[Component, str], str):
     """Name of a component in a NeXus file."""
ess/reduce/nexus/workflow.py CHANGED
@@ -19,6 +19,7 @@ from . import _nexus_loader as nexus
 from .types import (
     AllNeXusComponents,
     Analyzers,
+    Beamline,
     CalibratedBeamline,
     CalibratedDetector,
     CalibratedMonitor,
@@ -29,6 +30,7 @@ from .types import (
     DetectorPositionOffset,
     Filename,
     GravityVector,
+    Measurement,
     MonitorData,
     MonitorPositionOffset,
     MonitorType,
@@ -45,6 +47,7 @@ from .types import (
     Position,
     PreopenNeXusFile,
     RunType,
+    SampleRun,
     TimeInterval,
     UniqueComponent,
 )
@@ -586,6 +589,18 @@ def _add_variances(da: sc.DataArray) -> sc.DataArray:
     return out
 
 
+def load_beamline_metadata_from_nexus(file_spec: NeXusFileSpec[SampleRun]) -> Beamline:
+    """Load beamline metadata from a sample NeXus file."""
+    return nexus.load_metadata(file_spec.value, Beamline)
+
+
+def load_measurement_metadata_from_nexus(
+    file_spec: NeXusFileSpec[SampleRun],
+) -> Measurement:
+    """Load measurement metadata from a sample NeXus file."""
+    return nexus.load_metadata(file_spec.value, Measurement)
+
+
 definitions = snx.base_definitions()
 definitions["NXdetector"] = _StrippedDetector
 definitions["NXmonitor"] = _StrippedMonitor
@@ -631,6 +646,11 @@ _chopper_providers = (parse_disk_choppers,)
 
 _analyzer_providers = (parse_analyzers,)
 
+_metadata_providers = (
+    load_beamline_metadata_from_nexus,
+    load_measurement_metadata_from_nexus,
+)
+
 
 def LoadMonitorWorkflow() -> sciline.Pipeline:
     """Generic workflow for loading monitor data from a NeXus file."""
@@ -689,6 +709,7 @@ def GenericNeXusWorkflow(
             *_detector_providers,
             *_chopper_providers,
             *_analyzer_providers,
+            *_metadata_providers,
         )
     )
     wf[DetectorBankSizes] = DetectorBankSizes({})
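Once registered, `Beamline` and `Measurement` become ordinary targets of `GenericNeXusWorkflow`, computed from the `SampleRun` file. A sketch under two assumptions: that this version's factory accepts the `run_types`/`monitor_types` keywords, and a placeholder filename:

```python
from ess.reduce.nexus.types import Beamline, Filename, Measurement, SampleRun
from ess.reduce.nexus.workflow import GenericNeXusWorkflow

wf = GenericNeXusWorkflow(run_types=[SampleRun], monitor_types=[])
wf[Filename[SampleRun]] = 'sample_run.nxs'  # placeholder path
beamline = wf.compute(Beamline)        # via load_beamline_metadata_from_nexus
measurement = wf.compute(Measurement)  # via load_measurement_metadata_from_nexus
```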
ess/reduce/streaming.py CHANGED
@@ -147,7 +147,7 @@ class StreamProcessor:
         *,
         dynamic_keys: tuple[sciline.typing.Key, ...],
         target_keys: tuple[sciline.typing.Key, ...],
-        accumulators: dict[sciline.typing.Key, Accumulator, Callable[..., Accumulator]]
+        accumulators: dict[sciline.typing.Key, Accumulator | Callable[..., Accumulator]]
         | tuple[sciline.typing.Key, ...],
         allow_bypass: bool = False,
     ) -> None:
@@ -180,6 +180,8 @@ class StreamProcessor:
         for key in dynamic_keys:
             workflow[key] = None  # hack to prune branches
 
+        self._dynamic_keys = set(dynamic_keys)
+
         # Find and pre-compute static nodes as far down the graph as possible
         # See also https://github.com/scipp/sciline/issues/148.
         nodes = _find_descendants(workflow, dynamic_keys)
@@ -194,12 +196,19 @@ class StreamProcessor:
             if isinstance(accumulators, dict)
             else {key: EternalAccumulator() for key in accumulators}
         )
+
+        # Map each accumulator to its dependent dynamic keys
+        graph = workflow.underlying_graph
+        self._accumulator_dependencies = {
+            acc_key: nx.ancestors(graph, acc_key) & self._dynamic_keys
+            for acc_key in self._accumulators
+            if acc_key in graph
+        }
+
         # Depending on the target_keys, some accumulators can be unused and should not
         # be computed when adding a chunk.
         self._accumulators = {
-            key: value
-            for key, value in self._accumulators.items()
-            if key in self._process_chunk_workflow.underlying_graph
+            key: value for key, value in self._accumulators.items() if key in graph
         }
         # Create accumulators unless instances were passed. This allows for initializing
         # accumulators with arguments that depend on the workflow such as bin edges,
@@ -242,7 +251,30 @@ class StreamProcessor:
         ----------
         chunks:
             Chunks to be processed.
+
+        Raises
+        ------
+        ValueError
+            If non-dynamic keys are provided in chunks.
+            If accumulator computation requires dynamic keys not provided in chunks.
         """
+        non_dynamic = set(chunks) - self._dynamic_keys
+        if non_dynamic:
+            raise ValueError(
+                f"Can only update dynamic keys. Got non-dynamic keys: {non_dynamic}"
+            )
+
+        accumulators_to_update = []
+        for acc_key, deps in self._accumulator_dependencies.items():
+            if deps.isdisjoint(chunks.keys()):
+                continue
+            if not deps.issubset(chunks.keys()):
+                raise ValueError(
+                    f"Accumulator '{acc_key}' requires dynamic keys "
+                    f"{deps - chunks.keys()} not provided in the current chunk."
                )
+            accumulators_to_update.append(acc_key)
+
         for key, value in chunks.items():
             self._process_chunk_workflow[key] = value
             # There can be dynamic keys that do not "terminate" in any accumulator. In
@@ -250,7 +282,7 @@ class StreamProcessor:
             # the target keys.
             if self._allow_bypass:
                 self._finalize_workflow[key] = value
-        to_accumulate = self._process_chunk_workflow.compute(self._accumulators)
+        to_accumulate = self._process_chunk_workflow.compute(accumulators_to_update)
         for key, processed in to_accumulate.items():
             self._accumulators[key].push(processed)
 
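Taken together, the processor now records each accumulator's dynamic ancestors (via `nx.ancestors` on the workflow graph) and, per chunk, recomputes only the accumulators whose inputs are all present, raising `ValueError` for non-dynamic or incomplete updates. A toy sketch (the two-node workflow is invented; the `accumulate`/`finalize` method names are assumed from this module):

```python
from typing import NewType

import sciline
import scipp as sc

from ess.reduce.streaming import EternalAccumulator, StreamProcessor

Events = NewType('Events', sc.DataArray)
Histogram = NewType('Histogram', sc.DataArray)

def histogram(events: Events) -> Histogram:
    return Histogram(events.hist(x=10))  # histogram over the 'x' coord

proc = StreamProcessor(
    sciline.Pipeline((histogram,)),
    dynamic_keys=(Events,),
    target_keys=(Histogram,),
    accumulators={Histogram: EternalAccumulator()},
)
chunk = sc.DataArray(
    sc.ones(sizes={'event': 100}),
    coords={'x': sc.linspace('event', 0.0, 1.0, num=100)},
)
proc.accumulate({Events: chunk})  # ok: Events is a dynamic key
results = proc.finalize()
proc.accumulate({Histogram: results[Histogram]})  # ValueError: not a dynamic key
```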
ess/reduce/time_of_flight/fakes.py CHANGED
@@ -79,144 +79,41 @@ class FakeBeamline:
         return nx_event_data, raw_data
 
 
-wfm1_chopper = DiskChopper(
-    frequency=sc.scalar(-70.0, unit="Hz"),
-    beam_position=sc.scalar(0.0, unit="deg"),
-    phase=sc.scalar(-47.10, unit="deg"),
-    axle_position=sc.vector(value=[0, 0, 6.6], unit="m"),
-    slit_begin=sc.array(
-        dims=["cutout"],
-        values=np.array([83.71, 140.49, 193.26, 242.32, 287.91, 330.3]) + 15.0,
-        unit="deg",
-    ),
-    slit_end=sc.array(
-        dims=["cutout"],
-        values=np.array([94.7, 155.79, 212.56, 265.33, 314.37, 360.0]) + 15.0,
-        unit="deg",
-    ),
-    slit_height=sc.scalar(10.0, unit="cm"),
-    radius=sc.scalar(30.0, unit="cm"),
-)
-
-wfm2_chopper = DiskChopper(
-    frequency=sc.scalar(-70.0, unit="Hz"),
-    beam_position=sc.scalar(0.0, unit="deg"),
-    phase=sc.scalar(-76.76, unit="deg"),
-    axle_position=sc.vector(value=[0, 0, 7.1], unit="m"),
-    slit_begin=sc.array(
-        dims=["cutout"],
-        values=np.array([65.04, 126.1, 182.88, 235.67, 284.73, 330.32]) + 15.0,
-        unit="deg",
-    ),
-    slit_end=sc.array(
-        dims=["cutout"],
-        values=np.array([76.03, 141.4, 202.18, 254.97, 307.74, 360.0]) + 15.0,
-        unit="deg",
-    ),
-    slit_height=sc.scalar(10.0, unit="cm"),
-    radius=sc.scalar(30.0, unit="cm"),
-)
-
-foc1_chopper = DiskChopper(
-    frequency=sc.scalar(-56.0, unit="Hz"),
-    beam_position=sc.scalar(0.0, unit="deg"),
-    phase=sc.scalar(-62.40, unit="deg"),
-    axle_position=sc.vector(value=[0, 0, 8.8], unit="m"),
-    slit_begin=sc.array(
-        dims=["cutout"],
-        values=np.array([74.6, 139.6, 194.3, 245.3, 294.8, 347.2]),
-        unit="deg",
-    ),
-    slit_end=sc.array(
-        dims=["cutout"],
-        values=np.array([95.2, 162.8, 216.1, 263.1, 310.5, 371.6]),
-        unit="deg",
-    ),
-    slit_height=sc.scalar(10.0, unit="cm"),
-    radius=sc.scalar(30.0, unit="cm"),
-)
-
-foc2_chopper = DiskChopper(
-    frequency=sc.scalar(-28.0, unit="Hz"),
-    beam_position=sc.scalar(0.0, unit="deg"),
-    phase=sc.scalar(-12.27, unit="deg"),
-    axle_position=sc.vector(value=[0, 0, 15.9], unit="m"),
-    slit_begin=sc.array(
-        dims=["cutout"],
-        values=np.array([98.0, 154.0, 206.8, 255.0, 299.0, 344.65]),
-        unit="deg",
-    ),
-    slit_end=sc.array(
-        dims=["cutout"],
-        values=np.array([134.6, 190.06, 237.01, 280.88, 323.56, 373.76]),
-        unit="deg",
-    ),
-    slit_height=sc.scalar(10.0, unit="cm"),
-    radius=sc.scalar(30.0, unit="cm"),
-)
-
-pol_chopper = DiskChopper(
-    frequency=sc.scalar(-14.0, unit="Hz"),
-    beam_position=sc.scalar(0.0, unit="deg"),
-    phase=sc.scalar(0.0, unit="deg"),
-    axle_position=sc.vector(value=[0, 0, 17.0], unit="m"),
-    slit_begin=sc.array(
-        dims=["cutout"],
-        values=np.array([40.0]),
-        unit="deg",
-    ),
-    slit_end=sc.array(
-        dims=["cutout"],
-        values=np.array([240.0]),
-        unit="deg",
-    ),
-    slit_height=sc.scalar(10.0, unit="cm"),
-    radius=sc.scalar(30.0, unit="cm"),
-)
-
-
-def wfm_choppers():
-    return {
-        "wfm1": wfm1_chopper,
-        "wfm2": wfm2_chopper,
-        "foc1": foc1_chopper,
-        "foc2": foc2_chopper,
-        "pol": pol_chopper,
-    }
-
-
 def psc_choppers():
     return {
-        name: DiskChopper(
-            frequency=ch.frequency,
-            beam_position=ch.beam_position,
-            phase=ch.phase,
-            axle_position=ch.axle_position,
-            slit_begin=ch.slit_begin[0:1],
-            slit_end=ch.slit_end[0:1],
-            slit_height=ch.slit_height[0:1],
-            radius=ch.radius,
+        "chopper": DiskChopper(
+            frequency=sc.scalar(-14.0, unit="Hz"),
+            beam_position=sc.scalar(0.0, unit="deg"),
+            phase=sc.scalar(-85.0, unit="deg"),
+            axle_position=sc.vector(value=[0, 0, 8.0], unit="m"),
+            slit_begin=sc.array(dims=["cutout"], values=[0.0], unit="deg"),
+            slit_end=sc.array(dims=["cutout"], values=[3.0], unit="deg"),
+            slit_height=sc.scalar(10.0, unit="cm"),
+            radius=sc.scalar(30.0, unit="cm"),
         )
-        for name, ch in wfm_choppers().items()
     }
 
 
-def pulse_skipping_chopper():
-    return DiskChopper(
-        frequency=sc.scalar(-7.0, unit="Hz"),
-        beam_position=sc.scalar(0.0, unit="deg"),
-        phase=sc.scalar(0.0, unit="deg"),
-        axle_position=sc.vector(value=[0, 0, 30.0], unit="m"),
-        slit_begin=sc.array(
-            dims=["cutout"],
-            values=np.array([40.0]),
-            unit="deg",
+def pulse_skipping_choppers():
+    return {
+        "chopper": DiskChopper(
+            frequency=sc.scalar(-14.0, unit="Hz"),
+            beam_position=sc.scalar(0.0, unit="deg"),
+            phase=sc.scalar(-35.0, unit="deg"),
+            axle_position=sc.vector(value=[0, 0, 8.0], unit="m"),
+            slit_begin=sc.array(dims=["cutout"], values=np.array([0.0]), unit="deg"),
+            slit_end=sc.array(dims=["cutout"], values=np.array([33.0]), unit="deg"),
+            slit_height=sc.scalar(10.0, unit="cm"),
+            radius=sc.scalar(30.0, unit="cm"),
         ),
-        slit_end=sc.array(
-            dims=["cutout"],
-            values=np.array([140.0]),
-            unit="deg",
+        "pulse_skipping": DiskChopper(
+            frequency=sc.scalar(-7.0, unit="Hz"),
+            beam_position=sc.scalar(0.0, unit="deg"),
+            phase=sc.scalar(-10.0, unit="deg"),
+            axle_position=sc.vector(value=[0, 0, 15.0], unit="m"),
+            slit_begin=sc.array(dims=["cutout"], values=np.array([0.0]), unit="deg"),
+            slit_end=sc.array(dims=["cutout"], values=np.array([120.0]), unit="deg"),
+            slit_height=sc.scalar(10.0, unit="cm"),
+            radius=sc.scalar(30.0, unit="cm"),
        ),
-        slit_height=sc.scalar(10.0, unit="cm"),
-        radius=sc.scalar(30.0, unit="cm"),
-    )
+    }
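The fake beamline thus drops the five-chopper WFM cascade in favor of small purpose-built dictionaries. A quick sketch of what the new helpers return:

```python
from ess.reduce.time_of_flight.fakes import psc_choppers, pulse_skipping_choppers

print(sorted(psc_choppers()))  # ['chopper']
choppers = pulse_skipping_choppers()
print(sorted(choppers))        # ['chopper', 'pulse_skipping']
# The second chopper runs at -7 Hz, half the 14 Hz source rate, so it
# transmits every other pulse.
print(choppers['pulse_skipping'].frequency)
```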
ess/reduce/time_of_flight/toa_to_tof.py CHANGED
@@ -32,20 +32,6 @@ from .types import (
 )
 
 
-def extract_ltotal(da: RawData) -> Ltotal:
-    """
-    Extract the total length of the flight path from the source to the detector from the
-    detector data.
-
-    Parameters
-    ----------
-    da:
-        Raw detector data loaded from a NeXus file, e.g., NXdetector containing
-        NXevent_data.
-    """
-    return Ltotal(da.coords["Ltotal"])
-
-
 def _mask_large_uncertainty(table: sc.DataArray, error_threshold: float):
     """
     Mask regions with large uncertainty with NaNs.
@@ -389,6 +375,56 @@ def _time_of_flight_data_histogram(
     return rebinned.assign_coords(tof=tofs)
 
 
+def _guess_pulse_stride_offset(
+    pulse_index: sc.Variable,
+    ltotal: sc.Variable,
+    event_time_offset: sc.Variable,
+    pulse_stride: int,
+    interp: Callable,
+) -> int:
+    """
+    Using the minimum ``event_time_zero`` to calculate a reference time when computing
+    the time-of-flight for the neutron events makes the workflow depend on when the
+    first event was recorded. There is no straightforward way to know if we started
+    recording at the beginning of a frame, or half-way through a frame, without looking
+    at the chopper logs. This can be manually corrected using the pulse_stride_offset
+    parameter, but this makes automatic reduction of the data difficult.
+    See https://github.com/scipp/essreduce/issues/184.
+
+    Here, we perform a simple guess for the ``pulse_stride_offset`` if it is not
+    provided.
+    We choose a few random events, compute the time-of-flight for every possible value
+    of pulse_stride_offset, and return the value that yields the least number of NaNs
+    in the computed time-of-flight.
+
+    Parameters
+    ----------
+    pulse_index:
+        Pulse index for every event.
+    ltotal:
+        Total length of the flight path from the source to the detector for each event.
+    event_time_offset:
+        Time of arrival of the neutron at the detector for each event.
+    pulse_stride:
+        Stride of used pulses.
+    interp:
+        2D interpolator for the lookup table.
+    """
+    tofs = {}
+    # Choose a few random events to compute the time-of-flight
+    inds = np.random.choice(
+        len(event_time_offset), min(5000, len(event_time_offset)), replace=False
+    )
+    pulse_index_values = pulse_index.values[inds]
+    ltotal_values = ltotal.values[inds]
+    etos_values = event_time_offset.values[inds]
+    for i in range(pulse_stride):
+        pulse_inds = (pulse_index_values + i) % pulse_stride
+        tofs[i] = interp((pulse_inds, ltotal_values, etos_values))
+    # Find the entry in the list with the least number of nan values
+    return sorted(tofs, key=lambda x: np.isnan(tofs[x]).sum())[0]
+
+
 def _time_of_flight_data_events(
     da: sc.DataArray,
     lookup: sc.DataArray,
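The selection criterion at the end of `_guess_pulse_stride_offset` is simply "fewest NaNs wins": a wrong offset sends events into out-of-range regions of the lookup table, where interpolation returns NaN. A toy illustration with made-up numbers:

```python
import numpy as np

# Interpolated time-of-flight per candidate offset (values invented).
tofs = {
    0: np.array([1.0, np.nan, np.nan, 2.0]),  # wrong offset: many invalid lookups
    1: np.array([1.2, 3.4, 2.2, 2.0]),        # right offset: all lookups valid
}
best = sorted(tofs, key=lambda i: np.isnan(tofs[i]).sum())[0]
assert best == 1
```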
@@ -399,28 +435,6 @@ def _time_of_flight_data_events(
 ) -> sc.DataArray:
     etos = da.bins.coords["event_time_offset"]
     eto_unit = elem_unit(etos)
-    pulse_period = pulse_period.to(unit=eto_unit)
-    frame_period = pulse_period * pulse_stride
-
-    # TODO: Finding the `tmin` below will not work in the case where data is processed
-    # in chunks, as taking the minimum time in each chunk will lead to inconsistent
-    # pulse indices (this will be the case in live data, or when using the
-    # StreamProcessor). We could instead read it from the first chunk and store it?
-
-    # Compute a pulse index for every event: it is the index of the pulse within a
-    # frame period. When there is no pulse skipping, those are all zero. When there is
-    # pulse skipping, the index ranges from zero to pulse_stride - 1.
-    tmin = da.bins.coords['event_time_zero'].min()
-    pulse_index = (
-        (
-            (da.bins.coords['event_time_zero'] - tmin).to(unit=eto_unit)
-            + 0.5 * pulse_period
-        )
-        % frame_period
-    ) // pulse_period
-    # Apply the pulse_stride_offset
-    pulse_index += pulse_stride_offset
-    pulse_index %= pulse_stride
 
     # Create 2D interpolator
     interp = _make_tof_interpolator(
@@ -430,7 +444,51 @@ def _time_of_flight_data_events(
     # Operate on events (broadcast distances to all events)
     ltotal = sc.bins_like(etos, ltotal).bins.constituents["data"]
     etos = etos.bins.constituents["data"]
-    pulse_index = pulse_index.bins.constituents["data"]
+
+    # Compute a pulse index for every event: it is the index of the pulse within a
+    # frame period. When there is no pulse skipping, those are all zero. When there is
+    # pulse skipping, the index ranges from zero to pulse_stride - 1.
+    if pulse_stride == 1:
+        pulse_index = sc.zeros(sizes=etos.sizes)
+    else:
+        etz_unit = 'ns'
+        etz = (
+            da.bins.coords["event_time_zero"]
+            .bins.constituents["data"]
+            .to(unit=etz_unit, copy=False)
+        )
+        pulse_period = pulse_period.to(unit=etz_unit, dtype=int)
+        frame_period = pulse_period * pulse_stride
+        # Define a common reference time using epoch as a base, but making sure that it
+        # is aligned with the pulse_period and the frame_period.
+        # We need to use a global reference time instead of simply taking the minimum
+        # event_time_zero because the events may arrive in chunks, and the first event
+        # may not be the first event of the first pulse for all chunks. This would lead
+        # to inconsistent pulse indices.
+        epoch = sc.datetime(0, unit=etz_unit)
+        diff_to_epoch = (etz.min() - epoch) % pulse_period
+        # Here we offset the reference by half a pulse period to avoid errors from
+        # fluctuations in the event_time_zeros in the data. They are triggered by the
+        # neutron source, and may not always be exactly separated by the pulse period.
+        # While fluctuations will exist, they will be small, and offsetting the times
+        # by half a pulse period is a simple enough fix.
+        reference = epoch + diff_to_epoch - (pulse_period // 2)
+        # Use in-place operations to avoid large allocations
+        pulse_index = etz - reference
+        pulse_index %= frame_period
+        pulse_index //= pulse_period
+
+        # Apply the pulse_stride_offset
+        if pulse_stride_offset is None:
+            pulse_stride_offset = _guess_pulse_stride_offset(
+                pulse_index=pulse_index,
+                ltotal=ltotal,
+                event_time_offset=etos,
+                pulse_stride=pulse_stride,
+                interp=interp,
+            )
+        pulse_index += pulse_stride_offset
+        pulse_index %= pulse_stride
 
     # Compute time-of-flight for all neutrons using the interpolator
     tofs = sc.array(
@@ -535,7 +593,7 @@ def default_parameters() -> dict:
     return {
         PulsePeriod: 1.0 / sc.scalar(14.0, unit="Hz"),
         PulseStride: 1,
-        PulseStrideOffset: 0,
+        PulseStrideOffset: None,
         DistanceResolution: sc.scalar(0.1, unit="m"),
         TimeResolution: sc.scalar(250.0, unit='us'),
         LookupTableRelativeErrorThreshold: 0.1,
@@ -546,4 +604,4 @@ def providers() -> tuple[Callable]:
     """
     Providers of the time-of-flight workflow.
     """
-    return (compute_tof_lookup_table, extract_ltotal, time_of_flight_data)
+    return (compute_tof_lookup_table, time_of_flight_data)
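The chunk-independence of the new pulse index comes from anchoring the modular arithmetic at a pulse-aligned reference near epoch instead of at the first event of the current chunk. A worked numeric sketch using plain integers (all values hypothetical):

```python
pulse_period = 71_428_571  # ns, roughly 1/14 Hz
pulse_stride = 2
frame_period = pulse_period * pulse_stride

etz_min = 1_000_000_000_123  # ns since epoch of the earliest event_time_zero
# Align the reference with the pulse grid, then back off half a period to
# absorb small source-timing jitter, as in the code above.
reference = etz_min % pulse_period - pulse_period // 2

for etz in (etz_min, etz_min + pulse_period, etz_min + 2 * pulse_period):
    pulse_index = ((etz - reference) % frame_period) // pulse_period
    print(pulse_index)  # prints 0, 1, 0: alternates with stride 2
```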
ess/reduce/time_of_flight/types.py CHANGED
@@ -101,10 +101,10 @@ PulseStride = NewType("PulseStride", int)
 Stride of used pulses. Usually 1, but may be a small integer when pulse-skipping.
 """
 
-PulseStrideOffset = NewType("PulseStrideOffset", int)
+PulseStrideOffset = NewType("PulseStrideOffset", int | None)
 """
 When pulse-skipping, the offset of the first pulse in the stride. This is typically
-zero but can be a small integer < pulse_stride.
+zero but can be a small integer < pulse_stride. If None, a guess is made.
 """
 
 RawData = NewType("RawData", sc.DataArray)
essreduce-25.2.2.dist-info/METADATA → essreduce-25.2.4.dist-info/METADATA RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.2
 Name: essreduce
-Version: 25.2.2
+Version: 25.2.4
 Summary: Common data reduction tools for the ESS facility
 Author: Scipp contributors
 License: BSD 3-Clause License
@@ -53,7 +53,7 @@ Description-Content-Type: text/markdown
 License-File: LICENSE
 Requires-Dist: sciline>=24.06.2
 Requires-Dist: scipp>=25.01.0
-Requires-Dist: scippneutron>=24.11.0
+Requires-Dist: scippneutron>=25.02.0
 Requires-Dist: scippnexus>=24.11.0
 Provides-Extra: test
 Requires-Dist: ipywidgets; extra == "test"
essreduce-25.2.2.dist-info/RECORD → essreduce-25.2.4.dist-info/RECORD RENAMED
@@ -3,27 +3,27 @@ ess/reduce/data.py,sha256=vaoeAJ6EpK1YghOiAALLdWiW17TgUnnnt0H-RGiGzXk,3756
 ess/reduce/logging.py,sha256=6n8Czq4LZ3OK9ENlKsWSI1M3KvKv6_HSoUiV4__IUlU,357
 ess/reduce/parameter.py,sha256=4sCfoKOI2HuO_Q7JLH_jAXnEOFANSn5P3NdaOBzhJxc,4635
 ess/reduce/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-ess/reduce/streaming.py,sha256=-ffg5pq0ShVPatLzHqKfWU-qpYmuYZjOrwoKd8J4aJU,9129
+ess/reduce/streaming.py,sha256=ffFiWpq9AK1GOfRG-rlvT_Gz7HMBnE4FD0qPZiej4Gg,10306
 ess/reduce/ui.py,sha256=zmorAbDwX1cU3ygDT--OP58o0qU7OBcmJz03jPeYSLA,10884
 ess/reduce/uncertainty.py,sha256=LR4O6ApB6Z-W9gC_XW0ajupl8yFG-du0eee1AX_R-gk,6990
 ess/reduce/workflow.py,sha256=sL34T_2Cjl_8iFlegujxI9VyOUwo6erVC8pOXnfWgYw,3060
 ess/reduce/live/__init__.py,sha256=jPQVhihRVNtEDrE20PoKkclKV2aBF1lS7cCHootgFgI,204
-ess/reduce/live/raw.py,sha256=W7AxwFApqBzYki-Y_pIt9jZqzcvJnRMsdt1Hu1He4v0,25262
+ess/reduce/live/raw.py,sha256=pzXsPZQERtUm5tabTXjxd-XHH4WDDP13TTBG0lGPcqg,25262
 ess/reduce/live/roi.py,sha256=Hs-pW98k41WU6Kl3UQ41kQawk80c2QNOQ_WNctLzDPE,3795
 ess/reduce/live/workflow.py,sha256=bsbwvTqPhRO6mC__3b7MgU7DWwAnOvGvG-t2n22EKq8,4285
 ess/reduce/nexus/__init__.py,sha256=59bxKkNYg8DYcSykNvH6nCa5SYchJC4SbgZEKhkNdYc,967
-ess/reduce/nexus/_nexus_loader.py,sha256=NbKIepTxv-UirVlViImh8Ozm16k-ZIiF6AQ9-oKuDHU,19222
+ess/reduce/nexus/_nexus_loader.py,sha256=Y8ILMFEP9KxVfyEMGSFKoZZS79DIs0niRqI2Lq2TqZk,19720
 ess/reduce/nexus/json_generator.py,sha256=ME2Xn8L7Oi3uHJk9ZZdCRQTRX-OV_wh9-DJn07Alplk,2529
 ess/reduce/nexus/json_nexus.py,sha256=QrVc0p424nZ5dHX9gebAJppTw6lGZq9404P_OFl1giA,10282
-ess/reduce/nexus/types.py,sha256=Az_pZtaTIlEAA4Po_YOLabez8w4HeHcr0asY3rS6BXg,9676
-ess/reduce/nexus/workflow.py,sha256=jzdh0ubp9Mmb98a04KIeM8Xo9bpAqpnsfwFWz2VllnQ,23676
+ess/reduce/nexus/types.py,sha256=15XcHbNbOfnAYjWXzzKyYDVNyNixRnP0hJ-Q2duwMWE,9896
+ess/reduce/nexus/workflow.py,sha256=ABVc9E1Qcos0wLcDE8bGDAOz3aPpHrj4TJyfHsQbx7I,24297
 ess/reduce/scripts/grow_nexus.py,sha256=hET3h06M0xlJd62E3palNLFvJMyNax2kK4XyJcOhl-I,3387
 ess/reduce/time_of_flight/__init__.py,sha256=92w88NpGIBysuqCPSvdZ_XgBd7cFAk9qaO9zflpUbfM,1097
-ess/reduce/time_of_flight/fakes.py,sha256=UyYxvtnb8QDdCgwaOhe0guRGCKP1DFr0wPXUPO3RNYU,6730
+ess/reduce/time_of_flight/fakes.py,sha256=rlBgceFVbHIhP_xPyUzYVf-2wEu--G8hA-kxPzAnPbM,4236
 ess/reduce/time_of_flight/simulation.py,sha256=CireE9m9kFbUXhGUeY2L3SoMy7kpqopxKj__h4tSKzo,2614
 ess/reduce/time_of_flight/to_events.py,sha256=_5CcUOWvguDcK8uo2pPZWzXnWoiZhC1w-zF8xysaIvU,4339
-ess/reduce/time_of_flight/toa_to_tof.py,sha256=crvNe6GIwHLrSUE_C3omDfroIujoH1rphwWhX8Dj25U,20601
-ess/reduce/time_of_flight/types.py,sha256=fIgnLKv6QolevXwrY5hD2-eC_7wtNlVJ-k6CvaeRJQk,5014
+ess/reduce/time_of_flight/toa_to_tof.py,sha256=bt28z6wixS4AegBxsl1uYBREP08TyAs8Y9Z738YcXE4,23476
+ess/reduce/time_of_flight/types.py,sha256=Iv1XGLbrZ9bD4CPAVhsIPkAaB46YC7l7yf5XweljLqk,5047
 ess/reduce/widgets/__init__.py,sha256=SoSHBv8Dc3QXV9HUvPhjSYWMwKTGYZLpsWwsShIO97Q,5325
 ess/reduce/widgets/_base.py,sha256=_wN3FOlXgx_u0c-A_3yyoIH-SdUvDENGgquh9S-h5GI,4852
 ess/reduce/widgets/_binedges_widget.py,sha256=ZCQsGjYHnJr9GFUn7NjoZc1CdsnAzm_fMzyF-fTKKVY,2785
@@ -36,9 +36,9 @@ ess/reduce/widgets/_spinner.py,sha256=2VY4Fhfa7HMXox2O7UbofcdKsYG-AJGrsgGJB85nDX
 ess/reduce/widgets/_string_widget.py,sha256=iPAdfANyXHf-nkfhgkyH6gQDklia0LebLTmwi3m-iYQ,1482
 ess/reduce/widgets/_switchable_widget.py,sha256=fjKz99SKLhIF1BLgGVBSKKn3Lu_jYBwDYGeAjbJY3Q8,2390
 ess/reduce/widgets/_vector_widget.py,sha256=aTaBqCFHZQhrIoX6-sSqFWCPePEW8HQt5kUio8jP1t8,1203
-essreduce-25.2.2.dist-info/LICENSE,sha256=nVEiume4Qj6jMYfSRjHTM2jtJ4FGu0g-5Sdh7osfEYw,1553
-essreduce-25.2.2.dist-info/METADATA,sha256=l7vAVarbV9x-4mXfgstnKGyX0w4Yzcan8JCtwEPQbJM,3708
-essreduce-25.2.2.dist-info/WHEEL,sha256=In9FTNxeP60KnTkGw7wk6mJPYd_dQSjEZmXdBdMCI-8,91
-essreduce-25.2.2.dist-info/entry_points.txt,sha256=PMZOIYzCifHMTe4pK3HbhxUwxjFaZizYlLD0td4Isb0,66
-essreduce-25.2.2.dist-info/top_level.txt,sha256=0JxTCgMKPLKtp14wb1-RKisQPQWX7i96innZNvHBr-s,4
-essreduce-25.2.2.dist-info/RECORD,,
+essreduce-25.2.4.dist-info/LICENSE,sha256=nVEiume4Qj6jMYfSRjHTM2jtJ4FGu0g-5Sdh7osfEYw,1553
+essreduce-25.2.4.dist-info/METADATA,sha256=4esgkOAUN-XvPn_1HnmisKvJvG5RA0UGFUrmVQPyg50,3708
+essreduce-25.2.4.dist-info/WHEEL,sha256=In9FTNxeP60KnTkGw7wk6mJPYd_dQSjEZmXdBdMCI-8,91
+essreduce-25.2.4.dist-info/entry_points.txt,sha256=PMZOIYzCifHMTe4pK3HbhxUwxjFaZizYlLD0td4Isb0,66
+essreduce-25.2.4.dist-info/top_level.txt,sha256=0JxTCgMKPLKtp14wb1-RKisQPQWX7i96innZNvHBr-s,4
+essreduce-25.2.4.dist-info/RECORD,,