essreduce-25.2.2-py3-none-any.whl → essreduce-25.2.3-py3-none-any.whl
- ess/reduce/live/raw.py +1 -1
- ess/reduce/streaming.py +37 -5
- ess/reduce/time_of_flight/fakes.py +30 -133
- ess/reduce/time_of_flight/toa_to_tof.py +97 -39
- ess/reduce/time_of_flight/types.py +2 -2
- {essreduce-25.2.2.dist-info → essreduce-25.2.3.dist-info}/METADATA +1 -1
- {essreduce-25.2.2.dist-info → essreduce-25.2.3.dist-info}/RECORD +11 -11
- {essreduce-25.2.2.dist-info → essreduce-25.2.3.dist-info}/LICENSE +0 -0
- {essreduce-25.2.2.dist-info → essreduce-25.2.3.dist-info}/WHEEL +0 -0
- {essreduce-25.2.2.dist-info → essreduce-25.2.3.dist-info}/entry_points.txt +0 -0
- {essreduce-25.2.2.dist-info → essreduce-25.2.3.dist-info}/top_level.txt +0 -0
ess/reduce/live/raw.py
CHANGED
@@ -639,7 +639,7 @@ def position_with_noisy_replicas(
     # "Paint" the short array of noise on top of the (replicated) position data.
     noise = sc.concat(
         [position_noise] * ceil(size / position_noise.size), dim=noise_dim
-    )[:size].fold(dim=noise_dim, sizes={'replica': replicas, position.
+    )[:size].fold(dim=noise_dim, sizes={'replica': replicas, **position.sizes})
     return sc.concat([position, noise + position], dim='replica')
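The fix restores a truncated `sizes` mapping in the `fold` call: the flat noise array is folded back into a `replica` dimension plus the original dims of `position`, which `**position.sizes` now supplies. A minimal sketch of the pattern with invented shapes (not code from the package):

```python
import scipp as sc

# Hypothetical positions and a flat noise array of replicas * position.size
# elements, mimicking the shapes in position_with_noisy_replicas.
position = sc.zeros(dims=['detector'], shape=[4], unit='m', dtype='vector3')
replicas = 3
noise = sc.zeros(
    dims=['noise'], shape=[replicas * position.size], unit='m', dtype='vector3'
)
# The fixed line: splatting position.sizes supplies the remaining output dims.
folded = noise.fold(dim='noise', sizes={'replica': replicas, **position.sizes})
print(folded.sizes)  # {'replica': 3, 'detector': 4}
```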
ess/reduce/streaming.py
CHANGED
@@ -147,7 +147,7 @@ class StreamProcessor:
         *,
         dynamic_keys: tuple[sciline.typing.Key, ...],
         target_keys: tuple[sciline.typing.Key, ...],
-        accumulators: dict[sciline.typing.Key, Accumulator
+        accumulators: dict[sciline.typing.Key, Accumulator | Callable[..., Accumulator]]
         | tuple[sciline.typing.Key, ...],
         allow_bypass: bool = False,
     ) -> None:
@@ -180,6 +180,8 @@ class StreamProcessor:
         for key in dynamic_keys:
             workflow[key] = None  # hack to prune branches

+        self._dynamic_keys = set(dynamic_keys)
+
         # Find and pre-compute static nodes as far down the graph as possible
         # See also https://github.com/scipp/sciline/issues/148.
         nodes = _find_descendants(workflow, dynamic_keys)
@@ -194,12 +196,19 @@ class StreamProcessor:
             if isinstance(accumulators, dict)
             else {key: EternalAccumulator() for key in accumulators}
         )
+
+        # Map each accumulator to its dependent dynamic keys
+        graph = workflow.underlying_graph
+        self._accumulator_dependencies = {
+            acc_key: nx.ancestors(graph, acc_key) & self._dynamic_keys
+            for acc_key in self._accumulators
+            if acc_key in graph
+        }
+
         # Depending on the target_keys, some accumulators can be unused and should not
         # be computed when adding a chunk.
         self._accumulators = {
-            key: value
-            for key, value in self._accumulators.items()
-            if key in self._process_chunk_workflow.underlying_graph
+            key: value for key, value in self._accumulators.items() if key in graph
         }
         # Create accumulators unless instances were passed. This allows for initializing
         # accumulators with arguments that depend on the workflow such as bin edges,
@@ -242,7 +251,30 @@ class StreamProcessor:
         ----------
         chunks:
             Chunks to be processed.
+
+        Raises
+        ------
+        ValueError
+            If non-dynamic keys are provided in chunks.
+            If accumulator computation requires dynamic keys not provided in chunks.
         """
+        non_dynamic = set(chunks) - self._dynamic_keys
+        if non_dynamic:
+            raise ValueError(
+                f"Can only update dynamic keys. Got non-dynamic keys: {non_dynamic}"
+            )
+
+        accumulators_to_update = []
+        for acc_key, deps in self._accumulator_dependencies.items():
+            if deps.isdisjoint(chunks.keys()):
+                continue
+            if not deps.issubset(chunks.keys()):
+                raise ValueError(
+                    f"Accumulator '{acc_key}' requires dynamic keys "
+                    f"{deps - chunks.keys()} not provided in the current chunk."
+                )
+            accumulators_to_update.append(acc_key)
+
         for key, value in chunks.items():
             self._process_chunk_workflow[key] = value
             # There can be dynamic keys that do not "terminate" in any accumulator. In
@@ -250,7 +282,7 @@ class StreamProcessor:
             # the target keys.
             if self._allow_bypass:
                 self._finalize_workflow[key] = value
-        to_accumulate = self._process_chunk_workflow.compute(
+        to_accumulate = self._process_chunk_workflow.compute(accumulators_to_update)
         for key, processed in to_accumulate.items():
             self._accumulators[key].push(processed)

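Taken together, the streaming changes let a chunk update a subset of the dynamic keys: each accumulator now records which dynamic keys feed it, and only accumulators whose dependencies are fully covered by the chunk are recomputed. A minimal sketch of that behaviour; the toy workflow and key names are invented, while `StreamProcessor`, `add_chunk`, and the accumulator handling come from this diff:

```python
from typing import NewType

import sciline
import scipp as sc

from ess.reduce.streaming import StreamProcessor

Detector = NewType('Detector', sc.Variable)  # dynamic key
Monitor = NewType('Monitor', sc.Variable)  # dynamic key
DetectorCounts = NewType('DetectorCounts', sc.Variable)  # accumulated
MonitorCounts = NewType('MonitorCounts', sc.Variable)  # accumulated
Normalized = NewType('Normalized', sc.Variable)  # target


def detector_counts(det: Detector) -> DetectorCounts:
    return DetectorCounts(det.sum())


def monitor_counts(mon: Monitor) -> MonitorCounts:
    return MonitorCounts(mon.sum())


def normalized(det: DetectorCounts, mon: MonitorCounts) -> Normalized:
    return Normalized(det / mon)


processor = StreamProcessor(
    sciline.Pipeline([detector_counts, monitor_counts, normalized]),
    dynamic_keys=(Detector, Monitor),
    target_keys=(Normalized,),
    # Tuple form -> one EternalAccumulator per key; per the widened signature,
    # a dict of instances or accumulator factories is also accepted.
    accumulators=(DetectorCounts, MonitorCounts),
)
chunk = sc.linspace('event', 1.0, 2.0, num=10)
processor.add_chunk({Detector: chunk, Monitor: chunk})
# New in 25.2.3: a monitor-only chunk updates MonitorCounts but leaves
# DetectorCounts untouched, since its only dynamic ancestor (Monitor) is present.
processor.add_chunk({Monitor: chunk})
# Passing a non-dynamic key (e.g. Normalized) now raises ValueError.
```

Retrieving the accumulated target values is unchanged by this release and omitted here.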
ess/reduce/time_of_flight/fakes.py
CHANGED
@@ -79,144 +79,41 @@ class FakeBeamline:
         return nx_event_data, raw_data


-wfm1_chopper = DiskChopper(
-    frequency=sc.scalar(-70.0, unit="Hz"),
-    beam_position=sc.scalar(0.0, unit="deg"),
-    phase=sc.scalar(-47.10, unit="deg"),
-    axle_position=sc.vector(value=[0, 0, 6.6], unit="m"),
-    slit_begin=sc.array(
-        dims=["cutout"],
-        values=np.array([83.71, 140.49, 193.26, 242.32, 287.91, 330.3]) + 15.0,
-        unit="deg",
-    ),
-    slit_end=sc.array(
-        dims=["cutout"],
-        values=np.array([94.7, 155.79, 212.56, 265.33, 314.37, 360.0]) + 15.0,
-        unit="deg",
-    ),
-    slit_height=sc.scalar(10.0, unit="cm"),
-    radius=sc.scalar(30.0, unit="cm"),
-)
-
-wfm2_chopper = DiskChopper(
-    frequency=sc.scalar(-70.0, unit="Hz"),
-    beam_position=sc.scalar(0.0, unit="deg"),
-    phase=sc.scalar(-76.76, unit="deg"),
-    axle_position=sc.vector(value=[0, 0, 7.1], unit="m"),
-    slit_begin=sc.array(
-        dims=["cutout"],
-        values=np.array([65.04, 126.1, 182.88, 235.67, 284.73, 330.32]) + 15.0,
-        unit="deg",
-    ),
-    slit_end=sc.array(
-        dims=["cutout"],
-        values=np.array([76.03, 141.4, 202.18, 254.97, 307.74, 360.0]) + 15.0,
-        unit="deg",
-    ),
-    slit_height=sc.scalar(10.0, unit="cm"),
-    radius=sc.scalar(30.0, unit="cm"),
-)
-
-foc1_chopper = DiskChopper(
-    frequency=sc.scalar(-56.0, unit="Hz"),
-    beam_position=sc.scalar(0.0, unit="deg"),
-    phase=sc.scalar(-62.40, unit="deg"),
-    axle_position=sc.vector(value=[0, 0, 8.8], unit="m"),
-    slit_begin=sc.array(
-        dims=["cutout"],
-        values=np.array([74.6, 139.6, 194.3, 245.3, 294.8, 347.2]),
-        unit="deg",
-    ),
-    slit_end=sc.array(
-        dims=["cutout"],
-        values=np.array([95.2, 162.8, 216.1, 263.1, 310.5, 371.6]),
-        unit="deg",
-    ),
-    slit_height=sc.scalar(10.0, unit="cm"),
-    radius=sc.scalar(30.0, unit="cm"),
-)
-
-foc2_chopper = DiskChopper(
-    frequency=sc.scalar(-28.0, unit="Hz"),
-    beam_position=sc.scalar(0.0, unit="deg"),
-    phase=sc.scalar(-12.27, unit="deg"),
-    axle_position=sc.vector(value=[0, 0, 15.9], unit="m"),
-    slit_begin=sc.array(
-        dims=["cutout"],
-        values=np.array([98.0, 154.0, 206.8, 255.0, 299.0, 344.65]),
-        unit="deg",
-    ),
-    slit_end=sc.array(
-        dims=["cutout"],
-        values=np.array([134.6, 190.06, 237.01, 280.88, 323.56, 373.76]),
-        unit="deg",
-    ),
-    slit_height=sc.scalar(10.0, unit="cm"),
-    radius=sc.scalar(30.0, unit="cm"),
-)
-
-pol_chopper = DiskChopper(
-    frequency=sc.scalar(-14.0, unit="Hz"),
-    beam_position=sc.scalar(0.0, unit="deg"),
-    phase=sc.scalar(0.0, unit="deg"),
-    axle_position=sc.vector(value=[0, 0, 17.0], unit="m"),
-    slit_begin=sc.array(
-        dims=["cutout"],
-        values=np.array([40.0]),
-        unit="deg",
-    ),
-    slit_end=sc.array(
-        dims=["cutout"],
-        values=np.array([240.0]),
-        unit="deg",
-    ),
-    slit_height=sc.scalar(10.0, unit="cm"),
-    radius=sc.scalar(30.0, unit="cm"),
-)
-
-
-def wfm_choppers():
-    return {
-        "wfm1": wfm1_chopper,
-        "wfm2": wfm2_chopper,
-        "foc1": foc1_chopper,
-        "foc2": foc2_chopper,
-        "pol": pol_chopper,
-    }
-
-
 def psc_choppers():
     return {
-
-            frequency=
-            beam_position=
-            phase=
-            axle_position=
-            slit_begin=
-            slit_end=
-            slit_height=
-            radius=
+        "chopper": DiskChopper(
+            frequency=sc.scalar(-14.0, unit="Hz"),
+            beam_position=sc.scalar(0.0, unit="deg"),
+            phase=sc.scalar(-85.0, unit="deg"),
+            axle_position=sc.vector(value=[0, 0, 8.0], unit="m"),
+            slit_begin=sc.array(dims=["cutout"], values=[0.0], unit="deg"),
+            slit_end=sc.array(dims=["cutout"], values=[3.0], unit="deg"),
+            slit_height=sc.scalar(10.0, unit="cm"),
+            radius=sc.scalar(30.0, unit="cm"),
         )
-        for name, ch in wfm_choppers().items()
     }


-def
-    return
-
-
-
-
-
-            dims=["cutout"],
-            values=np.array([
-            unit="
+def pulse_skipping_choppers():
+    return {
+        "chopper": DiskChopper(
+            frequency=sc.scalar(-14.0, unit="Hz"),
+            beam_position=sc.scalar(0.0, unit="deg"),
+            phase=sc.scalar(-35.0, unit="deg"),
+            axle_position=sc.vector(value=[0, 0, 8.0], unit="m"),
+            slit_begin=sc.array(dims=["cutout"], values=np.array([0.0]), unit="deg"),
+            slit_end=sc.array(dims=["cutout"], values=np.array([33.0]), unit="deg"),
+            slit_height=sc.scalar(10.0, unit="cm"),
+            radius=sc.scalar(30.0, unit="cm"),
         ),
-
-
-
-            unit="deg",
+        "pulse_skipping": DiskChopper(
+            frequency=sc.scalar(-7.0, unit="Hz"),
+            beam_position=sc.scalar(0.0, unit="deg"),
+            phase=sc.scalar(-10.0, unit="deg"),
+            axle_position=sc.vector(value=[0, 0, 15.0], unit="m"),
+            slit_begin=sc.array(dims=["cutout"], values=np.array([0.0]), unit="deg"),
+            slit_end=sc.array(dims=["cutout"], values=np.array([120.0]), unit="deg"),
+            slit_height=sc.scalar(10.0, unit="cm"),
+            radius=sc.scalar(30.0, unit="cm"),
        ),
-
-    radius=sc.scalar(30.0, unit="cm"),
-)
+    }
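The rewrite replaces the WFM chopper cascade with two minimal sets: a single-frame chopper and a pulse-skipping pair. The -7 Hz `pulse_skipping` chopper runs at half the 14 Hz source frequency, so it transmits every second pulse. A quick illustrative check of the implied periods (frequencies from the diff; the arithmetic itself is added here):

```python
import scipp as sc

source_frequency = sc.scalar(14.0, unit='Hz')
chopper_frequency = sc.scalar(7.0, unit='Hz')  # magnitude of the -7 Hz chopper
pulse_stride = round((source_frequency / chopper_frequency).value)  # -> 2
pulse_period = (1.0 / source_frequency).to(unit='ms')
frame_period = pulse_period * pulse_stride
print(pulse_stride, pulse_period.value, frame_period.value)  # 2 ~71.43 ~142.86
```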
ess/reduce/time_of_flight/toa_to_tof.py
CHANGED
@@ -32,20 +32,6 @@ from .types import (
 )


-def extract_ltotal(da: RawData) -> Ltotal:
-    """
-    Extract the total length of the flight path from the source to the detector from the
-    detector data.
-
-    Parameters
-    ----------
-    da:
-        Raw detector data loaded from a NeXus file, e.g., NXdetector containing
-        NXevent_data.
-    """
-    return Ltotal(da.coords["Ltotal"])
-
-
 def _mask_large_uncertainty(table: sc.DataArray, error_threshold: float):
     """
     Mask regions with large uncertainty with NaNs.
@@ -389,6 +375,56 @@ def _time_of_flight_data_histogram(
     return rebinned.assign_coords(tof=tofs)


+def _guess_pulse_stride_offset(
+    pulse_index: sc.Variable,
+    ltotal: sc.Variable,
+    event_time_offset: sc.Variable,
+    pulse_stride: int,
+    interp: Callable,
+) -> int:
+    """
+    Using the minimum ``event_time_zero`` to calculate a reference time when computing
+    the time-of-flight for the neutron events makes the workflow depend on when the
+    first event was recorded. There is no straightforward way to know if we started
+    recording at the beginning of a frame, or half-way through a frame, without looking
+    at the chopper logs. This can be manually corrected using the pulse_stride_offset
+    parameter, but this makes automatic reduction of the data difficult.
+    See https://github.com/scipp/essreduce/issues/184.
+
+    Here, we perform a simple guess for the ``pulse_stride_offset`` if it is not
+    provided.
+    We choose a few random events, compute the time-of-flight for every possible value
+    of pulse_stride_offset, and return the value that yields the least number of NaNs
+    in the computed time-of-flight.
+
+    Parameters
+    ----------
+    pulse_index:
+        Pulse index for every event.
+    ltotal:
+        Total length of the flight path from the source to the detector for each event.
+    event_time_offset:
+        Time of arrival of the neutron at the detector for each event.
+    pulse_stride:
+        Stride of used pulses.
+    interp:
+        2D interpolator for the lookup table.
+    """
+    tofs = {}
+    # Choose a few random events to compute the time-of-flight
+    inds = np.random.choice(
+        len(event_time_offset), min(5000, len(event_time_offset)), replace=False
+    )
+    pulse_index_values = pulse_index.values[inds]
+    ltotal_values = ltotal.values[inds]
+    etos_values = event_time_offset.values[inds]
+    for i in range(pulse_stride):
+        pulse_inds = (pulse_index_values + i) % pulse_stride
+        tofs[i] = interp((pulse_inds, ltotal_values, etos_values))
+    # Find the entry in the list with the least number of nan values
+    return sorted(tofs, key=lambda x: np.isnan(tofs[x]).sum())[0]
+
+
 def _time_of_flight_data_events(
     da: sc.DataArray,
     lookup: sc.DataArray,
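The selection at the end of `_guess_pulse_stride_offset` is just an argmin over NaN counts. A toy illustration of that one step (arrays invented; the real code gets these from the lookup-table interpolator):

```python
import numpy as np

# Pretend time-of-flight results for each candidate offset: offset 1 keeps
# most events inside the valid lookup region, offset 0 does not.
tofs = {0: np.array([np.nan, np.nan, 3.2]), 1: np.array([1.1, 2.2, np.nan])}
best = sorted(tofs, key=lambda x: np.isnan(tofs[x]).sum())[0]
print(best)  # 1
```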
@@ -399,28 +435,6 @@ def _time_of_flight_data_events(
 ) -> sc.DataArray:
     etos = da.bins.coords["event_time_offset"]
     eto_unit = elem_unit(etos)
-    pulse_period = pulse_period.to(unit=eto_unit)
-    frame_period = pulse_period * pulse_stride
-
-    # TODO: Finding the `tmin` below will not work in the case were data is processed
-    # in chunks, as taking the minimum time in each chunk will lead to inconsistent
-    # pulse indices (this will be the case in live data, or when using the
-    # StreamProcessor). We could instead read it from the first chunk and store it?
-
-    # Compute a pulse index for every event: it is the index of the pulse within a
-    # frame period. When there is no pulse skipping, those are all zero. When there is
-    # pulse skipping, the index ranges from zero to pulse_stride - 1.
-    tmin = da.bins.coords['event_time_zero'].min()
-    pulse_index = (
-        (
-            (da.bins.coords['event_time_zero'] - tmin).to(unit=eto_unit)
-            + 0.5 * pulse_period
-        )
-        % frame_period
-    ) // pulse_period
-    # Apply the pulse_stride_offset
-    pulse_index += pulse_stride_offset
-    pulse_index %= pulse_stride

     # Create 2D interpolator
     interp = _make_tof_interpolator(
@@ -430,7 +444,51 @@ def _time_of_flight_data_events(
     # Operate on events (broadcast distances to all events)
     ltotal = sc.bins_like(etos, ltotal).bins.constituents["data"]
     etos = etos.bins.constituents["data"]
-
+
+    # Compute a pulse index for every event: it is the index of the pulse within a
+    # frame period. When there is no pulse skipping, those are all zero. When there is
+    # pulse skipping, the index ranges from zero to pulse_stride - 1.
+    if pulse_stride == 1:
+        pulse_index = sc.zeros(sizes=etos.sizes)
+    else:
+        etz_unit = 'ns'
+        etz = (
+            da.bins.coords["event_time_zero"]
+            .bins.constituents["data"]
+            .to(unit=etz_unit, copy=False)
+        )
+        pulse_period = pulse_period.to(unit=etz_unit, dtype=int)
+        frame_period = pulse_period * pulse_stride
+        # Define a common reference time using epoch as a base, but making sure that it
+        # is aligned with the pulse_period and the frame_period.
+        # We need to use a global reference time instead of simply taking the minimum
+        # event_time_zero because the events may arrive in chunks, and the first event
+        # may not be the first event of the first pulse for all chunks. This would lead
+        # to inconsistent pulse indices.
+        epoch = sc.datetime(0, unit=etz_unit)
+        diff_to_epoch = (etz.min() - epoch) % pulse_period
+        # Here we offset the reference by half a pulse period to avoid errors from
+        # fluctuations in the event_time_zeros in the data. They are triggered by the
+        # neutron source, and may not always be exactly separated by the pulse period.
+        # While fluctuations will exist, they will be small, and offsetting the times
+        # by half a pulse period is a simple enough fix.
+        reference = epoch + diff_to_epoch - (pulse_period // 2)
+        # Use in-place operations to avoid large allocations
+        pulse_index = etz - reference
+        pulse_index %= frame_period
+        pulse_index //= pulse_period
+
+        # Apply the pulse_stride_offset
+        if pulse_stride_offset is None:
+            pulse_stride_offset = _guess_pulse_stride_offset(
+                pulse_index=pulse_index,
+                ltotal=ltotal,
+                event_time_offset=etos,
+                pulse_stride=pulse_stride,
+                interp=interp,
+            )
+        pulse_index += pulse_stride_offset
+        pulse_index %= pulse_stride

     # Compute time-of-flight for all neutrons using the interpolator
     tofs = sc.array(
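The new pulse-index computation replaces the per-chunk minimum `event_time_zero` with an epoch-aligned reference, so an event maps to the same index no matter how the stream is chunked. A plain-numpy sketch of the arithmetic with invented timestamps (ns since epoch, 14 Hz source, pulse_stride = 2):

```python
import numpy as np

pulse_period = 71_428_571  # ~1/14 s in ns
pulse_stride = 2
frame_period = pulse_period * pulse_stride
# Four events, roughly one per pulse: pulses 0, 1, 2, 3 -> indices 0, 1, 0, 1.
etz = np.array([1_000_000, 72_000_000, 143_000_000, 214_500_000])
# Align the reference with the pulse grid (epoch = 0), then back it off by half
# a pulse period so jitter in event_time_zero cannot flip an event across pulses.
diff_to_epoch = etz.min() % pulse_period
reference = diff_to_epoch - pulse_period // 2
pulse_index = ((etz - reference) % frame_period) // pulse_period
print(pulse_index)  # [0 1 0 1]
```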
@@ -535,7 +593,7 @@ def default_parameters() -> dict:
     return {
         PulsePeriod: 1.0 / sc.scalar(14.0, unit="Hz"),
         PulseStride: 1,
-        PulseStrideOffset:
+        PulseStrideOffset: None,
         DistanceResolution: sc.scalar(0.1, unit="m"),
         TimeResolution: sc.scalar(250.0, unit='us'),
         LookupTableRelativeErrorThreshold: 0.1,
@@ -546,4 +604,4 @@ def providers() -> tuple[Callable]:
     """
     Providers of the time-of-flight workflow.
     """
-    return (compute_tof_lookup_table,
+    return (compute_tof_lookup_table, time_of_flight_data)
ess/reduce/time_of_flight/types.py
CHANGED
@@ -101,10 +101,10 @@ PulseStride = NewType("PulseStride", int)
 Stride of used pulses. Usually 1, but may be a small integer when pulse-skipping.
 """

-PulseStrideOffset = NewType("PulseStrideOffset", int)
+PulseStrideOffset = NewType("PulseStrideOffset", int | None)
 """
 When pulse-skipping, the offset of the first pulse in the stride. This is typically
-zero but can be a small integer < pulse_stride.
+zero but can be a small integer < pulse_stride. If None, a guess is made.
 """

 RawData = NewType("RawData", sc.DataArray)
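With `PulseStrideOffset` widened to `int | None`, the new default of `None` (see `default_parameters` above) enables the automatic guess, while an integer pins the offset as before. A short sketch; the import paths simply follow the file layout in this diff:

```python
from ess.reduce.time_of_flight.toa_to_tof import default_parameters
from ess.reduce.time_of_flight.types import PulseStrideOffset

params = default_parameters()
assert params[PulseStrideOffset] is None  # None -> guessed from the data
params[PulseStrideOffset] = 1  # or pin it explicitly, as in 25.2.2
```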
{essreduce-25.2.2.dist-info → essreduce-25.2.3.dist-info}/RECORD
CHANGED
@@ -3,12 +3,12 @@ ess/reduce/data.py,sha256=vaoeAJ6EpK1YghOiAALLdWiW17TgUnnnt0H-RGiGzXk,3756
 ess/reduce/logging.py,sha256=6n8Czq4LZ3OK9ENlKsWSI1M3KvKv6_HSoUiV4__IUlU,357
 ess/reduce/parameter.py,sha256=4sCfoKOI2HuO_Q7JLH_jAXnEOFANSn5P3NdaOBzhJxc,4635
 ess/reduce/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-ess/reduce/streaming.py,sha256
+ess/reduce/streaming.py,sha256=ffFiWpq9AK1GOfRG-rlvT_Gz7HMBnE4FD0qPZiej4Gg,10306
 ess/reduce/ui.py,sha256=zmorAbDwX1cU3ygDT--OP58o0qU7OBcmJz03jPeYSLA,10884
 ess/reduce/uncertainty.py,sha256=LR4O6ApB6Z-W9gC_XW0ajupl8yFG-du0eee1AX_R-gk,6990
 ess/reduce/workflow.py,sha256=sL34T_2Cjl_8iFlegujxI9VyOUwo6erVC8pOXnfWgYw,3060
 ess/reduce/live/__init__.py,sha256=jPQVhihRVNtEDrE20PoKkclKV2aBF1lS7cCHootgFgI,204
-ess/reduce/live/raw.py,sha256=
+ess/reduce/live/raw.py,sha256=pzXsPZQERtUm5tabTXjxd-XHH4WDDP13TTBG0lGPcqg,25262
 ess/reduce/live/roi.py,sha256=Hs-pW98k41WU6Kl3UQ41kQawk80c2QNOQ_WNctLzDPE,3795
 ess/reduce/live/workflow.py,sha256=bsbwvTqPhRO6mC__3b7MgU7DWwAnOvGvG-t2n22EKq8,4285
 ess/reduce/nexus/__init__.py,sha256=59bxKkNYg8DYcSykNvH6nCa5SYchJC4SbgZEKhkNdYc,967
@@ -19,11 +19,11 @@ ess/reduce/nexus/types.py,sha256=Az_pZtaTIlEAA4Po_YOLabez8w4HeHcr0asY3rS6BXg,967
 ess/reduce/nexus/workflow.py,sha256=jzdh0ubp9Mmb98a04KIeM8Xo9bpAqpnsfwFWz2VllnQ,23676
 ess/reduce/scripts/grow_nexus.py,sha256=hET3h06M0xlJd62E3palNLFvJMyNax2kK4XyJcOhl-I,3387
 ess/reduce/time_of_flight/__init__.py,sha256=92w88NpGIBysuqCPSvdZ_XgBd7cFAk9qaO9zflpUbfM,1097
-ess/reduce/time_of_flight/fakes.py,sha256=
+ess/reduce/time_of_flight/fakes.py,sha256=rlBgceFVbHIhP_xPyUzYVf-2wEu--G8hA-kxPzAnPbM,4236
 ess/reduce/time_of_flight/simulation.py,sha256=CireE9m9kFbUXhGUeY2L3SoMy7kpqopxKj__h4tSKzo,2614
 ess/reduce/time_of_flight/to_events.py,sha256=_5CcUOWvguDcK8uo2pPZWzXnWoiZhC1w-zF8xysaIvU,4339
-ess/reduce/time_of_flight/toa_to_tof.py,sha256=
-ess/reduce/time_of_flight/types.py,sha256=
+ess/reduce/time_of_flight/toa_to_tof.py,sha256=bt28z6wixS4AegBxsl1uYBREP08TyAs8Y9Z738YcXE4,23476
+ess/reduce/time_of_flight/types.py,sha256=Iv1XGLbrZ9bD4CPAVhsIPkAaB46YC7l7yf5XweljLqk,5047
 ess/reduce/widgets/__init__.py,sha256=SoSHBv8Dc3QXV9HUvPhjSYWMwKTGYZLpsWwsShIO97Q,5325
 ess/reduce/widgets/_base.py,sha256=_wN3FOlXgx_u0c-A_3yyoIH-SdUvDENGgquh9S-h5GI,4852
 ess/reduce/widgets/_binedges_widget.py,sha256=ZCQsGjYHnJr9GFUn7NjoZc1CdsnAzm_fMzyF-fTKKVY,2785
@@ -36,9 +36,9 @@ ess/reduce/widgets/_spinner.py,sha256=2VY4Fhfa7HMXox2O7UbofcdKsYG-AJGrsgGJB85nDX
 ess/reduce/widgets/_string_widget.py,sha256=iPAdfANyXHf-nkfhgkyH6gQDklia0LebLTmwi3m-iYQ,1482
 ess/reduce/widgets/_switchable_widget.py,sha256=fjKz99SKLhIF1BLgGVBSKKn3Lu_jYBwDYGeAjbJY3Q8,2390
 ess/reduce/widgets/_vector_widget.py,sha256=aTaBqCFHZQhrIoX6-sSqFWCPePEW8HQt5kUio8jP1t8,1203
-essreduce-25.2.
-essreduce-25.2.
-essreduce-25.2.
-essreduce-25.2.
-essreduce-25.2.
-essreduce-25.2.
+essreduce-25.2.3.dist-info/LICENSE,sha256=nVEiume4Qj6jMYfSRjHTM2jtJ4FGu0g-5Sdh7osfEYw,1553
+essreduce-25.2.3.dist-info/METADATA,sha256=aKumfO4ID_7M42EvaeTFiVAW2acxYhi0--N7E4U9lfY,3708
+essreduce-25.2.3.dist-info/WHEEL,sha256=In9FTNxeP60KnTkGw7wk6mJPYd_dQSjEZmXdBdMCI-8,91
+essreduce-25.2.3.dist-info/entry_points.txt,sha256=PMZOIYzCifHMTe4pK3HbhxUwxjFaZizYlLD0td4Isb0,66
+essreduce-25.2.3.dist-info/top_level.txt,sha256=0JxTCgMKPLKtp14wb1-RKisQPQWX7i96innZNvHBr-s,4
+essreduce-25.2.3.dist-info/RECORD,,
{essreduce-25.2.2.dist-info → essreduce-25.2.3.dist-info}/LICENSE
File without changes

{essreduce-25.2.2.dist-info → essreduce-25.2.3.dist-info}/WHEEL
File without changes

{essreduce-25.2.2.dist-info → essreduce-25.2.3.dist-info}/entry_points.txt
File without changes

{essreduce-25.2.2.dist-info → essreduce-25.2.3.dist-info}/top_level.txt
File without changes