essreduce-25.2.6-py3-none-any.whl → essreduce-25.3.1-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- ess/reduce/nexus/_nexus_loader.py +5 -5
- ess/reduce/nexus/workflow.py +3 -1
- ess/reduce/streaming.py +91 -8
- ess/reduce/time_of_flight/__init__.py +1 -1
- ess/reduce/time_of_flight/{toa_to_tof.py → eto_to_tof.py} +106 -58
- {essreduce-25.2.6.dist-info → essreduce-25.3.1.dist-info}/METADATA +1 -1
- {essreduce-25.2.6.dist-info → essreduce-25.3.1.dist-info}/RECORD +11 -11
- {essreduce-25.2.6.dist-info → essreduce-25.3.1.dist-info}/WHEEL +1 -1
- {essreduce-25.2.6.dist-info → essreduce-25.3.1.dist-info}/LICENSE +0 -0
- {essreduce-25.2.6.dist-info → essreduce-25.3.1.dist-info}/entry_points.txt +0 -0
- {essreduce-25.2.6.dist-info → essreduce-25.3.1.dist-info}/top_level.txt +0 -0
ess/reduce/nexus/_nexus_loader.py
CHANGED

@@ -85,7 +85,7 @@ def load_metadata(
     entry_name: NeXusEntryName | None = None,
     definitions: Mapping | NoNewDefinitionsType = NoNewDefinitions,
 ) -> _Model:
-    with
+    with open_nexus_file(file_path, definitions=definitions) as f:
         entry = _unique_child_group(f, snx.NXentry, entry_name)
         return model.from_nexus_entry(entry)
 
@@ -113,7 +113,7 @@ def compute_component_position(dg: sc.DataGroup) -> sc.DataGroup:
     )
 
 
-def
+def open_nexus_file(
    file_path: FilePath | NeXusFile | NeXusGroup,
    definitions: Mapping | None | NoNewDefinitionsType = NoNewDefinitions,
    *,
@@ -212,7 +212,7 @@ def _open_component_parent(
     """Locate the parent group of a NeXus component."""
     file_path = location.filename
     entry_name = location.entry_name
-    with
+    with open_nexus_file(file_path, definitions=definitions) as f:
         entry = _unique_child_group(f, snx.NXentry, entry_name)
         if nx_class is snx.NXsample:
             yield entry
@@ -357,7 +357,7 @@ def load_data(
     :
         Data array with events or a histogram.
     """
-    with
+    with open_nexus_file(file_path, definitions=definitions) as f:
         entry = _unique_child_group(f, snx.NXentry, entry_name)
         instrument = _unique_child_group(entry, snx.NXinstrument, None)
         component = instrument[component_name]
@@ -554,7 +554,7 @@ def _parse_monitor(group: snx.Group) -> NeXusFileInfo:
 
 def read_nexus_file_info(file_path: FilePath | NeXusFile | NeXusGroup) -> NeXusFileInfo:
     """Opens and inspects a NeXus file, returning a summary of its contents."""
-    with
+    with open_nexus_file(file_path) as f:
         entry = _unique_child_group(f, snx.NXentry, None)
         instrument = _unique_child_group(entry, snx.NXinstrument, None)
         detectors = {}
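All of the call sites above now go through open_nexus_file, which accepts a file path, an already-open NeXus file, or a NeXus group, and is used as a context manager. A minimal sketch of the call pattern, with a hypothetical file name and the helper imported via ess.reduce.nexus as it is used in ess/reduce/nexus/workflow.py below (this is not code from the package):

    from ess.reduce import nexus

    # 'experiment.nxs' is a hypothetical path; an already-open file or group
    # would be passed through instead of being re-opened.
    with nexus.open_nexus_file("experiment.nxs") as f:
        # f behaves like a scippnexus group; list its top-level children
        # (typically a single NXentry).
        print(list(f.keys()))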
ess/reduce/nexus/workflow.py
CHANGED

@@ -62,7 +62,9 @@ def file_path_to_file_spec(
     filename: Filename[RunType], preopen: PreopenNeXusFile
 ) -> NeXusFileSpec[RunType]:
     return NeXusFileSpec[RunType](
-
+        nexus.open_nexus_file(filename, definitions=definitions)
+        if preopen
+        else filename
     )
 
 
ess/reduce/streaming.py
CHANGED

@@ -240,6 +240,22 @@ class StreamProcessor:
     processing based on the input keys. In particular, it is the responsibility of the
     user to ensure that the workflow is "linear" with respect to the dynamic keys up to
     the accumulation keys.
+
+    Similarly, the stream processor cannot determine from the workflow structure whether
+    context updates are compatible with the accumulated data. Accumulators are not
+    cleared automatically. This is best illustrated with an example:
+
+    - If the context is the detector rotation angle, and we accumulate I(Q) (or a
+      prerequisite of I(Q)), then updating the detector angle context is compatible with
+      previous data, assuming Q for each new chunk is computed based on the angle.
+    - If the context is the sample temperature, and we accumulate I(Q), then updating
+      the temperature context is not compatible with previous data. Accumulating I(Q, T)
+      could be compatible in this case.
+
+    Since the correctness cannot be determined from the workflow structure, we recommend
+    implementing processing steps in a way to catch such problems. For example, adding
+    the temperature as a coordinate to the I(Q) data array should allow for
+    automatically raising in the accumulator if the temperature changes.
     """
 
     def __init__(
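A minimal sketch of the guard suggested in the last paragraph of the docstring above: attaching the context value as a coordinate makes incompatible chunks fail loudly when they are combined. The names and values here are made up; only the idea comes from the docstring.

    import scipp as sc

    def with_temperature(iofq: sc.DataArray, temperature: sc.Variable) -> sc.DataArray:
        # Scipp refuses to add data arrays whose coordinates differ, so a scalar
        # 'temperature' coordinate turns a silent mix-up into an error.
        return iofq.assign_coords(temperature=temperature)

    iofq = sc.DataArray(sc.ones(dims=['Q'], shape=[4]))
    a = with_temperature(iofq.copy(), sc.scalar(4.2, unit='K'))
    b = with_temperature(iofq.copy(), sc.scalar(6.0, unit='K'))
    a + a  # fine: identical temperature coords
    # a + b  # raises, because the temperature coords differ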
@@ -247,6 +263,7 @@ class StreamProcessor:
         base_workflow: sciline.Pipeline,
         *,
         dynamic_keys: tuple[sciline.typing.Key, ...],
+        context_keys: tuple[sciline.typing.Key, ...] = (),
         target_keys: tuple[sciline.typing.Key, ...],
         accumulators: dict[sciline.typing.Key, Accumulator | Callable[..., Accumulator]]
         | tuple[sciline.typing.Key, ...],
@@ -260,7 +277,12 @@ class StreamProcessor:
         base_workflow:
             Workflow to be used for processing chunks.
         dynamic_keys:
-            Keys that are expected to be updated with each chunk.
+            Keys that are expected to be updated with each chunk. These keys cannot
+            depend on each other or on context_keys.
+        context_keys:
+            Keys that define context for processing chunks and may change occasionally.
+            These keys cannot overlap with dynamic_keys or depend on each other or on
+            dynamic_keys.
         target_keys:
             Keys to be computed and returned.
         accumulators:
@@ -275,21 +297,59 @@ class StreamProcessor:
         unless the values for these keys are valid for all chunks comprised in the
         final accumulators at the point where :py:meth:`finalize` is called.
         """
+        self._dynamic_keys = set(dynamic_keys)
+        self._context_keys = set(context_keys)
+
+        # Validate that dynamic and context keys do not overlap
+        overlap = self._dynamic_keys & self._context_keys
+        if overlap:
+            raise ValueError(f"Keys cannot be both dynamic and context: {overlap}")
+
+        # Check dynamic/context keys don't depend on other dynamic/context keys
+        graph = base_workflow.underlying_graph
+        special_keys = self._dynamic_keys | self._context_keys
+        for key in special_keys:
+            if key not in graph:
+                continue
+            ancestors = nx.ancestors(graph, key)
+            special_ancestors = ancestors & special_keys
+            downstream = 'Dynamic' if key in self._dynamic_keys else 'Context'
+            if special_ancestors:
+                raise ValueError(
+                    f"{downstream} key '{key}' depends on other dynamic/context keys: "
+                    f"{special_ancestors}. This is not supported."
+                )
+
         workflow = sciline.Pipeline()
         for key in target_keys:
             workflow[key] = base_workflow[key]
         for key in dynamic_keys:
             workflow[key] = None  # hack to prune branches
-
-
+        for key in context_keys:
+            workflow[key] = None
 
         # Find and pre-compute static nodes as far down the graph as possible
-
-
-
-        for key, value in base_workflow.compute(parents).items():
+        nodes = _find_descendants(workflow, dynamic_keys + context_keys)
+        last_static = _find_parents(workflow, nodes) - nodes
+        for key, value in base_workflow.compute(last_static).items():
             workflow[key] = value
 
+        # Nodes that may need updating on context change but should be cached otherwise.
+        dynamic_nodes = _find_descendants(workflow, dynamic_keys)
+        # Nodes as far "down" in the graph as possible, right before the dynamic nodes.
+        # This also includes target keys that are not dynamic but context-dependent.
+        context_to_cache = (
+            (_find_parents(workflow, dynamic_nodes) | set(target_keys)) - dynamic_nodes
+        ) & _find_descendants(workflow, context_keys)
+        graph = workflow.underlying_graph
+        self._context_key_to_cached_context_nodes_map = {
+            context_key: ({context_key} | nx.descendants(graph, context_key))
+            & context_to_cache
+            for context_key in self._context_keys
+            if context_key in graph
+        }
+
+        self._context_workflow = workflow.copy()
         self._process_chunk_workflow = workflow.copy()
         self._finalize_workflow = workflow.copy()
         self._accumulators = (
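The new validation at the top of __init__ walks the workflow graph to reject dynamic or context keys that depend on one another. A toy example of a pipeline it would reject, with made-up keys and providers (only the quoted error wording comes from the code above):

    from typing import NewType

    import sciline

    Chunk = NewType('Chunk', float)   # intended dynamic key
    Angle = NewType('Angle', float)   # intended context key
    Result = NewType('Result', float)

    def angle_from_chunk(chunk: Chunk) -> Angle:
        # This provider makes the context key depend on the dynamic key,
        # which is exactly what the validation forbids.
        return Angle(chunk)

    def result(chunk: Chunk, angle: Angle) -> Result:
        return Result(chunk + angle)

    workflow = sciline.Pipeline([angle_from_chunk, result])
    # StreamProcessor(workflow, dynamic_keys=(Chunk,), context_keys=(Angle,),
    #                 target_keys=(Result,), accumulators=(Result,))
    # -> ValueError: "Context key ... depends on other dynamic/context keys: ..."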
@@ -299,7 +359,6 @@ class StreamProcessor:
         )
 
         # Map each accumulator to its dependent dynamic keys
-        graph = workflow.underlying_graph
         self._accumulator_dependencies = {
             acc_key: nx.ancestors(graph, acc_key) & self._dynamic_keys
             for acc_key in self._accumulators
@@ -323,6 +382,30 @@ class StreamProcessor:
         self._target_keys = target_keys
         self._allow_bypass = allow_bypass
 
+    def set_context(self, context: dict[sciline.typing.Key, Any]) -> None:
+        """
+        Set the context for processing chunks.
+
+        Parameters
+        ----------
+        context:
+            Context to be set.
+        """
+        needs_recompute = set()
+        for key in context:
+            if key not in self._context_keys:
+                raise ValueError(f"Key '{key}' is not a context key")
+            needs_recompute |= self._context_key_to_cached_context_nodes_map[key]
+        for key, value in context.items():
+            self._context_workflow[key] = value
+        results = self._context_workflow.compute(needs_recompute)
+        for key, value in results.items():
+            if key in self._target_keys:
+                # Context-dependent key is direct target, independent of dynamic nodes.
+                self._finalize_workflow[key] = value
+            else:
+                self._process_chunk_workflow[key] = value
+
     def add_chunk(
         self, chunks: dict[sciline.typing.Key, Any]
     ) -> dict[sciline.typing.Key, Any]:
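Taken together, a context-aware streaming session could look like the following fully synthetic sketch. The toy pipeline, key names, and values are invented; StreamProcessor, its keyword arguments, set_context, add_chunk, and finalize come from this module, and the default accumulator is assumed to simply sum the pushed values.

    from typing import NewType

    import sciline
    import scipp as sc

    from ess.reduce.streaming import StreamProcessor

    Events = NewType('Events', sc.Variable)   # dynamic: new data with every chunk
    Scale = NewType('Scale', sc.Variable)     # context: changes occasionally
    Scaled = NewType('Scaled', sc.Variable)   # accumulated target

    def scaled(events: Events, scale: Scale) -> Scaled:
        return Scaled(events * scale)

    processor = StreamProcessor(
        sciline.Pipeline([scaled]),
        dynamic_keys=(Events,),
        context_keys=(Scale,),
        target_keys=(Scaled,),
        accumulators=(Scaled,),
    )

    processor.set_context({Scale: sc.scalar(2.0)})   # recompute cached context nodes
    processor.add_chunk({Events: sc.scalar(1.0)})
    processor.add_chunk({Events: sc.scalar(3.0)})
    print(processor.finalize())                      # accumulated Scaled result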
ess/reduce/time_of_flight/__init__.py
CHANGED

@@ -6,9 +6,9 @@ Utilities for computing real neutron time-of-flight from chopper settings and
 neutron time-of-arrival at the detectors.
 """
 
+from .eto_to_tof import default_parameters, providers, resample_tof_data
 from .simulation import simulate_beamline
 from .to_events import to_events
-from .toa_to_tof import default_parameters, providers, resample_tof_data
 from .types import (
     DistanceResolution,
     LookupTableRelativeErrorThreshold,
ess/reduce/time_of_flight/{toa_to_tof.py → eto_to_tof.py}
CHANGED

@@ -284,38 +284,90 @@ def compute_tof_lookup_table(
     )
 
 
-
-    lookup: sc.DataArray, distance_unit: str, time_unit: str
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-    (
-
-
-
-
-
-
-
-
+class TofInterpolator:
+    def __init__(self, lookup: sc.DataArray, distance_unit: str, time_unit: str):
+        from scipy.interpolate import RegularGridInterpolator
+
+        # TODO: to make use of multi-threading, we could write our own interpolator.
+        # This should be simple enough as we are making the bins linspace, so computing
+        # bin indices is fast.
+
+        self._distance_unit = distance_unit
+        self._time_unit = time_unit
+
+        # In the pulse dimension, it could be that for a given event_time_offset and
+        # distance, a tof value is finite in one pulse and NaN in the other.
+        # When using the bilinear interpolation, even if the value of the requested
+        # point is exactly 0 or 1 (in the case of pulse_stride=2), the interpolator
+        # will still use all 4 corners surrounding the point. This means that if one of
+        # the corners is NaN, the result will be NaN.
+        # Here, we use a trick where we duplicate the lookup values in the 'pulse'
+        # dimension so that the interpolator has values on bin edges for that dimension.
+        # The interpolator raises an error if axes coordinates are not strictly
+        # monotonic, so we cannot use e.g. [-0.5, 0.5, 0.5, 1.5] in the case of
+        # pulse_stride=2. Instead we use [-0.25, 0.25, 0.75, 1.25].
+        base_grid = np.arange(float(lookup.sizes["pulse"]))
+        self._interpolator = RegularGridInterpolator(
+            (
+                np.sort(np.concatenate([base_grid - 0.25, base_grid + 0.25])),
+                lookup.coords["distance"].to(unit=distance_unit, copy=False).values,
+                lookup.coords["event_time_offset"]
+                .to(unit=self._time_unit, copy=False)
+                .values,
+            ),
+            np.repeat(
+                lookup.data.to(unit=self._time_unit, copy=False).values, 2, axis=0
+            ),
+            method="linear",
+            bounds_error=False,
+            fill_value=np.nan,
+        )
+
+    def __call__(
+        self,
+        pulse_index: sc.Variable,
+        ltotal: sc.Variable,
+        event_time_offset: sc.Variable,
+    ) -> sc.Variable:
+        if pulse_index.unit not in ("", None):
+            raise sc.UnitError(
+                "pulse_index must have unit dimensionless or None, "
+                f"but got unit: {pulse_index.unit}."
+            )
+        if ltotal.unit != self._distance_unit:
+            raise sc.UnitError(
+                f"ltotal must have unit: {self._distance_unit}, "
+                f"but got unit: {ltotal.unit}."
+            )
+        if event_time_offset.unit != self._time_unit:
+            raise sc.UnitError(
+                f"event_time_offset must have unit: {self._time_unit}, "
+                f"but got unit: {event_time_offset.unit}."
+            )
+        out_dims = event_time_offset.dims
+        pulse_index = pulse_index.values
+        ltotal = ltotal.values
+        event_time_offset = event_time_offset.values
+        # Check bounds for pulse_index and ltotal.
+        # We do not check the event_time_offset dimension because histogrammed monitors
+        # often have binning which can be anything (does not necessarily stop at 71ms).
+        # Raising an error here would be too restrictive, and warnings would add noise
+        # to the workflows.
+        for i, (name, values) in enumerate(
+            {'pulse_index': pulse_index, 'ltotal': ltotal}.items()
+        ):
+            vmin = self._interpolator.grid[i][0]
+            vmax = self._interpolator.grid[i][-1]
+            if np.any(values < vmin) or np.any(values > vmax):
+                raise ValueError(
+                    "Some requested values are outside of lookup table bounds for "
+                    f"axis {i}: {name}, min: {vmin}, max: {vmax}."
+                )
+        return sc.array(
+            dims=out_dims,
+            values=self._interpolator((pulse_index, ltotal, event_time_offset)),
+            unit=self._time_unit,
+        )
 
 
 def _time_of_flight_data_histogram(
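The long comment in TofInterpolator.__init__ explains why the lookup values are repeated along the pulse axis. The effect can be reproduced in isolation with a one-dimensional RegularGridInterpolator on synthetic values (not code from the package): as the comment notes, a query at an exact grid point still combines all surrounding corners, and a zero weight times NaN is NaN, so a NaN in the other pulse would otherwise leak into the result.

    import numpy as np
    from scipy.interpolate import RegularGridInterpolator

    values = np.array([1.0, np.nan])  # one finite pulse, one NaN pulse

    # Naive grid [0, 1]: the query at pulse index 0 still touches the NaN corner at 1.
    naive = RegularGridInterpolator(([0.0, 1.0],), values, method="linear")
    print(naive([[0.0]]))  # [nan]

    # Duplicated grid [-0.25, 0.25, 0.75, 1.25]: pulse 0 only sees its own values.
    trick = RegularGridInterpolator(
        (np.array([-0.25, 0.25, 0.75, 1.25]),),
        np.repeat(values, 2),
        method="linear",
        bounds_error=False,
        fill_value=np.nan,
    )
    print(trick([[0.0]]))  # [1.]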
@@ -360,17 +412,11 @@ def _time_of_flight_data_histogram(
     )
     pulse_index = sc.zeros(sizes=etos.sizes)
 
-    # Create
-    interp =
-        lookup, distance_unit=ltotal.unit, time_unit=eto_unit
-    )
+    # Create linear interpolator
+    interp = TofInterpolator(lookup, distance_unit=ltotal.unit, time_unit=eto_unit)
 
     # Compute time-of-flight of the bin edges using the interpolator
-    tofs =
-        dims=etos.dims,
-        values=interp((pulse_index.values, ltotal.values, etos.values)),
-        unit=eto_unit,
-    )
+    tofs = interp(pulse_index=pulse_index, ltotal=ltotal, event_time_offset=etos)
 
     return rebinned.assign_coords(tof=tofs)
 
@@ -380,7 +426,7 @@ def _guess_pulse_stride_offset(
     ltotal: sc.Variable,
     event_time_offset: sc.Variable,
     pulse_stride: int,
-    interp:
+    interp: TofInterpolator,
 ) -> int:
     """
     Using the minimum ``event_time_zero`` to calculate a reference time when computing
@@ -408,21 +454,29 @@ def _guess_pulse_stride_offset(
     pulse_stride:
         Stride of used pulses.
     interp:
-
+        Interpolator for the lookup table.
     """
     tofs = {}
     # Choose a few random events to compute the time-of-flight
     inds = np.random.choice(
         len(event_time_offset), min(5000, len(event_time_offset)), replace=False
     )
-
-
-
+    pulse_index = sc.array(
+        dims=pulse_index.dims,
+        values=pulse_index.values[inds],
+        unit=pulse_index.unit,
+    )
+    ltotal = sc.array(dims=ltotal.dims, values=ltotal.values[inds], unit=ltotal.unit)
+    etos = sc.array(
+        dims=event_time_offset.dims,
+        values=event_time_offset.values[inds],
+        unit=event_time_offset.unit,
+    )
     for i in range(pulse_stride):
-        pulse_inds = (
-        tofs[i] = interp(
+        pulse_inds = (pulse_index + i) % pulse_stride
+        tofs[i] = interp(pulse_index=pulse_inds, ltotal=ltotal, event_time_offset=etos)
     # Find the entry in the list with the least number of nan values
-    return sorted(tofs, key=lambda x:
+    return sorted(tofs, key=lambda x: sc.isnan(tofs[x]).sum())[0]
 
 
 def _time_of_flight_data_events(
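The return statement above keeps the candidate pulse-stride offset whose trial time-of-flight values contain the fewest NaNs, i.e. the fewest events falling outside the lookup table. Reduced to plain numpy with made-up numbers:

    import numpy as np

    # Trial tof values per candidate offset; NaN marks out-of-range lookups.
    tofs = {
        0: np.array([2.1, np.nan, np.nan]),
        1: np.array([2.1, 3.4, np.nan]),
    }
    best = sorted(tofs, key=lambda k: np.isnan(tofs[k]).sum())[0]
    print(best)  # 1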
@@ -436,10 +490,8 @@ def _time_of_flight_data_events(
     etos = da.bins.coords["event_time_offset"]
     eto_unit = elem_unit(etos)
 
-    # Create
-    interp =
-        lookup, distance_unit=ltotal.unit, time_unit=eto_unit
-    )
+    # Create linear interpolator
+    interp = TofInterpolator(lookup, distance_unit=ltotal.unit, time_unit=eto_unit)
 
     # Operate on events (broadcast distances to all events)
     ltotal = sc.bins_like(etos, ltotal).bins.constituents["data"]
@@ -491,11 +543,7 @@ def _time_of_flight_data_events(
     pulse_index %= pulse_stride
 
     # Compute time-of-flight for all neutrons using the interpolator
-    tofs =
-        dims=etos.dims,
-        values=interp((pulse_index.values, ltotal.values, etos.values)),
-        unit=eto_unit,
-    )
+    tofs = interp(pulse_index=pulse_index, ltotal=ltotal, event_time_offset=etos)
 
     parts = da.bins.constituents
     parts["data"] = tofs
{essreduce-25.2.6.dist-info → essreduce-25.3.1.dist-info}/RECORD
CHANGED

@@ -3,7 +3,7 @@ ess/reduce/data.py,sha256=vaoeAJ6EpK1YghOiAALLdWiW17TgUnnnt0H-RGiGzXk,3756
 ess/reduce/logging.py,sha256=6n8Czq4LZ3OK9ENlKsWSI1M3KvKv6_HSoUiV4__IUlU,357
 ess/reduce/parameter.py,sha256=4sCfoKOI2HuO_Q7JLH_jAXnEOFANSn5P3NdaOBzhJxc,4635
 ess/reduce/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-ess/reduce/streaming.py,sha256=
+ess/reduce/streaming.py,sha256=TBttQV5WdSpUKh38J0pdv53seMWtUFswxd6-ltaZb_M,17403
 ess/reduce/ui.py,sha256=zmorAbDwX1cU3ygDT--OP58o0qU7OBcmJz03jPeYSLA,10884
 ess/reduce/uncertainty.py,sha256=LR4O6ApB6Z-W9gC_XW0ajupl8yFG-du0eee1AX_R-gk,6990
 ess/reduce/workflow.py,sha256=sL34T_2Cjl_8iFlegujxI9VyOUwo6erVC8pOXnfWgYw,3060
@@ -12,17 +12,17 @@ ess/reduce/live/raw.py,sha256=hyWkDJ0WYE2TS12dVxpRUh6RkzcUJL0bVDd4JjTidi0,24217
 ess/reduce/live/roi.py,sha256=Hs-pW98k41WU6Kl3UQ41kQawk80c2QNOQ_WNctLzDPE,3795
 ess/reduce/live/workflow.py,sha256=bsbwvTqPhRO6mC__3b7MgU7DWwAnOvGvG-t2n22EKq8,4285
 ess/reduce/nexus/__init__.py,sha256=59bxKkNYg8DYcSykNvH6nCa5SYchJC4SbgZEKhkNdYc,967
-ess/reduce/nexus/_nexus_loader.py,sha256=
+ess/reduce/nexus/_nexus_loader.py,sha256=5N48AMJx1AaFZb6WZPPbVKUlXyFMVVtZrn7Bae57O3A,19842
 ess/reduce/nexus/json_generator.py,sha256=ME2Xn8L7Oi3uHJk9ZZdCRQTRX-OV_wh9-DJn07Alplk,2529
 ess/reduce/nexus/json_nexus.py,sha256=QrVc0p424nZ5dHX9gebAJppTw6lGZq9404P_OFl1giA,10282
 ess/reduce/nexus/types.py,sha256=15XcHbNbOfnAYjWXzzKyYDVNyNixRnP0hJ-Q2duwMWE,9896
-ess/reduce/nexus/workflow.py,sha256=
+ess/reduce/nexus/workflow.py,sha256=EiD6-58eGwoN5fbo47UTZy_oYFitCbwlIH-xqDOSp4c,24326
 ess/reduce/scripts/grow_nexus.py,sha256=hET3h06M0xlJd62E3palNLFvJMyNax2kK4XyJcOhl-I,3387
-ess/reduce/time_of_flight/__init__.py,sha256=
+ess/reduce/time_of_flight/__init__.py,sha256=TSHfyoROwFhM2k3jHzamw3zeb0OQOaiuvgCgDEPEQ_g,1097
+ess/reduce/time_of_flight/eto_to_tof.py,sha256=4itpEB2Vb5-6HvgFfKIV0_-l8zS4UJgB5uBp12L6cls,25827
 ess/reduce/time_of_flight/fakes.py,sha256=rlBgceFVbHIhP_xPyUzYVf-2wEu--G8hA-kxPzAnPbM,4236
 ess/reduce/time_of_flight/simulation.py,sha256=CireE9m9kFbUXhGUeY2L3SoMy7kpqopxKj__h4tSKzo,2614
 ess/reduce/time_of_flight/to_events.py,sha256=_5CcUOWvguDcK8uo2pPZWzXnWoiZhC1w-zF8xysaIvU,4339
-ess/reduce/time_of_flight/toa_to_tof.py,sha256=bt28z6wixS4AegBxsl1uYBREP08TyAs8Y9Z738YcXE4,23476
 ess/reduce/time_of_flight/types.py,sha256=Iv1XGLbrZ9bD4CPAVhsIPkAaB46YC7l7yf5XweljLqk,5047
 ess/reduce/widgets/__init__.py,sha256=SoSHBv8Dc3QXV9HUvPhjSYWMwKTGYZLpsWwsShIO97Q,5325
 ess/reduce/widgets/_base.py,sha256=_wN3FOlXgx_u0c-A_3yyoIH-SdUvDENGgquh9S-h5GI,4852
@@ -36,9 +36,9 @@ ess/reduce/widgets/_spinner.py,sha256=2VY4Fhfa7HMXox2O7UbofcdKsYG-AJGrsgGJB85nDX
 ess/reduce/widgets/_string_widget.py,sha256=iPAdfANyXHf-nkfhgkyH6gQDklia0LebLTmwi3m-iYQ,1482
 ess/reduce/widgets/_switchable_widget.py,sha256=fjKz99SKLhIF1BLgGVBSKKn3Lu_jYBwDYGeAjbJY3Q8,2390
 ess/reduce/widgets/_vector_widget.py,sha256=aTaBqCFHZQhrIoX6-sSqFWCPePEW8HQt5kUio8jP1t8,1203
-essreduce-25.
-essreduce-25.
-essreduce-25.
-essreduce-25.
-essreduce-25.
-essreduce-25.
+essreduce-25.3.1.dist-info/LICENSE,sha256=nVEiume4Qj6jMYfSRjHTM2jtJ4FGu0g-5Sdh7osfEYw,1553
+essreduce-25.3.1.dist-info/METADATA,sha256=xzclNXH4P_JVuujgIOjGtTw_7OQtH5s2e4_FlwUWDBs,3708
+essreduce-25.3.1.dist-info/WHEEL,sha256=52BFRY2Up02UkjOa29eZOS2VxUrpPORXg1pkohGGUS8,91
+essreduce-25.3.1.dist-info/entry_points.txt,sha256=PMZOIYzCifHMTe4pK3HbhxUwxjFaZizYlLD0td4Isb0,66
+essreduce-25.3.1.dist-info/top_level.txt,sha256=0JxTCgMKPLKtp14wb1-RKisQPQWX7i96innZNvHBr-s,4
+essreduce-25.3.1.dist-info/RECORD,,

{essreduce-25.2.6.dist-info → essreduce-25.3.1.dist-info}/LICENSE
File without changes

{essreduce-25.2.6.dist-info → essreduce-25.3.1.dist-info}/entry_points.txt
File without changes

{essreduce-25.2.6.dist-info → essreduce-25.3.1.dist-info}/top_level.txt
File without changes