essreduce 25.3.0-py3-none-any.whl → 25.4.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- ess/reduce/live/raw.py +10 -4
- ess/reduce/nexus/_nexus_loader.py +5 -5
- ess/reduce/nexus/workflow.py +3 -1
- ess/reduce/streaming.py +91 -8
- ess/reduce/time_of_flight/__init__.py +1 -1
- ess/reduce/time_of_flight/{toa_to_tof.py → eto_to_tof.py} +114 -67
- ess/reduce/time_of_flight/fakes.py +8 -1
- ess/reduce/time_of_flight/interpolator_numba.py +162 -0
- ess/reduce/time_of_flight/interpolator_scipy.py +60 -0
- ess/reduce/time_of_flight/simulation.py +9 -2
- ess/reduce/time_of_flight/to_events.py +2 -2
- {essreduce-25.3.0.dist-info → essreduce-25.4.0.dist-info}/METADATA +4 -2
- {essreduce-25.3.0.dist-info → essreduce-25.4.0.dist-info}/RECORD +17 -15
- {essreduce-25.3.0.dist-info → essreduce-25.4.0.dist-info}/WHEEL +1 -1
- {essreduce-25.3.0.dist-info → essreduce-25.4.0.dist-info}/entry_points.txt +0 -0
- {essreduce-25.3.0.dist-info → essreduce-25.4.0.dist-info/licenses}/LICENSE +0 -0
- {essreduce-25.3.0.dist-info → essreduce-25.4.0.dist-info}/top_level.txt +0 -0
ess/reduce/live/raw.py
CHANGED
@@ -380,7 +380,8 @@ class RollingDetectorView(Detector):
             pixel_noise = sc.scalar(0.0, unit='m')
             noise_replica_count = 0
         else:
- …
+            # Unclear what a good number is, could be made configurable.
+            noise_replica_count = 4
         wf = GenericNeXusWorkflow(run_types=[SampleRun], monitor_types=[])
         wf[RollingDetectorViewWindow] = window
         if projection == 'cylinder_mantle_z':

@@ -606,9 +607,14 @@ def position_with_noisy_replicas(
     noise_dim = position_noise.dim
     size = position.size * replicas
     # "Paint" the short array of noise on top of the (replicated) position data.
-    noise = …
- …
+    noise = (
+        sc.broadcast(
+            position_noise,
+            sizes={'dummy': ceil(size / position_noise.size), **position_noise.sizes},
+        )
+        .flatten(to=noise_dim)[:size]
+        .fold(dim=noise_dim, sizes={'replica': replicas, **position.sizes})
+    )
     return sc.concat([position, noise + position], dim='replica')
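The broadcast/flatten/fold chain above tiles a short noise table across every replicated position. A self-contained sketch of the same trick, with made-up sizes (not the library code itself):

```python
# Tile a short noise array over a longer target: broadcast along a helper dim,
# flatten, truncate to the target size, and fold into (replica, position) shape.
from math import ceil

import scipp as sc

position = sc.arange('position', 10, unit='m')  # stand-in for detector positions
noise = sc.array(dims=['noise'], values=[0.1, -0.2, 0.3], unit='m')
replicas = 4
size = position.size * replicas

painted = (
    sc.broadcast(noise, sizes={'dummy': ceil(size / noise.size), **noise.sizes})
    .flatten(to='noise')[:size]
    .fold(dim='noise', sizes={'replica': replicas, **position.sizes})
)
assert painted.sizes == {'replica': replicas, 'position': position.size}
```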
ess/reduce/nexus/_nexus_loader.py
CHANGED

@@ -85,7 +85,7 @@ def load_metadata(
     entry_name: NeXusEntryName | None = None,
     definitions: Mapping | NoNewDefinitionsType = NoNewDefinitions,
 ) -> _Model:
-    with …
+    with open_nexus_file(file_path, definitions=definitions) as f:
         entry = _unique_child_group(f, snx.NXentry, entry_name)
         return model.from_nexus_entry(entry)

@@ -113,7 +113,7 @@ def compute_component_position(dg: sc.DataGroup) -> sc.DataGroup:
     )


-def …
+def open_nexus_file(
     file_path: FilePath | NeXusFile | NeXusGroup,
     definitions: Mapping | None | NoNewDefinitionsType = NoNewDefinitions,
     *,

@@ -212,7 +212,7 @@ def _open_component_parent(
     """Locate the parent group of a NeXus component."""
     file_path = location.filename
     entry_name = location.entry_name
-    with …
+    with open_nexus_file(file_path, definitions=definitions) as f:
         entry = _unique_child_group(f, snx.NXentry, entry_name)
         if nx_class is snx.NXsample:
             yield entry

@@ -357,7 +357,7 @@ def load_data(
     :
         Data array with events or a histogram.
     """
-    with …
+    with open_nexus_file(file_path, definitions=definitions) as f:
         entry = _unique_child_group(f, snx.NXentry, entry_name)
         instrument = _unique_child_group(entry, snx.NXinstrument, None)
         component = instrument[component_name]

@@ -554,7 +554,7 @@ def _parse_monitor(group: snx.Group) -> NeXusMonitorInfo:

 def read_nexus_file_info(file_path: FilePath | NeXusFile | NeXusGroup) -> NeXusFileInfo:
     """Opens and inspects a NeXus file, returning a summary of its contents."""
-    with …
+    with open_nexus_file(file_path) as f:
         entry = _unique_child_group(f, snx.NXentry, None)
         instrument = _unique_child_group(entry, snx.NXinstrument, None)
         detectors = {}
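Across these call sites `open_nexus_file` loses its leading underscore, making it importable by callers; `workflow.py` below relies on that for pre-opening files. A hedged sketch (hypothetical file name; the import path assumes the function is not re-exported elsewhere):

```python
from ess.reduce.nexus._nexus_loader import open_nexus_file

# 'experiment.nxs' is a hypothetical file path.
with open_nexus_file('experiment.nxs') as f:
    print(list(f.keys()))  # top-level NeXus groups, e.g. the NXentry
```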
ess/reduce/nexus/workflow.py
CHANGED
@@ -62,7 +62,9 @@ def file_path_to_file_spec(
     filename: Filename[RunType], preopen: PreopenNeXusFile
 ) -> NeXusFileSpec[RunType]:
     return NeXusFileSpec[RunType](
- …
+        nexus.open_nexus_file(filename, definitions=definitions)
+        if preopen
+        else filename
     )
ess/reduce/streaming.py
CHANGED
@@ -240,6 +240,22 @@ class StreamProcessor:
     processing based on the input keys. In particular, it is the responsibility of the
     user to ensure that the workflow is "linear" with respect to the dynamic keys up to
     the accumulation keys.
+
+    Similarly, the stream processor cannot determine from the workflow structure whether
+    context updates are compatible with the accumulated data. Accumulators are not
+    cleared automatically. This is best illustrated with an example:
+
+    - If the context is the detector rotation angle, and we accumulate I(Q) (or a
+      prerequisite of I(Q)), then updating the detector angle context is compatible with
+      previous data, assuming Q for each new chunk is computed based on the angle.
+    - If the context is the sample temperature, and we accumulate I(Q), then updating
+      the temperature context is not compatible with previous data. Accumulating I(Q, T)
+      could be compatible in this case.
+
+    Since the correctness cannot be determined from the workflow structure, we recommend
+    implementing processing steps in a way to catch such problems. For example, adding
+    the temperature as a coordinate to the I(Q) data array should allow for
+    automatically raising in the accumulator if the temperature changes.
     """

     def __init__(

@@ -247,6 +263,7 @@ class StreamProcessor:
         base_workflow: sciline.Pipeline,
         *,
         dynamic_keys: tuple[sciline.typing.Key, ...],
+        context_keys: tuple[sciline.typing.Key, ...] = (),
         target_keys: tuple[sciline.typing.Key, ...],
         accumulators: dict[sciline.typing.Key, Accumulator | Callable[..., Accumulator]]
         | tuple[sciline.typing.Key, ...],

@@ -260,7 +277,12 @@ class StreamProcessor:
         base_workflow:
             Workflow to be used for processing chunks.
         dynamic_keys:
-            Keys that are expected to be updated with each chunk.
+            Keys that are expected to be updated with each chunk. These keys cannot
+            depend on each other or on context_keys.
+        context_keys:
+            Keys that define context for processing chunks and may change occasionally.
+            These keys cannot overlap with dynamic_keys or depend on each other or on
+            dynamic_keys.
         target_keys:
             Keys to be computed and returned.
         accumulators:

@@ -275,21 +297,59 @@ class StreamProcessor:
         unless the values for these keys are valid for all chunks comprised in the
         final accumulators at the point where :py:meth:`finalize` is called.
         """
+        self._dynamic_keys = set(dynamic_keys)
+        self._context_keys = set(context_keys)
+
+        # Validate that dynamic and context keys do not overlap
+        overlap = self._dynamic_keys & self._context_keys
+        if overlap:
+            raise ValueError(f"Keys cannot be both dynamic and context: {overlap}")
+
+        # Check dynamic/context keys don't depend on other dynamic/context keys
+        graph = base_workflow.underlying_graph
+        special_keys = self._dynamic_keys | self._context_keys
+        for key in special_keys:
+            if key not in graph:
+                continue
+            ancestors = nx.ancestors(graph, key)
+            special_ancestors = ancestors & special_keys
+            downstream = 'Dynamic' if key in self._dynamic_keys else 'Context'
+            if special_ancestors:
+                raise ValueError(
+                    f"{downstream} key '{key}' depends on other dynamic/context keys: "
+                    f"{special_ancestors}. This is not supported."
+                )
+
         workflow = sciline.Pipeline()
         for key in target_keys:
             workflow[key] = base_workflow[key]
         for key in dynamic_keys:
             workflow[key] = None  # hack to prune branches
- …
+        for key in context_keys:
+            workflow[key] = None

         # Find and pre-compute static nodes as far down the graph as possible
- …
-        for key, value in base_workflow.compute(parents).items():
+        nodes = _find_descendants(workflow, dynamic_keys + context_keys)
+        last_static = _find_parents(workflow, nodes) - nodes
+        for key, value in base_workflow.compute(last_static).items():
             workflow[key] = value

+        # Nodes that may need updating on context change but should be cached otherwise.
+        dynamic_nodes = _find_descendants(workflow, dynamic_keys)
+        # Nodes as far "down" in the graph as possible, right before the dynamic nodes.
+        # This also includes target keys that are not dynamic but context-dependent.
+        context_to_cache = (
+            (_find_parents(workflow, dynamic_nodes) | set(target_keys)) - dynamic_nodes
+        ) & _find_descendants(workflow, context_keys)
+        graph = workflow.underlying_graph
+        self._context_key_to_cached_context_nodes_map = {
+            context_key: ({context_key} | nx.descendants(graph, context_key))
+            & context_to_cache
+            for context_key in self._context_keys
+            if context_key in graph
+        }
+
+        self._context_workflow = workflow.copy()
         self._process_chunk_workflow = workflow.copy()
         self._finalize_workflow = workflow.copy()
         self._accumulators = (

@@ -299,7 +359,6 @@ class StreamProcessor:
         )

         # Map each accumulator to its dependent dynamic keys
-        graph = workflow.underlying_graph
         self._accumulator_dependencies = {
             acc_key: nx.ancestors(graph, acc_key) & self._dynamic_keys
             for acc_key in self._accumulators

@@ -323,6 +382,30 @@ class StreamProcessor:
         self._target_keys = target_keys
         self._allow_bypass = allow_bypass

+    def set_context(self, context: dict[sciline.typing.Key, Any]) -> None:
+        """
+        Set the context for processing chunks.
+
+        Parameters
+        ----------
+        context:
+            Context to be set.
+        """
+        needs_recompute = set()
+        for key in context:
+            if key not in self._context_keys:
+                raise ValueError(f"Key '{key}' is not a context key")
+            needs_recompute |= self._context_key_to_cached_context_nodes_map[key]
+        for key, value in context.items():
+            self._context_workflow[key] = value
+        results = self._context_workflow.compute(needs_recompute)
+        for key, value in results.items():
+            if key in self._target_keys:
+                # Context-dependent key is direct target, independent of dynamic nodes.
+                self._finalize_workflow[key] = value
+            else:
+                self._process_chunk_workflow[key] = value
+
     def add_chunk(
         self, chunks: dict[sciline.typing.Key, Any]
     ) -> dict[sciline.typing.Key, Any]:
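Taken together, `context_keys`, the cached context-node map, and `set_context` let occasionally-changing inputs be recomputed without redoing per-chunk work. A minimal sketch with hypothetical keys and a single provider, assuming `EternalAccumulator` is the summing accumulator this module already provides:

```python
from typing import NewType

import sciline
import scipp as sc

from ess.reduce.streaming import EternalAccumulator, StreamProcessor

Events = NewType('Events', sc.DataArray)           # dynamic: new data every chunk
Temperature = NewType('Temperature', sc.Variable)  # context: changes occasionally
Result = NewType('Result', sc.DataArray)


def process(events: Events, temperature: Temperature) -> Result:
    # Attaching the context as a coordinate lets an accumulator detect
    # incompatible context changes, as the docstring above recommends.
    return Result(events.assign_coords(T=temperature))


processor = StreamProcessor(
    sciline.Pipeline([process]),
    dynamic_keys=(Events,),
    context_keys=(Temperature,),
    target_keys=(Result,),
    accumulators={Result: EternalAccumulator},
)
processor.set_context({Temperature: sc.scalar(4.2, unit='K')})
chunk = sc.DataArray(sc.ones(sizes={'x': 3}))
results = processor.add_chunk({Events: chunk})
```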
ess/reduce/time_of_flight/__init__.py
CHANGED

@@ -6,9 +6,9 @@ Utilities for computing real neutron time-of-flight from chopper settings and
 neutron time-of-arrival at the detectors.
 """

+from .eto_to_tof import default_parameters, providers, resample_tof_data
 from .simulation import simulate_beamline
 from .to_events import to_events
-from .toa_to_tof import default_parameters, providers, resample_tof_data
 from .types import (
     DistanceResolution,
     LookupTableRelativeErrorThreshold,
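The `toa_to_tof` → `eto_to_tof` rename (the input is `event_time_offset`, not a plain time-of-arrival) is internal: the same names are re-exported by the subpackage, so downstream imports are unaffected:

```python
# Unaffected by the module rename; the package re-exports the public names.
from ess.reduce.time_of_flight import default_parameters, providers, resample_tof_data
```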
ess/reduce/time_of_flight/{toa_to_tof.py → eto_to_tof.py}
RENAMED

@@ -14,6 +14,10 @@ import scipp as sc
 from scipp._scipp.core import _bins_no_validate
 from scippneutron._utils import elem_unit

+try:
+    from .interpolator_numba import Interpolator as InterpolatorImpl
+except ImportError:
+    from .interpolator_scipy import Interpolator as InterpolatorImpl
 from .to_events import to_events
 from .types import (
     DistanceResolution,

@@ -284,38 +288,78 @@ def compute_tof_lookup_table(
     )


- …
-    lookup: sc.DataArray, distance_unit: str, time_unit: str
- …
+class TofInterpolator:
+    def __init__(self, lookup: sc.DataArray, distance_unit: str, time_unit: str):
+        self._distance_unit = distance_unit
+        self._time_unit = time_unit
+
+        # In the pulse dimension, it could be that for a given event_time_offset and
+        # distance, a tof value is finite in one pulse and NaN in the other.
+        # When using the bilinear interpolation, even if the value of the requested
+        # point is exactly 0 or 1 (in the case of pulse_stride=2), the interpolator
+        # will still use all 4 corners surrounding the point. This means that if one of
+        # the corners is NaN, the result will be NaN.
+        # Here, we use a trick where we duplicate the lookup values in the 'pulse'
+        # dimension so that the interpolator has values on bin edges for that dimension.
+        # The interpolator raises an error if axes coordinates are not strictly
+        # monotonic, so we cannot use e.g. [-0.5, 0.5, 0.5, 1.5] in the case of
+        # pulse_stride=2. Instead we use [-0.25, 0.25, 0.75, 1.25].
+        base_grid = np.arange(float(lookup.sizes["pulse"]))
+        self._pulse_edges = np.sort(
+            np.concatenate([base_grid - 0.25, base_grid + 0.25])
+        )
+
+        self._time_edges = (
+            lookup.coords["event_time_offset"]
+            .to(unit=self._time_unit, copy=False)
+            .values
+        )
+        self._distance_edges = (
+            lookup.coords["distance"].to(unit=distance_unit, copy=False).values
+        )
+
+        self._interpolator = InterpolatorImpl(
+            time_edges=self._time_edges,
+            distance_edges=self._distance_edges,
+            pulse_edges=self._pulse_edges,
+            values=np.repeat(
+                lookup.data.to(unit=self._time_unit, copy=False).values, 2, axis=0
+            ),
+        )
+
+    def __call__(
+        self,
+        pulse_index: sc.Variable,
+        ltotal: sc.Variable,
+        event_time_offset: sc.Variable,
+    ) -> sc.Variable:
+        if pulse_index.unit not in ("", None):
+            raise sc.UnitError(
+                "pulse_index must have unit dimensionless or None, "
+                f"but got unit: {pulse_index.unit}."
+            )
+        if ltotal.unit != self._distance_unit:
+            raise sc.UnitError(
+                f"ltotal must have unit: {self._distance_unit}, "
+                f"but got unit: {ltotal.unit}."
+            )
+        if event_time_offset.unit != self._time_unit:
+            raise sc.UnitError(
+                f"event_time_offset must have unit: {self._time_unit}, "
+                f"but got unit: {event_time_offset.unit}."
+            )
+        out_dims = event_time_offset.dims
+        pulse_index = pulse_index.values
+        ltotal = ltotal.values
+        event_time_offset = event_time_offset.values
+
+        return sc.array(
+            dims=out_dims,
+            values=self._interpolator(
+                times=event_time_offset, distances=ltotal, pulse_indices=pulse_index
+            ),
+            unit=self._time_unit,
+        )


 def _time_of_flight_data_histogram(
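The comment block in `TofInterpolator.__init__` is easiest to see with concrete numbers. For `pulse_stride=2` the lookup has pulse grid `[0, 1]`; duplicating every pulse slice and placing edges a quarter-step on either side keeps the axis strictly monotonic while ensuring a query at pulse index exactly 0 or 1 only ever sees corners from that one pulse:

```python
# Numeric illustration of the pulse-edge duplication trick (pulse_stride=2).
import numpy as np

base_grid = np.arange(2.0)  # pulse indices [0, 1]
pulse_edges = np.sort(np.concatenate([base_grid - 0.25, base_grid + 0.25]))
print(pulse_edges)  # [-0.25  0.25  0.75  1.25]

values = np.arange(2.0 * 3 * 4).reshape(2, 3, 4)  # (pulse, distance, time) lookup
doubled = np.repeat(values, 2, axis=0)  # pulse rows 0,0,1,1 match the four edges
# A query at pulse index 0.0 falls between edges -0.25 and 0.25, whose value
# slices are both copies of pulse 0, so a NaN in pulse 1 cannot bleed in.
```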
@@ -327,7 +371,8 @@ def _time_of_flight_data_histogram(
     # In NeXus, 'time_of_flight' is the canonical name in NXmonitor, but in some files,
     # it may be called 'tof'.
     key = next(iter(set(da.coords.keys()) & {"time_of_flight", "tof"}))
- …
+    raw_eto = da.coords[key].to(dtype=float, copy=False)
+    eto_unit = raw_eto.unit
     pulse_period = pulse_period.to(unit=eto_unit)

     # In histogram mode, because there is a wrap around at the end of the pulse, we

@@ -335,9 +380,7 @@ def _time_of_flight_data_histogram(
     # with one finite left edge and a NaN right edge (it becomes NaN as it would be
     # outside the range of the lookup table).
     new_bins = sc.sort(
-        sc.concat(
-            [da.coords[key], sc.scalar(0.0, unit=eto_unit), pulse_period], dim=key
-        ),
+        sc.concat([raw_eto, sc.scalar(0.0, unit=eto_unit), pulse_period], dim=key),
         key=key,
     )
     rebinned = da.rebin({key: new_bins})

@@ -360,16 +403,14 @@ def _time_of_flight_data_histogram(
     )
     pulse_index = sc.zeros(sizes=etos.sizes)

-    # Create …
-    interp = …
-        lookup, distance_unit=ltotal.unit, time_unit=eto_unit
-    )
+    # Create linear interpolator
+    interp = TofInterpolator(lookup, distance_unit=ltotal.unit, time_unit=eto_unit)

     # Compute time-of-flight of the bin edges using the interpolator
-    tofs = …
- …
+    tofs = interp(
+        pulse_index=pulse_index,
+        ltotal=ltotal.broadcast(sizes=etos.sizes),
+        event_time_offset=etos,
     )

     return rebinned.assign_coords(tof=tofs)

@@ -380,7 +421,7 @@ def _guess_pulse_stride_offset(
     ltotal: sc.Variable,
     event_time_offset: sc.Variable,
     pulse_stride: int,
-    interp: …
+    interp: TofInterpolator,
 ) -> int:
     """
     Using the minimum ``event_time_zero`` to calculate a reference time when computing

@@ -408,21 +449,29 @@ def _guess_pulse_stride_offset(
     pulse_stride:
         Stride of used pulses.
     interp:
- …
+        Interpolator for the lookup table.
     """
     tofs = {}
     # Choose a few random events to compute the time-of-flight
     inds = np.random.choice(
         len(event_time_offset), min(5000, len(event_time_offset)), replace=False
     )
- …
+    pulse_index = sc.array(
+        dims=pulse_index.dims,
+        values=pulse_index.values[inds],
+        unit=pulse_index.unit,
+    )
+    ltotal = sc.array(dims=ltotal.dims, values=ltotal.values[inds], unit=ltotal.unit)
+    etos = sc.array(
+        dims=event_time_offset.dims,
+        values=event_time_offset.values[inds],
+        unit=event_time_offset.unit,
+    )
     for i in range(pulse_stride):
-        pulse_inds = (…
-        tofs[i] = interp(…
+        pulse_inds = (pulse_index + i) % pulse_stride
+        tofs[i] = interp(pulse_index=pulse_inds, ltotal=ltotal, event_time_offset=etos)
     # Find the entry in the list with the least number of nan values
-    return sorted(tofs, key=lambda x: …
+    return sorted(tofs, key=lambda x: sc.isnan(tofs[x]).sum())[0]

@@ -433,13 +482,11 @@ def _time_of_flight_data_events(
     pulse_stride: int,
     pulse_stride_offset: int,
 ) -> sc.DataArray:
-    etos = da.bins.coords["event_time_offset"]
+    etos = da.bins.coords["event_time_offset"].to(dtype=float, copy=False)
     eto_unit = elem_unit(etos)

-    # Create …
-    interp = …
-        lookup, distance_unit=ltotal.unit, time_unit=eto_unit
-    )
+    # Create linear interpolator
+    interp = TofInterpolator(lookup, distance_unit=ltotal.unit, time_unit=eto_unit)

     # Operate on events (broadcast distances to all events)
     ltotal = sc.bins_like(etos, ltotal).bins.constituents["data"]

@@ -491,11 +538,7 @@ def _time_of_flight_data_events(
     pulse_index %= pulse_stride

     # Compute time-of-flight for all neutrons using the interpolator
-    tofs = …
-        dims=etos.dims,
-        values=interp((pulse_index.values, ltotal.values, etos.values)),
-        unit=eto_unit,
-    )
+    tofs = interp(pulse_index=pulse_index, ltotal=ltotal, event_time_offset=etos)

     parts = da.bins.constituents
     parts["data"] = tofs

@@ -573,17 +616,21 @@ def resample_tof_data(da: TofData) -> ResampledTofData:
         Histogrammed data with the time-of-flight coordinate.
     """
     dim = next(iter(set(da.dims) & {"time_of_flight", "tof"}))
- …
+    data = da.rename_dims({dim: "tof"}).drop_coords(
+        [name for name in da.coords if name != "tof"]
+    )
+    events = to_events(data, "event")

     # Define a new bin width, close to the original bin width.
     # TODO: this could be a workflow parameter
     coord = da.coords["tof"]
     bin_width = (coord[dim, 1:] - coord[dim, :-1]).nanmedian()
     rehist = events.hist(tof=bin_width)
- …
+    return ResampledTofData(
+        rehist.assign_coords(
+            {key: var for key, var in da.coords.items() if dim not in var.dims}
+        )
+    )


 def default_parameters() -> dict:
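With the interpolation wrapped in `TofInterpolator`, the histogram and event paths above share one call signature. A self-contained sketch of that interface (note it is an internal helper of this module, not public API; the tiny lookup table here is made up):

```python
import numpy as np
import scipp as sc

from ess.reduce.time_of_flight.eto_to_tof import TofInterpolator

rng = np.random.default_rng(seed=0)
lookup = sc.DataArray(
    sc.array(
        dims=['pulse', 'distance', 'event_time_offset'],
        values=rng.random((1, 5, 8)),
        unit='ns',
    ),
    coords={
        'distance': sc.linspace('distance', 10.0, 30.0, 5, unit='m'),
        'event_time_offset': sc.linspace(
            'event_time_offset', 0.0, 71.0e6, 8, unit='ns'
        ),
    },
)
etos = sc.array(dims=['event'], values=[1.0e6, 2.0e6], unit='ns')
ltotal = sc.array(dims=['event'], values=[15.0, 25.0], unit='m')

interp = TofInterpolator(lookup, distance_unit='m', time_unit='ns')
tofs = interp(
    pulse_index=sc.zeros(sizes=etos.sizes),  # dimensionless, as the unit check requires
    ltotal=ltotal,
    event_time_offset=etos,
)
```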
ess/reduce/time_of_flight/fakes.py
CHANGED

@@ -23,6 +23,7 @@ class FakeBeamline:
         events_per_pulse: int = 200000,
         seed: int | None = None,
         source: Callable | None = None,
+        source_position: sc.Variable | None = None,
     ):
         import math

@@ -32,6 +33,8 @@ class FakeBeamline:
         self.frequency = pulse.frequency
         self.npulses = math.ceil((run_length * self.frequency).to(unit="").value)
         self.events_per_pulse = events_per_pulse
+        if source_position is None:
+            source_position = sc.vector([0, 0, 0], unit='m')

         # Create a source
         if source is None:

@@ -54,7 +57,7 @@ class FakeBeamline:
                 open=ch.slit_begin,
                 close=ch.slit_end,
                 phase=abs(ch.phase),
-                distance=ch.axle_position…
+                distance=sc.norm(ch.axle_position - source_position),
                 name=name,
             )
             for name, ch in choppers.items()

@@ -117,3 +120,7 @@ def pulse_skipping_choppers():
         radius=sc.scalar(30.0, unit="cm"),
     ),
 }
+
+
+def source_position():
+    return sc.vector([0, 0, 0], unit='m')
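This change, like the matching one in simulation.py below, stops assuming the source sits at the origin: the chopper distance is now the norm of the vector from the explicit source position to the chopper axle. In isolation:

```python
import scipp as sc

axle_position = sc.vector([0.0, 0.0, 8.0], unit='m')
source_position = sc.vector([0.0, 0.0, -2.0], unit='m')
# 10 m, rather than the 8 m that a source-at-origin assumption would give.
distance = sc.norm(axle_position - source_position)
```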
ess/reduce/time_of_flight/interpolator_numba.py
ADDED

@@ -0,0 +1,162 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright (c) 2025 Scipp contributors (https://github.com/scipp)
+import numpy as np
+from numba import njit, prange
+
+
+@njit(boundscheck=False, cache=True, fastmath=False, parallel=True)
+def interpolate(
+    x: np.ndarray,
+    y: np.ndarray,
+    z: np.ndarray,
+    values: np.ndarray,
+    xp: np.ndarray,
+    yp: np.ndarray,
+    zp: np.ndarray,
+    fill_value: float,
+    out: np.ndarray,
+):
+    """
+    Linear interpolation of data on a 3D regular grid.
+
+    Parameters
+    ----------
+    x:
+        1D array of grid edges along the x-axis. They must be linspaced.
+    y:
+        1D array of grid edges along the y-axis. They must be linspaced.
+    z:
+        1D array of grid edges along the z-axis. They must be linspaced.
+    values:
+        3D array of values on the grid. The shape must be (nz, ny, nx).
+    xp:
+        1D array of x-coordinates where to interpolate (size N).
+    yp:
+        1D array of y-coordinates where to interpolate (size N).
+    zp:
+        1D array of z-coordinates where to interpolate (size N).
+    fill_value:
+        Value to use for points outside of the grid.
+    out:
+        1D array where the interpolated values will be stored (size N).
+    """
+    if not (len(xp) == len(yp) == len(zp) == len(out)):
+        raise ValueError("Interpolator: all input arrays must have the same size.")
+
+    npoints = len(xp)
+    xmin = x[0]
+    xmax = x[-1]
+    ymin = y[0]
+    ymax = y[-1]
+    zmin = z[0]
+    zmax = z[-1]
+    dx = x[1] - xmin
+    dy = y[1] - ymin
+    dz = z[1] - zmin
+
+    one_over_dx = 1.0 / dx
+    one_over_dy = 1.0 / dy
+    one_over_dz = 1.0 / dz
+    norm = one_over_dx * one_over_dy * one_over_dz
+
+    for i in prange(npoints):
+        xx = xp[i]
+        yy = yp[i]
+        zz = zp[i]
+
+        if (
+            (xx < xmin)
+            or (xx > xmax)
+            or (yy < ymin)
+            or (yy > ymax)
+            or (zz < zmin)
+            or (zz > zmax)
+        ):
+            out[i] = fill_value
+
+        else:
+            ix = int((xx - xmin) * one_over_dx)
+            iy = int((yy - ymin) * one_over_dy)
+            iz = int((zz - zmin) * one_over_dz)
+
+            y2 = y[iy + 1]
+            y1 = y[iy]
+            x2 = x[ix + 1]
+            x1 = x[ix]
+            z1 = z[iz]
+            z2 = z[iz + 1]
+
+            a111 = values[iz, iy, ix]
+            a211 = values[iz, iy, ix + 1]
+            a121 = values[iz, iy + 1, ix]
+            a221 = values[iz, iy + 1, ix + 1]
+            a112 = values[iz + 1, iy, ix]
+            a212 = values[iz + 1, iy, ix + 1]
+            a122 = values[iz + 1, iy + 1, ix]
+            a222 = values[iz + 1, iy + 1, ix + 1]
+
+            x2mxx = x2 - xx
+            xxmx1 = xx - x1
+            y2myy = y2 - yy
+            yymy1 = yy - y1
+            out[i] = (
+                (z2 - zz)
+                * (
+                    y2myy * (x2mxx * a111 + xxmx1 * a211)
+                    + yymy1 * (x2mxx * a121 + xxmx1 * a221)
+                )
+                + (zz - z1)
+                * (
+                    y2myy * (x2mxx * a112 + xxmx1 * a212)
+                    + yymy1 * (x2mxx * a122 + xxmx1 * a222)
+                )
+            ) * norm
+
+
+class Interpolator:
+    def __init__(
+        self,
+        time_edges: np.ndarray,
+        distance_edges: np.ndarray,
+        pulse_edges: np.ndarray,
+        values: np.ndarray,
+        fill_value: float = np.nan,
+    ):
+        """
+        Interpolator for 3D regular grid data (Numba implementation).
+
+        Parameters
+        ----------
+        time_edges:
+            1D array of time edges.
+        distance_edges:
+            1D array of distance edges.
+        pulse_edges:
+            1D array of pulse edges.
+        values:
+            3D array of values on the grid. The shape must be (nz, ny, nx).
+        fill_value:
+            Value to use for points outside of the grid.
+        """
+        self.time_edges = time_edges
+        self.distance_edges = distance_edges
+        self.pulse_edges = pulse_edges
+        self.values = values
+        self.fill_value = fill_value
+
+    def __call__(
+        self, times: np.ndarray, distances: np.ndarray, pulse_indices: np.ndarray
+    ) -> np.ndarray:
+        out = np.empty_like(times)
+        interpolate(
+            x=self.time_edges,
+            y=self.distance_edges,
+            z=self.pulse_edges,
+            values=self.values,
+            xp=times,
+            yp=distances,
+            zp=pulse_indices,
+            fill_value=self.fill_value,
+            out=out,
+        )
+        return out
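A minimal, self-contained use of the new Numba interpolator on a made-up grid (requires numba; the edges must be linspaced, as the docstring states):

```python
import numpy as np

from ess.reduce.time_of_flight.interpolator_numba import Interpolator

rng = np.random.default_rng(seed=0)
interp = Interpolator(
    time_edges=np.linspace(0.0, 71.0e6, 100),         # ns
    distance_edges=np.linspace(10.0, 40.0, 50),       # m
    pulse_edges=np.array([-0.25, 0.25, 0.75, 1.25]),  # pulse_stride=2 edges
    values=rng.random((4, 50, 100)),                  # shape (nz, ny, nx)
)
tofs = interp(
    times=rng.uniform(0.0, 71.0e6, 1000),
    distances=rng.uniform(10.0, 40.0, 1000),
    pulse_indices=rng.integers(0, 2, 1000).astype(float),
)
assert tofs.shape == (1000,)
```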
ess/reduce/time_of_flight/interpolator_scipy.py
ADDED

@@ -0,0 +1,60 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright (c) 2025 Scipp contributors (https://github.com/scipp)
+
+import numpy as np
+
+
+class Interpolator:
+    def __init__(
+        self,
+        time_edges: np.ndarray,
+        distance_edges: np.ndarray,
+        pulse_edges: np.ndarray,
+        values: np.ndarray,
+        method: str = "linear",
+        bounds_error: bool = False,
+        fill_value: float = np.nan,
+        **kwargs,
+    ):
+        """
+        Interpolator for 3D regular grid data (SciPy implementation).
+
+        Parameters
+        ----------
+        time_edges:
+            1D array of time edges.
+        distance_edges:
+            1D array of distance edges.
+        pulse_edges:
+            1D array of pulse edges.
+        values:
+            3D array of values on the grid. The shape must be (nz, ny, nx).
+        method:
+            Method of interpolation. Default is "linear".
+        bounds_error:
+            If True, when interpolated values are requested outside of the domain,
+            a ValueError is raised. If False, fill_value is used.
+        fill_value:
+            Value to use for points outside of the grid.
+        kwargs:
+            Additional arguments to pass to scipy.interpolate.RegularGridInterpolator.
+        """
+        from scipy.interpolate import RegularGridInterpolator
+
+        self._interp = RegularGridInterpolator(
+            (
+                pulse_edges,
+                distance_edges,
+                time_edges,
+            ),
+            values,
+            method=method,
+            bounds_error=bounds_error,
+            fill_value=fill_value,
+            **kwargs,
+        )
+
+    def __call__(
+        self, times: np.ndarray, distances: np.ndarray, pulse_indices: np.ndarray
+    ) -> np.ndarray:
+        return self._interp((pulse_indices, distances, times))
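The SciPy class mirrors the Numba interface one-to-one, which is what lets the try/except in eto_to_tof.py swap the two transparently. If both backends happen to be installed, they can be cross-checked on the same made-up grid (agreement up to floating-point noise is the expectation here, not a tested guarantee):

```python
import numpy as np

from ess.reduce.time_of_flight.interpolator_numba import Interpolator as NumbaInterp
from ess.reduce.time_of_flight.interpolator_scipy import Interpolator as ScipyInterp

rng = np.random.default_rng(seed=1)
grid = dict(
    time_edges=np.linspace(0.0, 71.0e6, 100),
    distance_edges=np.linspace(10.0, 40.0, 50),
    pulse_edges=np.array([-0.25, 0.25, 0.75, 1.25]),
    values=rng.random((4, 50, 100)),
)
times = rng.uniform(0.0, 71.0e6, 1000)
distances = rng.uniform(10.0, 40.0, 1000)
pulses = rng.integers(0, 2, 1000).astype(float)

a = NumbaInterp(**grid)(times=times, distances=distances, pulse_indices=pulses)
b = ScipyInterp(**grid)(times=times, distances=distances, pulse_indices=pulses)
assert np.allclose(a, b)
```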
ess/reduce/time_of_flight/simulation.py
CHANGED

@@ -9,7 +9,9 @@ from .types import SimulationResults


 def simulate_beamline(
+    *,
     choppers: Mapping[str, DiskChopper],
+    source_position: sc.Variable,
     neutrons: int = 1_000_000,
     pulses: int = 1,
     seed: int | None = None,

@@ -25,6 +27,9 @@ def simulate_beamline(
         A dict of DiskChopper objects representing the choppers in the beamline. See
         https://scipp.github.io/scippneutron/user-guide/chopper/processing-nexus-choppers.html#Build-DiskChopper
         for more information.
+    source_position:
+        A scalar variable with ``dtype=vector3`` that defines the source position.
+        Must be in the same coordinate system as the choppers' axle positions.
     neutrons:
         Number of neutrons to simulate.
     pulses:

@@ -45,7 +50,9 @@ def simulate_beamline(
             open=ch.slit_begin,
             close=ch.slit_end,
             phase=abs(ch.phase),
-            distance=…
+            distance=sc.norm(
+                ch.axle_position - source_position.to(unit=ch.axle_position.unit)
+            ),
             name=name,
         )
         for name, ch in choppers.items()

@@ -54,7 +61,7 @@ def simulate_beamline(
     if not tof_choppers:
         events = source.data.squeeze().flatten(to='event')
         return SimulationResults(
-            time_of_arrival=events.coords["…
+            time_of_arrival=events.coords["birth_time"],
             speed=events.coords["speed"],
             wavelength=events.coords["wavelength"],
             weight=events.data,
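`simulate_beamline` is now keyword-only and requires an explicit source position. With an empty chopper mapping the call reduces to the bare source pulse (the `if not tof_choppers:` branch above), which makes for a self-contained sketch (the tof simulation package must be installed):

```python
import scipp as sc

from ess.reduce.time_of_flight import simulate_beamline

results = simulate_beamline(
    choppers={},
    source_position=sc.vector([0.0, 0.0, 0.0], unit='m'),
    neutrons=100_000,
)
```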
ess/reduce/time_of_flight/to_events.py
CHANGED

@@ -34,7 +34,7 @@ def to_events(
     rng = np.random.default_rng()
     event_coords = {}
     edge_dims = []
-    midp_dims = set()
+    midp_dims = set(da.dims)
     midp_coord_names = []
     # Separate bin-edge and midpoints coords
     for name in da.coords:

@@ -43,9 +43,9 @@ def to_events(
         if is_edges:
             if name in dims:
                 edge_dims.append(name)
+                midp_dims -= {name}
             else:
                 midp_coord_names.append(name)
-                midp_dims.update(set(dims))

     edge_sizes = {dim: da.sizes[da.coords[dim].dim] for dim in edge_dims}
     for dim in edge_dims:
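The `midp_dims` bookkeeping is inverted here: every dimension now starts out as a midpoint dim and bin-edge dims are subtracted, so a dimension that carries no coordinate at all is kept rather than silently missed. A hedged sketch of the case this appears to target:

```python
import scipp as sc

from ess.reduce.time_of_flight import to_events

hist = sc.DataArray(
    sc.ones(sizes={'pixel': 3, 'tof': 4}),
    coords={'tof': sc.linspace('tof', 0.0, 1.0, 5, unit='ms')},  # bin edges
)
# 'pixel' has no coordinate; it now counts as a midpoint dim and survives
# the conversion to events.
events = to_events(hist, 'event')
print(events.sizes)
```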
{essreduce-25.3.0.dist-info → essreduce-25.4.0.dist-info}/METADATA
CHANGED

@@ -1,6 +1,6 @@
-Metadata-Version: 2.…
+Metadata-Version: 2.4
 Name: essreduce
-Version: 25.3.0
+Version: 25.4.0
 Summary: Common data reduction tools for the ESS facility
 Author: Scipp contributors
 License: BSD 3-Clause License

@@ -57,10 +57,12 @@ Requires-Dist: scippneutron>=25.02.0
 Requires-Dist: scippnexus>=24.11.0
 Provides-Extra: test
 Requires-Dist: ipywidgets; extra == "test"
+Requires-Dist: numba; extra == "test"
 Requires-Dist: pooch; extra == "test"
 Requires-Dist: pytest; extra == "test"
 Requires-Dist: scipy>=1.7.0; extra == "test"
 Requires-Dist: tof>=25.01.2; extra == "test"
+Dynamic: license-file

 [](CODE_OF_CONDUCT.md)
 [](https://pypi.python.org/pypi/essreduce)
{essreduce-25.3.0.dist-info → essreduce-25.4.0.dist-info}/RECORD
CHANGED

@@ -3,26 +3,28 @@ ess/reduce/data.py,sha256=vaoeAJ6EpK1YghOiAALLdWiW17TgUnnnt0H-RGiGzXk,3756
 ess/reduce/logging.py,sha256=6n8Czq4LZ3OK9ENlKsWSI1M3KvKv6_HSoUiV4__IUlU,357
 ess/reduce/parameter.py,sha256=4sCfoKOI2HuO_Q7JLH_jAXnEOFANSn5P3NdaOBzhJxc,4635
 ess/reduce/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-ess/reduce/streaming.py,sha256=…
+ess/reduce/streaming.py,sha256=TBttQV5WdSpUKh38J0pdv53seMWtUFswxd6-ltaZb_M,17403
 ess/reduce/ui.py,sha256=zmorAbDwX1cU3ygDT--OP58o0qU7OBcmJz03jPeYSLA,10884
 ess/reduce/uncertainty.py,sha256=LR4O6ApB6Z-W9gC_XW0ajupl8yFG-du0eee1AX_R-gk,6990
 ess/reduce/workflow.py,sha256=sL34T_2Cjl_8iFlegujxI9VyOUwo6erVC8pOXnfWgYw,3060
 ess/reduce/live/__init__.py,sha256=jPQVhihRVNtEDrE20PoKkclKV2aBF1lS7cCHootgFgI,204
-ess/reduce/live/raw.py,sha256=…
+ess/reduce/live/raw.py,sha256=66qV0G2rP8gK5tXuk-syTlDLE2jT3ehfmSnET7Xzfd0,24392
 ess/reduce/live/roi.py,sha256=Hs-pW98k41WU6Kl3UQ41kQawk80c2QNOQ_WNctLzDPE,3795
 ess/reduce/live/workflow.py,sha256=bsbwvTqPhRO6mC__3b7MgU7DWwAnOvGvG-t2n22EKq8,4285
 ess/reduce/nexus/__init__.py,sha256=59bxKkNYg8DYcSykNvH6nCa5SYchJC4SbgZEKhkNdYc,967
-ess/reduce/nexus/_nexus_loader.py,sha256=…
+ess/reduce/nexus/_nexus_loader.py,sha256=5N48AMJx1AaFZb6WZPPbVKUlXyFMVVtZrn7Bae57O3A,19842
 ess/reduce/nexus/json_generator.py,sha256=ME2Xn8L7Oi3uHJk9ZZdCRQTRX-OV_wh9-DJn07Alplk,2529
 ess/reduce/nexus/json_nexus.py,sha256=QrVc0p424nZ5dHX9gebAJppTw6lGZq9404P_OFl1giA,10282
 ess/reduce/nexus/types.py,sha256=15XcHbNbOfnAYjWXzzKyYDVNyNixRnP0hJ-Q2duwMWE,9896
-ess/reduce/nexus/workflow.py,sha256=…
+ess/reduce/nexus/workflow.py,sha256=EiD6-58eGwoN5fbo47UTZy_oYFitCbwlIH-xqDOSp4c,24326
 ess/reduce/scripts/grow_nexus.py,sha256=hET3h06M0xlJd62E3palNLFvJMyNax2kK4XyJcOhl-I,3387
-ess/reduce/time_of_flight/__init__.py,sha256=…
-ess/reduce/time_of_flight/…
-ess/reduce/time_of_flight/…
-ess/reduce/time_of_flight/…
-ess/reduce/time_of_flight/…
+ess/reduce/time_of_flight/__init__.py,sha256=TSHfyoROwFhM2k3jHzamw3zeb0OQOaiuvgCgDEPEQ_g,1097
+ess/reduce/time_of_flight/eto_to_tof.py,sha256=Nq2gx7aejoZ_ExLTr9I6KZMqDxCKAx1PpGHslpNXkKU,25271
+ess/reduce/time_of_flight/fakes.py,sha256=REyHkJsSSq2_l5UOtpsv2aKkhCuro_i3KpVsxxITbW0,4470
+ess/reduce/time_of_flight/interpolator_numba.py,sha256=4fZLs5Q4UxXKJREYxRAV5qUWB_uR2PscvMP0vno1A-A,4544
+ess/reduce/time_of_flight/interpolator_scipy.py,sha256=sRJj2ncBiUMv6g9h-MJzI9xyY0Ir0degpAv6FIeSMBw,1834
+ess/reduce/time_of_flight/simulation.py,sha256=cIF_nWkLQlcWUCW2_wvWBU2ocg_8CSfOnfkoqdLdUgs,2923
+ess/reduce/time_of_flight/to_events.py,sha256=w9mHpnWd3vwN2ouob-GK_1NPrTjCaOzPuC2QuEey-m0,4342
 ess/reduce/time_of_flight/types.py,sha256=Iv1XGLbrZ9bD4CPAVhsIPkAaB46YC7l7yf5XweljLqk,5047
 ess/reduce/widgets/__init__.py,sha256=SoSHBv8Dc3QXV9HUvPhjSYWMwKTGYZLpsWwsShIO97Q,5325
 ess/reduce/widgets/_base.py,sha256=_wN3FOlXgx_u0c-A_3yyoIH-SdUvDENGgquh9S-h5GI,4852

@@ -36,9 +38,9 @@ ess/reduce/widgets/_spinner.py,sha256=2VY4Fhfa7HMXox2O7UbofcdKsYG-AJGrsgGJB85nDX
 ess/reduce/widgets/_string_widget.py,sha256=iPAdfANyXHf-nkfhgkyH6gQDklia0LebLTmwi3m-iYQ,1482
 ess/reduce/widgets/_switchable_widget.py,sha256=fjKz99SKLhIF1BLgGVBSKKn3Lu_jYBwDYGeAjbJY3Q8,2390
 ess/reduce/widgets/_vector_widget.py,sha256=aTaBqCFHZQhrIoX6-sSqFWCPePEW8HQt5kUio8jP1t8,1203
-essreduce-25.…
-essreduce-25.…
-essreduce-25.…
-essreduce-25.…
-essreduce-25.…
-essreduce-25.…
+essreduce-25.4.0.dist-info/licenses/LICENSE,sha256=nVEiume4Qj6jMYfSRjHTM2jtJ4FGu0g-5Sdh7osfEYw,1553
+essreduce-25.4.0.dist-info/METADATA,sha256=jVwDtqDA6slsUJQYBDTy_teK7EbLjGCfhqoa4YSf1Ac,3768
+essreduce-25.4.0.dist-info/WHEEL,sha256=CmyFI0kx5cdEMTLiONQRbGQwjIoR1aIYB7eCAQ4KPJ0,91
+essreduce-25.4.0.dist-info/entry_points.txt,sha256=PMZOIYzCifHMTe4pK3HbhxUwxjFaZizYlLD0td4Isb0,66
+essreduce-25.4.0.dist-info/top_level.txt,sha256=0JxTCgMKPLKtp14wb1-RKisQPQWX7i96innZNvHBr-s,4
+essreduce-25.4.0.dist-info/RECORD,,
{essreduce-25.3.0.dist-info → essreduce-25.4.0.dist-info}/entry_points.txt
File without changes

{essreduce-25.3.0.dist-info → essreduce-25.4.0.dist-info/licenses}/LICENSE
File without changes

{essreduce-25.3.0.dist-info → essreduce-25.4.0.dist-info}/top_level.txt
File without changes