essreduce 25.5.1__py3-none-any.whl → 25.5.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
ess/reduce/data.py CHANGED
@@ -40,6 +40,15 @@ _bifrost_registry = Registry(
 )
 
 
+_dream_registry = Registry(
+    instrument='dream',
+    files={
+        "TEST_977695_00068064.hdf": "md5:9e6ee9ec70d7c5e8c0c93b9e07e8949f",
+    },
+    version='2',
+)
+
+
 _loki_registry = Registry(
     instrument='loki',
     files={
@@ -94,3 +103,11 @@ def loki_tutorial_background_run_60393() -> str:
 def loki_tutorial_sample_transmission_run() -> str:
     """Sample transmission run (sample + sample holder/can + transmission monitor)."""
     return _loki_registry.get_path('60394-2022-02-28_2215.nxs')
+
+
+def dream_coda_test_file() -> str:
+    """CODA file for DREAM where most pulses have been removed.
+
+    See ``tools/shrink_nexus.py``.
+    """
+    return _dream_registry.get_path('TEST_977695_00068064.hdf')
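
Example (illustrative, not part of the diff): a minimal use of the new helper. ``get_path`` downloads the file on first use and returns the cached local path:

    import scippnexus as snx

    from ess.reduce.data import dream_coda_test_file

    path = dream_coda_test_file()  # fetches TEST_977695_00068064.hdf if needed
    with snx.File(path) as f:
        print(list(f))  # inspect the top-level NeXus groups
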
ess/reduce/nexus/types.py CHANGED
@@ -8,6 +8,7 @@ import sciline
 import scipp as sc
 import scippnexus as snx
 from scippneutron import metadata as scn_meta
+from scippneutron.chopper import DiskChopper
 
 FilePath = NewType('FilePath', Path)
 """Full path to a NeXus file on disk."""
@@ -69,19 +70,13 @@ class TransmissionRun(Generic[ScatteringRunType]):
 """
 
 
-RunType = TypeVar(
-    'RunType',
-    BackgroundRun,
-    EmptyBeamRun,
-    SampleRun,
-    # Note that mypy does not seem to like this nesting, may need to find a workaround
-    TransmissionRun[SampleRun],
-    TransmissionRun[BackgroundRun],
-    VanadiumRun,
-)
+RunType = TypeVar('RunType')
 """TypeVar for specifying what run some data belongs to.
 
-Possible values:
+This type must be constrained when used in a Sciline pipeline.
+E.g., by passing ``run_types`` to :class:`ess.reduce.nexus.GenericNeXusWorkflow`.
+
+ESSreduce provides the following but custom types can be used:
 
 - :class:`BackgroundRun`
 - :class:`EmptyBeamRun`
@@ -92,58 +87,28 @@ Possible values:
 
 
 # 1.2 Monitor types
-Monitor1 = NewType('Monitor1', int)
-"""Identifier for an arbitrary monitor"""
-Monitor2 = NewType('Monitor2', int)
-"""Identifier for an arbitrary monitor"""
-Monitor3 = NewType('Monitor3', int)
-"""Identifier for an arbitrary monitor"""
-Monitor4 = NewType('Monitor4', int)
-"""Identifier for an arbitrary monitor"""
-Monitor5 = NewType('Monitor5', int)
-"""Identifier for an arbitrary monitor"""
-Monitor6 = NewType('Monitor6', int)
-"""Identifier for an arbitrary monitor"""
 IncidentMonitor = NewType('IncidentMonitor', int)
 """Incident monitor"""
 TransmissionMonitor = NewType('TransmissionMonitor', int)
 """Transmission monitor"""
-FrameMonitor0 = NewType('FrameMonitor', int)
+FrameMonitor0 = NewType('FrameMonitor0', int)
 """Frame monitor number 0"""
-FrameMonitor1 = NewType('FrameMonitor', int)
+FrameMonitor1 = NewType('FrameMonitor1', int)
 """Frame monitor number 1"""
-FrameMonitor2 = NewType('FrameMonitor', int)
+FrameMonitor2 = NewType('FrameMonitor2', int)
 """Frame monitor number 2"""
-FrameMonitor3 = NewType('FrameMonitor', int)
+FrameMonitor3 = NewType('FrameMonitor3', int)
 """Frame monitor number 3"""
 CaveMonitor = NewType('CaveMonitor', int)
 """A monitor located in the instrument cave"""
-MonitorType = TypeVar(
-    'MonitorType',
-    Monitor1,
-    Monitor2,
-    Monitor3,
-    Monitor4,
-    Monitor5,
-    Monitor6,
-    IncidentMonitor,
-    TransmissionMonitor,
-    FrameMonitor0,
-    FrameMonitor1,
-    FrameMonitor2,
-    FrameMonitor3,
-    CaveMonitor,
-)
+MonitorType = TypeVar('MonitorType')
 """TypeVar for specifying what monitor some data belongs to.
 
-Possible values:
+This type must be constrained when used in a Sciline pipeline.
+E.g., by passing ``monitor_types`` to :class:`ess.reduce.nexus.GenericNeXusWorkflow`.
+
+ESSreduce provides the following but custom types can be used:
 
 - :class:`IncidentMonitor`
 - :class:`TransmissionMonitor`
 - :class:`FrameMonitor0`
@@ -154,27 +119,20 @@ Possible values:
 """
 
 
-Component = TypeVar(
-    'Component',
+Component = TypeVar('Component')
+"""A beamline component in a neXus file."""
+COMPONENT_CONSTRAINTS = (
     snx.NXdetector,
     snx.NXsample,
     snx.NXsource,
     snx.NXdisk_chopper,
     snx.NXcrystal,
-    Monitor1,
-    Monitor2,
-    Monitor3,
-    Monitor4,
-    Monitor5,
-    Monitor6,
-    IncidentMonitor,
-    TransmissionMonitor,
-    FrameMonitor0,
-    FrameMonitor1,
-    FrameMonitor2,
-    FrameMonitor3,
-    CaveMonitor,
 )
+"""Base constraints for the Component type variable.
+
+This list will be supplemented with monitor types when creating a pipeline.
+"""
+
 UniqueComponent = TypeVar('UniqueComponent', snx.NXsample, snx.NXsource)
 """Components that can be identified by their type as there will only be one."""
 
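Example (illustrative, not part of the diff): with RunType and MonitorType now unconstrained, the concrete run and monitor types are chosen when a workflow is created, and user-defined NewTypes are accepted alongside the built-in ones. A minimal sketch:

    from typing import NewType

    from ess.reduce.nexus import GenericNeXusWorkflow
    from ess.reduce.nexus.types import BackgroundRun, CaveMonitor, SampleRun

    # Hypothetical custom monitor type; any NewType works now.
    BeamStopMonitor = NewType('BeamStopMonitor', int)

    wf = GenericNeXusWorkflow(
        run_types=[SampleRun, BackgroundRun],
        monitor_types=[CaveMonitor, BeamStopMonitor],
    )
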
@@ -336,8 +294,15 @@ class NeXusTransformation(Generic[Component, RunType]):
         return NeXusTransformation(value=transform)
 
 
-class Choppers(
+class RawChoppers(
     sciline.Scope[RunType, sc.DataGroup[sc.DataGroup[Any]]],
     sc.DataGroup[sc.DataGroup[Any]],
 ):
     """All choppers in a NeXus file."""
+
+
+class DiskChoppers(
+    sciline.Scope[RunType, sc.DataGroup[DiskChopper]],
+    sc.DataGroup[DiskChopper],
+):
+    """All disk choppers parsed from a NeXus file."""
ess/reduce/nexus/workflow.py CHANGED
@@ -5,7 +5,7 @@
 
 from collections.abc import Iterable
 from copy import deepcopy
-from typing import Any
+from typing import Any, TypeVar
 
 import sciline
 import sciline.typing
@@ -15,15 +15,14 @@ from scipp.constants import g
 from scipp.core import label_based_index_to_positional_index
 from scippneutron.chopper import extract_chopper_from_nexus
 
-from ..utils import prune_type_vars
 from . import _nexus_loader as nexus
 from .types import (
+    COMPONENT_CONSTRAINTS,
     AllNeXusComponents,
     Beamline,
     CalibratedBeamline,
     CalibratedDetector,
     CalibratedMonitor,
-    Choppers,
     Component,
     DetectorBankSizes,
     DetectorData,
@@ -46,6 +45,7 @@ from .types import (
     NeXusTransformationChain,
     Position,
     PreopenNeXusFile,
+    RawChoppers,
    RunType,
     SampleRun,
     TimeInterval,
@@ -513,9 +513,18 @@ def assemble_monitor_data(
 
 def parse_disk_choppers(
     choppers: AllNeXusComponents[snx.NXdisk_chopper, RunType],
-) -> Choppers[RunType]:
-    """Convert the NeXus representation of a chopper to ours."""
-    return Choppers[RunType](
+) -> RawChoppers[RunType]:
+    """Convert the NeXus representation of a chopper to ours.
+
+    Returns
+    -------
+    :
+        A nested data group containing the loaded choppers.
+        The elements may be time-dependent arrays that first need to be processed
+        before they can be passed to other functions as
+        :class:`ess.reduce.nexus.types.DiskChoppers`.
+    """
+    return RawChoppers[RunType](
         choppers.apply(
             lambda chopper: extract_chopper_from_nexus(
                 nexus.compute_component_position(chopper)
@@ -551,10 +560,10 @@ class _StrippedDetector(snx.NXdetector):
 class _DummyField:
     """Dummy field that can replace snx.Field in NXmonitor."""
 
-    def __init__(self):
+    def __init__(self, dim: str):
         self.attrs = {}
-        self.sizes = {'event_time_zero': 0}
-        self.dims = ('event_time_zero',)
+        self.sizes = {dim: 0}
+        self.dims = (dim,)
         self.shape = (0,)
 
     def __getitem__(self, key: Any) -> sc.Variable:
@@ -564,14 +573,17 @@ class _DummyField:
 class _StrippedMonitor(snx.NXmonitor):
     """Monitor definition without event data for ScippNexus.
 
-    Drops NXevent_data group, data is replaced by a dummy field.
+    Drops NXevent_data and NXdata groups, data is replaced by a dummy field.
     """
 
     def __init__(
         self, attrs: dict[str, Any], children: dict[str, snx.Field | snx.Group]
     ):
-        children = _drop(children, (snx.NXevent_data,))
-        children['data'] = _DummyField()
+        is_dense = snx.NXdata in (
+            getattr(child, 'nx_class', None) for child in children
+        )
+        children = _drop(children, (snx.NXevent_data, snx.NXdata))
+        children['data'] = _DummyField(dim='time' if is_dense else 'event_time_zero')
         super().__init__(attrs=attrs, children=children)
 
 
@@ -579,11 +591,21 @@ def _add_variances(da: sc.DataArray) -> sc.DataArray:
     out = da.copy(deep=False)
     if out.bins is not None:
         content = out.bins.constituents['data']
-        if content.variances is None:
-            content.variances = content.values
+        content.data = _assign_values_as_variances(content.data)
+    elif out.variances is None:
+        out.data = _assign_values_as_variances(out.data)
     return out
 
 
+def _assign_values_as_variances(var: sc.Variable) -> sc.Variable:
+    try:
+        var.variances = var.values
+    except sc.VariancesError:
+        var = var.to(dtype=sc.DType.float64)
+        var.variances = var.values
+    return var
+
+
 def load_beamline_metadata_from_nexus(file_spec: NeXusFileSpec[SampleRun]) -> Beamline:
     """Load beamline metadata from a sample NeXus file."""
     return nexus.load_metadata(file_spec.value, Beamline)
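
A note on the new fallback (illustrative, not part of the diff): it matters for integer-valued data, since scipp only allows variances on floating-point variables:

    import scipp as sc

    counts = sc.array(dims=['x'], values=[1, 2, 3], unit='counts')
    # counts.variances = counts.values  # raises sc.VariancesError for int64
    counts = counts.to(dtype='float64')
    counts.variances = counts.values  # Poisson-style variances, as in _add_variances
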
@@ -644,16 +666,34 @@ _metadata_providers = (
 )
 
 
-def LoadMonitorWorkflow() -> sciline.Pipeline:
+def LoadMonitorWorkflow(
+    *,
+    run_types: Iterable[sciline.typing.Key],
+    monitor_types: Iterable[sciline.typing.Key],
+) -> sciline.Pipeline:
     """Generic workflow for loading monitor data from a NeXus file."""
-    wf = sciline.Pipeline((*_common_providers, *_monitor_providers))
+    wf = sciline.Pipeline(
+        (*_common_providers, *_monitor_providers),
+        constraints=_gather_constraints(
+            run_types=run_types, monitor_types=monitor_types
+        ),
+    )
     wf[PreopenNeXusFile] = PreopenNeXusFile(False)
     return wf
 
 
-def LoadDetectorWorkflow() -> sciline.Pipeline:
+def LoadDetectorWorkflow(
+    *,
+    run_types: Iterable[sciline.typing.Key],
+    monitor_types: Iterable[sciline.typing.Key],
+) -> sciline.Pipeline:
     """Generic workflow for loading detector data from a NeXus file."""
-    wf = sciline.Pipeline((*_common_providers, *_detector_providers))
+    wf = sciline.Pipeline(
+        (*_common_providers, *_detector_providers),
+        constraints=_gather_constraints(
+            run_types=run_types, monitor_types=monitor_types
+        ),
+    )
     wf[DetectorBankSizes] = DetectorBankSizes({})
     wf[PreopenNeXusFile] = PreopenNeXusFile(False)
     return wf
@@ -661,8 +701,8 @@ def LoadDetectorWorkflow() -> sciline.Pipeline:
 
 def GenericNeXusWorkflow(
     *,
-    run_types: Iterable[sciline.typing.Key] | None = None,
-    monitor_types: Iterable[sciline.typing.Key] | None = None,
+    run_types: Iterable[sciline.typing.Key],
+    monitor_types: Iterable[sciline.typing.Key],
 ) -> sciline.Pipeline:
     """
     Generic workflow for loading detector and monitor data from a NeXus file.
@@ -681,13 +721,12 @@
     Parameters
     ----------
     run_types:
-        List of run types to include in the workflow. If not provided, all run types
-        are included.
-        Must be a possible value of :class:`ess.reduce.nexus.types.RunType`.
+        List of run types to include in the workflow.
+        Constrains the possible values of :class:`ess.reduce.nexus.types.RunType`.
     monitor_types:
-        List of monitor types to include in the workflow. If not provided, all monitor
-        types are included.
-        Must be a possible value of :class:`ess.reduce.nexus.types.MonitorType`.
+        List of monitor types to include in the workflow.
+        Constrains the possible values of :class:`ess.reduce.nexus.types.MonitorType`
+        and :class:`ess.reduce.nexus.types.Component`.
 
     Returns
     -------
@@ -701,12 +740,26 @@
             *_detector_providers,
             *_chopper_providers,
             *_metadata_providers,
-        )
+        ),
+        constraints=_gather_constraints(
+            run_types=run_types, monitor_types=monitor_types
+        ),
     )
     wf[DetectorBankSizes] = DetectorBankSizes({})
     wf[PreopenNeXusFile] = PreopenNeXusFile(False)
 
-    if run_types is not None or monitor_types is not None:
-        prune_type_vars(wf, run_types=run_types, monitor_types=monitor_types)
-
     return wf
+
+
+def _gather_constraints(
+    *,
+    run_types: Iterable[sciline.typing.Key],
+    monitor_types: Iterable[sciline.typing.Key],
+) -> dict[TypeVar, Iterable[type]]:
+    mon = tuple(iter(monitor_types))
+    constraints = {
+        RunType: run_types,
+        MonitorType: mon,
+        Component: (*COMPONENT_CONSTRAINTS, *mon),
+    }
+    return constraints
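
For illustration (not part of the diff): the removed ``prune_type_vars`` post-processing is replaced by sciline's ``constraints`` argument (hence the sciline>=25.05.1 requirement in METADATA below), which instantiates generic providers only for the listed types. A self-contained sketch with hypothetical names:

    from typing import NewType, TypeVar

    import sciline

    RunA = NewType('RunA', int)
    RunB = NewType('RunB', int)
    T = TypeVar('T')  # unconstrained, like RunType above

    class Raw(sciline.Scope[T, str], str):
        """Raw data for a run (hypothetical)."""

    class Clean(sciline.Scope[T, str], str):
        """Cleaned data for a run (hypothetical)."""

    def clean(raw: Raw[T]) -> Clean[T]:
        return Clean[T](raw.strip())

    # `clean` is instantiated for RunA and RunB only.
    wf = sciline.Pipeline([clean], constraints={T: [RunA, RunB]})
    wf[Raw[RunA]] = Raw[RunA](' data ')
    assert wf.compute(Clean[RunA]) == 'data'
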
ess/reduce/streaming.py CHANGED
@@ -138,6 +138,30 @@ class EternalAccumulator(Accumulator[T]):
         self._value = None
 
 
+class MeanAccumulator(EternalAccumulator[T]):
+    """
+    Accumulator that computes the mean of pushed values.
+
+    Does not support event data.
+    """
+
+    def __init__(self, **kwargs: Any) -> None:
+        super().__init__(**kwargs)
+        self._count = 0
+
+    def _do_push(self, value: T) -> None:
+        super()._do_push(value)
+        self._count += 1
+
+    def _get_value(self) -> T:
+        return super()._get_value() / self._count
+
+    def clear(self) -> None:
+        """Clear the accumulated value and count."""
+        super().clear()
+        self._count = 0
+
+
 class RollingAccumulator(Accumulator[T]):
     """
     Accumulator that adds pushed values to a rolling window.
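
Example (illustrative, not part of the diff): a minimal use of the new accumulator, assuming the ``push``/``value`` interface of the ``Accumulator`` base class:

    import scipp as sc

    from ess.reduce.streaming import MeanAccumulator

    acc = MeanAccumulator()
    acc.push(sc.scalar(1.0))
    acc.push(sc.scalar(3.0))
    assert sc.identical(acc.value, sc.scalar(2.0))  # (1 + 3) / 2
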
ess/reduce/time_of_flight/__init__.py CHANGED
@@ -6,14 +6,8 @@ Utilities for computing real neutron time-of-flight from chopper settings and
 neutron time-of-arrival at the detectors.
 """
 
-from .eto_to_tof import (
-    default_parameters,
-    providers,
-    resample_detector_time_of_flight_data,
-    resample_monitor_time_of_flight_data,
-)
+from .eto_to_tof import default_parameters, providers
 from .simulation import simulate_beamline
-from .to_events import to_events
 from .types import (
     DetectorLtotal,
     DetectorTofData,
@@ -25,8 +19,6 @@ from .types import (
     PulsePeriod,
     PulseStride,
     PulseStrideOffset,
-    ResampledDetectorTofData,
-    ResampledMonitorTofData,
     SimulationResults,
     TimeOfFlightLookupTable,
     TimeOfFlightLookupTableFilename,
@@ -37,17 +29,17 @@ from .workflow import GenericTofWorkflow, TofLutProvider
 __all__ = [
     "DetectorLtotal",
     "DetectorTofData",
+    "DetectorTofData",
     "DistanceResolution",
     "GenericTofWorkflow",
     "LookupTableRelativeErrorThreshold",
     "LtotalRange",
     "MonitorLtotal",
     "MonitorTofData",
+    "MonitorTofData",
     "PulsePeriod",
     "PulseStride",
     "PulseStrideOffset",
-    "ResampledDetectorTofData",
-    "ResampledMonitorTofData",
     "SimulationResults",
     "TimeOfFlightLookupTable",
     "TimeOfFlightLookupTableFilename",
@@ -55,8 +47,5 @@ __all__ = [
     "TofLutProvider",
     "default_parameters",
     "providers",
-    "resample_detector_time_of_flight_data",
-    "resample_monitor_time_of_flight_data",
     "simulate_beamline",
-    "to_events",
 ]
ess/reduce/time_of_flight/eto_to_tof.py CHANGED
@@ -27,7 +27,7 @@ from ..nexus.types import (
     MonitorType,
     RunType,
 )
-from .to_events import to_events
+from .resample import rebin_strictly_increasing
 from .types import (
     DetectorLtotal,
     DetectorTofData,
@@ -39,8 +39,6 @@ from .types import (
     PulsePeriod,
     PulseStride,
     PulseStrideOffset,
-    ResampledDetectorTofData,
-    ResampledMonitorTofData,
     SimulationResults,
     TimeOfFlightLookupTable,
     TimeResolution,
@@ -586,7 +584,8 @@ def _compute_tof_data(
     pulse_stride_offset: int,
 ) -> sc.DataArray:
     if da.bins is None:
-        return _time_of_flight_data_histogram(da=da, lookup=lookup, ltotal=ltotal)
+        data = _time_of_flight_data_histogram(da=da, lookup=lookup, ltotal=ltotal)
+        return rebin_strictly_increasing(data, dim='tof')
     else:
         return _time_of_flight_data_events(
             da=da,
@@ -664,62 +663,6 @@ def monitor_time_of_flight_data(
     )
 
 
-def _resample_tof_data(da: sc.DataArray) -> sc.DataArray:
-    """
-    Histogrammed data that has been converted to `tof` will typically have
-    unsorted bin edges (due to either wrapping of `time_of_flight` or wavelength
-    overlap between subframes).
-    This function re-histograms the data to ensure that the bin edges are sorted.
-    It makes use of the ``to_events`` helper which generates a number of events in each
-    bin with a uniform distribution. The new events are then histogrammed using a set of
-    sorted bin edges.
-
-    WARNING:
-    This function is highly experimental, has limitations and should be used with
-    caution. It is a workaround to the issue that rebinning data with unsorted bin
-    edges is not supported in scipp.
-    As such, this function is not part of the default set of providers, and needs to be
-    inserted manually into the workflow.
-
-    Parameters
-    ----------
-    da:
-        Histogrammed data with the time-of-flight coordinate.
-    """
-    dim = next(iter(set(da.dims) & {"time_of_flight", "tof"}))
-    data = da.rename_dims({dim: "tof"}).drop_coords(
-        [name for name in da.coords if name != "tof"]
-    )
-    events = to_events(data, "event")
-
-    # Define a new bin width, close to the original bin width.
-    # TODO: this could be a workflow parameter
-    coord = da.coords["tof"]
-    bin_width = (coord[dim, 1:] - coord[dim, :-1]).nanmedian()
-    rehist = events.hist(tof=bin_width)
-    return rehist.assign_coords(
-        {key: var for key, var in da.coords.items() if dim not in var.dims}
-    )
-
-
-def resample_detector_time_of_flight_data(
-    da: DetectorTofData[RunType],
-) -> ResampledDetectorTofData[RunType]:
-    """
-    Resample the detector time-of-flight data to ensure that the bin edges are sorted.
-    """
-    return ResampledDetectorTofData(_resample_tof_data(da))
-
-
-def resample_monitor_time_of_flight_data(
-    da: MonitorTofData[RunType, MonitorType],
-) -> ResampledMonitorTofData[RunType, MonitorType]:
-    """
-    Resample the monitor time-of-flight data to ensure that the bin edges are sorted.
-    """
-    return ResampledMonitorTofData(_resample_tof_data(da))
-
-
 def default_parameters() -> dict:
     """
     Default parameters of the time-of-flight workflow.
ess/reduce/time_of_flight/resample.py ADDED
@@ -0,0 +1,97 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright (c) 2025 Scipp contributors (https://github.com/scipp)
+
+
+import numpy as np
+import scipp as sc
+
+
+def find_strictly_increasing_sections(var: sc.Variable) -> list[slice]:
+    """
+    Find strictly increasing sections in a coordinate dimension (minimum length 2).
+
+    Parameters
+    ----------
+    var:
+        The variable to analyze, which should be one-dimensional.
+
+    Returns
+    -------
+    sections:
+        Slice objects that can be used extract strictly increasing sections.
+    """
+    values = var.values
+    finite = np.isfinite(values)
+    increasing = (np.sign(np.diff(values)) > 0) & finite[:-1] & finite[1:]
+    # 1 marks the start of an increasing section, -1 marks the end
+    transitions = np.diff(np.concatenate(([False], increasing, [False])).astype(int))
+    section_starts = np.where(transitions == 1)[0]
+    section_ends = np.where(transitions == -1)[0] + np.array(1)
+    return [
+        slice(start, end)
+        for start, end in zip(section_starts, section_ends, strict=True)
+        if end - start >= 2  # Ensure section has at least 2 points
+    ]
+
+
+def get_min_max(
+    var: sc.Variable, *, dim: str, slices: list[slice]
+) -> tuple[sc.Variable, sc.Variable]:
+    if not slices:
+        raise ValueError("No strictly increasing sections found.")
+    combined = sc.concat([var[dim, slice] for slice in slices], dim)
+    return combined.min(), combined.max()
+
+
+def make_regular_grid(
+    var: sc.Variable, *, dim: str, slices: list[slice]
+) -> sc.Variable:
+    """
+    Create a regular grid variable based on the min and max of the slices.
+
+    The grid is constructed such that it includes the minimum and maximum values
+    of the strictly increasing sections, with a step size equal to the difference
+    between the first two values of the section with the minimum start value (which is
+    not necessarily the first section).
+    """
+    min_val, max_val = get_min_max(var, dim=dim, slices=slices)
+    first: sc.Variable | None = None
+    for s in slices:
+        first = var[dim, s]
+        if sc.identical(first[0], min_val):
+            break
+    if first is None:
+        # This should not happen if slices are correctly identified and passed from
+        # find_strictly_increasing_sections.
+        raise ValueError("Section is not strictly increasing.")
+    step = first[1] - first[0]
+    return sc.arange(
+        dim=dim,
+        start=min_val.value,
+        stop=max_val.value + step.value,  # Ensure the last bin edge is included
+        step=step.value,
+        unit=step.unit,
+        dtype=step.dtype,
+    )
+
+
+def rebin_strictly_increasing(da: sc.DataArray, dim: str) -> sc.DataArray:
+    """
+    Find strictly monotonic sections in a coordinate dimension and rebin the data array
+    into a regular grid based on these sections.
+    """
+    # Ensure the dimension is named like the coordinate.
+    da = da.rename_dims({da.coords[dim].dim: dim})
+    slices = find_strictly_increasing_sections(da.coords[dim])
+    if len(slices) == 1:
+        return da[dim, slices[0]]
+    if not slices:
+        raise ValueError("No strictly increasing sections found.")
+    if da.coords[dim].dtype not in (sc.DType.float64, sc.DType.float32):
+        # rebin does not like integer coords.
+        da = da.assign_coords({dim: da.coords[dim].to(dtype='float64')})
+    # Slices refer to the indices in the coord, which are bin edges. For slicing data
+    # we need to stop at the last index minus one.
+    sections = [da[dim, section.start : section.stop - 1] for section in slices]
+    edges = make_regular_grid(da.coords[dim], dim=dim, slices=slices)
+    return sc.reduce([sc.rebin(section, {dim: edges}) for section in sections]).sum()
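
Example (illustrative, not part of the diff): the new helpers applied to bin edges that wrap around once, as can happen after frame unwrapping:

    import scipp as sc

    from ess.reduce.time_of_flight.resample import (
        find_strictly_increasing_sections,
        rebin_strictly_increasing,
    )

    # Two strictly increasing sections of bin edges: [0, 1, 2] and [0.5, 1.5, 2.5].
    edges = sc.array(dims=['tof'], values=[0.0, 1.0, 2.0, 0.5, 1.5, 2.5], unit='ms')
    da = sc.DataArray(
        sc.ones(dims=['tof'], shape=[5], unit='counts'), coords={'tof': edges}
    )
    print(find_strictly_increasing_sections(edges))
    # [slice(0, 3, None), slice(3, 6, None)]
    print(rebin_strictly_increasing(da, dim='tof').values)
    # [1.5 2.  0.5]  -- both sections rebinned onto the regular grid [0, 1, 2, 3] ms
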
ess/reduce/time_of_flight/simulation.py CHANGED
@@ -6,7 +6,7 @@ import scipp as sc
 import scippnexus as snx
 from scippneutron.chopper import DiskChopper
 
-from ..nexus.types import Choppers, Position, SampleRun
+from ..nexus.types import DiskChoppers, Position, SampleRun
 from .types import NumberOfSimulatedNeutrons, SimulationResults
 
 
@@ -87,7 +87,7 @@ def simulate_beamline(
 
 
 def simulate_chopper_cascade_using_tof(
-    choppers: Choppers[SampleRun],
+    choppers: DiskChoppers[SampleRun],
     neutrons: NumberOfSimulatedNeutrons,
     source_position: Position[snx.NXsource, SampleRun],
 ) -> SimulationResults:
ess/reduce/time_of_flight/types.py CHANGED
@@ -130,43 +130,3 @@ class DetectorTofData(sl.Scope[RunType, sc.DataArray], sc.DataArray):
 
 class MonitorTofData(sl.Scope[RunType, MonitorType, sc.DataArray], sc.DataArray):
     """Monitor data with time-of-flight coordinate."""
-
-
-class ResampledDetectorTofData(sl.Scope[RunType, sc.DataArray], sc.DataArray):
-    """
-    Histogrammed detector data with time-of-flight coordinate, that has been resampled.
-
-    Histogrammed data that has been converted to `tof` will typically have
-    unsorted bin edges (due to either wrapping of `time_of_flight` or wavelength
-    overlap between subframes).
-    We thus resample the data to ensure that the bin edges are sorted.
-    It makes use of the ``to_events`` helper which generates a number of events in each
-    bin with a uniform distribution. The new events are then histogrammed using a set of
-    sorted bin edges to yield a new histogram with sorted bin edges.
-
-    WARNING:
-    This function is highly experimental, has limitations and should be used with
-    caution. It is a workaround to the issue that rebinning data with unsorted bin
-    edges is not supported in scipp.
-    """
-
-
-class ResampledMonitorTofData(
-    sl.Scope[RunType, MonitorType, sc.DataArray], sc.DataArray
-):
-    """
-    Histogrammed monitor data with time-of-flight coordinate, that has been resampled.
-
-    Histogrammed data that has been converted to `tof` will typically have
-    unsorted bin edges (due to either wrapping of `time_of_flight` or wavelength
-    overlap between subframes).
-    We thus resample the data to ensure that the bin edges are sorted.
-    It makes use of the ``to_events`` helper which generates a number of events in each
-    bin with a uniform distribution. The new events are then histogrammed using a set of
-    sorted bin edges to yield a new histogram with sorted bin edges.
-
-    WARNING:
-    This function is highly experimental, has limitations and should be used with
-    caution. It is a workaround to the issue that rebinning data with unsorted bin
-    edges is not supported in scipp.
-    """
ess/reduce/time_of_flight/workflow.py CHANGED
@@ -7,7 +7,6 @@ import sciline
 import scipp as sc
 
 from ..nexus import GenericNeXusWorkflow
-from ..utils import prune_type_vars
 from . import eto_to_tof, simulation
 from .types import TimeOfFlightLookupTable, TimeOfFlightLookupTableFilename
 
@@ -28,13 +27,14 @@ def load_tof_lookup_table(
 
 def GenericTofWorkflow(
     *,
-    run_types: Iterable[sciline.typing.Key] | None = None,
-    monitor_types: Iterable[sciline.typing.Key] | None = None,
+    run_types: Iterable[sciline.typing.Key],
+    monitor_types: Iterable[sciline.typing.Key],
     tof_lut_provider: TofLutProvider = TofLutProvider.FILE,
 ) -> sciline.Pipeline:
     """
     Generic workflow for computing the neutron time-of-flight for detector and monitor
     data.
+
     This workflow builds on the ``GenericNeXusWorkflow`` and computes time-of-flight
     from a lookup table that is created from the chopper settings, detector Ltotal and
     the neutron time-of-arrival.
@@ -53,13 +53,12 @@
     Parameters
     ----------
     run_types:
-        List of run types to include in the workflow. If not provided, all run types
-        are included.
-        Must be a possible value of :class:`ess.reduce.nexus.types.RunType`.
+        List of run types to include in the workflow.
+        Constrains the possible values of :class:`ess.reduce.nexus.types.RunType`.
     monitor_types:
-        List of monitor types to include in the workflow. If not provided, all monitor
-        types are included.
-        Must be a possible value of :class:`ess.reduce.nexus.types.MonitorType`.
+        List of monitor types to include in the workflow.
+        Constrains the possible values of :class:`ess.reduce.nexus.types.MonitorType`
+        and :class:`ess.reduce.nexus.types.Component`.
     tof_lut_provider:
         Specifies how the time-of-flight lookup table is provided:
         - FILE: Read from a file
@@ -88,7 +87,4 @@
     for key, value in eto_to_tof.default_parameters().items():
         wf[key] = value
 
-    if run_types is not None or monitor_types is not None:
-        prune_type_vars(wf, run_types=run_types, monitor_types=monitor_types)
-
     return wf
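
Example (illustrative, not part of the diff): the run and monitor types are now mandatory arguments here as well. A minimal sketch (the lookup-table filename is hypothetical, and the remaining file and parameter setup is omitted):

    from ess.reduce.nexus.types import CaveMonitor, SampleRun
    from ess.reduce.time_of_flight import (
        DetectorTofData,
        GenericTofWorkflow,
        TimeOfFlightLookupTableFilename,
    )

    wf = GenericTofWorkflow(run_types=[SampleRun], monitor_types=[CaveMonitor])
    wf[TimeOfFlightLookupTableFilename] = 'tof-lookup-table.h5'
    # ... set the NeXus file and other parameters, then:
    # da = wf.compute(DetectorTofData[SampleRun])
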
essreduce-25.5.1.dist-info/METADATA → essreduce-25.5.3.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: essreduce
-Version: 25.5.1
+Version: 25.5.3
 Summary: Common data reduction tools for the ESS facility
 Author: Scipp contributors
 License: BSD 3-Clause License
@@ -51,7 +51,7 @@ Classifier: Typing :: Typed
 Requires-Python: >=3.10
 Description-Content-Type: text/markdown
 License-File: LICENSE
-Requires-Dist: sciline>=25.04.1
+Requires-Dist: sciline>=25.05.1
 Requires-Dist: scipp>=25.01.0
 Requires-Dist: scippneutron>=25.02.0
 Requires-Dist: scippnexus>=24.11.0
essreduce-25.5.1.dist-info/RECORD → essreduce-25.5.3.dist-info/RECORD CHANGED
@@ -1,12 +1,11 @@
 ess/reduce/__init__.py,sha256=o1pWRP9YGwTukM_k-qlG6KcoXOpMb0PDVH59vod12lw,419
-ess/reduce/data.py,sha256=vaoeAJ6EpK1YghOiAALLdWiW17TgUnnnt0H-RGiGzXk,3756
+ess/reduce/data.py,sha256=0N7iq1363tO16ntMztTNjxkQDFst-Gnp9awpgUOBVdY,4133
 ess/reduce/logging.py,sha256=6n8Czq4LZ3OK9ENlKsWSI1M3KvKv6_HSoUiV4__IUlU,357
 ess/reduce/parameter.py,sha256=4sCfoKOI2HuO_Q7JLH_jAXnEOFANSn5P3NdaOBzhJxc,4635
 ess/reduce/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-ess/reduce/streaming.py,sha256=TBttQV5WdSpUKh38J0pdv53seMWtUFswxd6-ltaZb_M,17403
+ess/reduce/streaming.py,sha256=zbqxQz5dASDq4ZVyx-TdbapBXMyBttImCYz_6WOj4pg,17978
 ess/reduce/ui.py,sha256=zmorAbDwX1cU3ygDT--OP58o0qU7OBcmJz03jPeYSLA,10884
 ess/reduce/uncertainty.py,sha256=LR4O6ApB6Z-W9gC_XW0ajupl8yFG-du0eee1AX_R-gk,6990
-ess/reduce/utils.py,sha256=RBAfJRNil6JjVF-jPaxeL0ssEEfPBBQEZ3ObEorpDLo,1132
 ess/reduce/workflow.py,sha256=738-lcdgsORYfQ4A0UTk2IgnbVxC3jBdpscpaOFIpdc,3114
 ess/reduce/live/__init__.py,sha256=jPQVhihRVNtEDrE20PoKkclKV2aBF1lS7cCHootgFgI,204
 ess/reduce/live/raw.py,sha256=66qV0G2rP8gK5tXuk-syTlDLE2jT3ehfmSnET7Xzfd0,24392
@@ -16,18 +15,18 @@ ess/reduce/nexus/__init__.py,sha256=59bxKkNYg8DYcSykNvH6nCa5SYchJC4SbgZEKhkNdYc,
 ess/reduce/nexus/_nexus_loader.py,sha256=5N48AMJx1AaFZb6WZPPbVKUlXyFMVVtZrn7Bae57O3A,19842
 ess/reduce/nexus/json_generator.py,sha256=ME2Xn8L7Oi3uHJk9ZZdCRQTRX-OV_wh9-DJn07Alplk,2529
 ess/reduce/nexus/json_nexus.py,sha256=QrVc0p424nZ5dHX9gebAJppTw6lGZq9404P_OFl1giA,10282
-ess/reduce/nexus/types.py,sha256=-pj8PpHu7QJrjrOOQ-VD9QhYh1L92onkX4xtmY3mgXE,9645
-ess/reduce/nexus/workflow.py,sha256=hqK58dsr8KtQn065-PS4Eiyir90qnZ3GQNyNDupOg4I,23084
+ess/reduce/nexus/types.py,sha256=DE82JnbgpTlQnt7UN2a2Gur2N9QupV3CDL9j4Iy4lsE,9178
+ess/reduce/nexus/workflow.py,sha256=Ytt80-muk5EiXmip890ahb_m5DQqlTGRQUyaTVXRNzo,24568
 ess/reduce/scripts/grow_nexus.py,sha256=hET3h06M0xlJd62E3palNLFvJMyNax2kK4XyJcOhl-I,3387
-ess/reduce/time_of_flight/__init__.py,sha256=UxMvY4aFkhZQmIbGSo4FBpBGRD2wDJbczLMVqcEhCSg,1583
-ess/reduce/time_of_flight/eto_to_tof.py,sha256=JCu7C3AmJnB7GDJrL76oPjgxGesp67nct9xXRp3O8E4,28204
+ess/reduce/time_of_flight/__init__.py,sha256=jbvLcVgODURuweuicrsDqEYqiL_GNJa_t4c5ik344Ro,1269
+ess/reduce/time_of_flight/eto_to_tof.py,sha256=FYujFQSqDoxOLiVbNId4YcpuhKmNdvtBMUr9nK5poIk,26070
 ess/reduce/time_of_flight/fakes.py,sha256=0gtbSX3ZQilaM4ZP5dMr3fqbnhpyoVsZX2YEb8GgREE,4489
 ess/reduce/time_of_flight/interpolator_numba.py,sha256=wh2YS3j2rOu30v1Ok3xNHcwS7t8eEtZyZvbfXOCtgrQ,3835
 ess/reduce/time_of_flight/interpolator_scipy.py,sha256=_InoAPuMm2qhJKZQBAHOGRFqtvvuQ8TStoN7j_YgS4M,1853
-ess/reduce/time_of_flight/simulation.py,sha256=nJe-pkVvG9V6VdfB0_HyVYQoOSNJSMo_QydCHHW1dqM,3624
-ess/reduce/time_of_flight/to_events.py,sha256=w9mHpnWd3vwN2ouob-GK_1NPrTjCaOzPuC2QuEey-m0,4342
-ess/reduce/time_of_flight/types.py,sha256=OQeMYNN7QinXs_HDcoE6kkh_xNcyD0dEJWtnHQy5-uA,6675
-ess/reduce/time_of_flight/workflow.py,sha256=ooSVwbL0hPBBVCfuTsAVaGEENs9P4kDN-FlH42NVzJQ,3427
+ess/reduce/time_of_flight/resample.py,sha256=Opmi-JA4zNH725l9VB99U4O9UlM37f5ACTCGtwBcows,3718
+ess/reduce/time_of_flight/simulation.py,sha256=vo-zjG6t-PLetv2_nj4dhMSTEyTQ1MsrhlM2XkhOtf8,3632
+ess/reduce/time_of_flight/types.py,sha256=LJlK_5u5yeFj2TLnz-LI3eApkp8vEg5q8OncHkmHjj8,4844
+ess/reduce/time_of_flight/workflow.py,sha256=BAIMeA1bSJlS6JSG7r2srVdtBsAK6VD0DuOiYZuQvNg,3182
 ess/reduce/widgets/__init__.py,sha256=SoSHBv8Dc3QXV9HUvPhjSYWMwKTGYZLpsWwsShIO97Q,5325
 ess/reduce/widgets/_base.py,sha256=_wN3FOlXgx_u0c-A_3yyoIH-SdUvDENGgquh9S-h5GI,4852
 ess/reduce/widgets/_binedges_widget.py,sha256=ZCQsGjYHnJr9GFUn7NjoZc1CdsnAzm_fMzyF-fTKKVY,2785
@@ -40,9 +39,9 @@ ess/reduce/widgets/_spinner.py,sha256=2VY4Fhfa7HMXox2O7UbofcdKsYG-AJGrsgGJB85nDX
 ess/reduce/widgets/_string_widget.py,sha256=iPAdfANyXHf-nkfhgkyH6gQDklia0LebLTmwi3m-iYQ,1482
 ess/reduce/widgets/_switchable_widget.py,sha256=fjKz99SKLhIF1BLgGVBSKKn3Lu_jYBwDYGeAjbJY3Q8,2390
 ess/reduce/widgets/_vector_widget.py,sha256=aTaBqCFHZQhrIoX6-sSqFWCPePEW8HQt5kUio8jP1t8,1203
-essreduce-25.5.1.dist-info/licenses/LICENSE,sha256=nVEiume4Qj6jMYfSRjHTM2jtJ4FGu0g-5Sdh7osfEYw,1553
-essreduce-25.5.1.dist-info/METADATA,sha256=8VVFEjGw_n02K81UZs5IexKEOuN4IyNfaQybrzKn4j0,3768
-essreduce-25.5.1.dist-info/WHEEL,sha256=DnLRTWE75wApRYVsjgc6wsVswC54sMSJhAEd4xhDpBk,91
-essreduce-25.5.1.dist-info/entry_points.txt,sha256=PMZOIYzCifHMTe4pK3HbhxUwxjFaZizYlLD0td4Isb0,66
-essreduce-25.5.1.dist-info/top_level.txt,sha256=0JxTCgMKPLKtp14wb1-RKisQPQWX7i96innZNvHBr-s,4
-essreduce-25.5.1.dist-info/RECORD,,
+essreduce-25.5.3.dist-info/licenses/LICENSE,sha256=nVEiume4Qj6jMYfSRjHTM2jtJ4FGu0g-5Sdh7osfEYw,1553
+essreduce-25.5.3.dist-info/METADATA,sha256=WNGTtuRJz8G7G3mFV0sWWQy5ByR1uzJ3iDQi7hVyYps,3768
+essreduce-25.5.3.dist-info/WHEEL,sha256=zaaOINJESkSfm_4HQVc5ssNzHCPXhJm0kEUakpsEHaU,91
+essreduce-25.5.3.dist-info/entry_points.txt,sha256=PMZOIYzCifHMTe4pK3HbhxUwxjFaZizYlLD0td4Isb0,66
+essreduce-25.5.3.dist-info/top_level.txt,sha256=0JxTCgMKPLKtp14wb1-RKisQPQWX7i96innZNvHBr-s,4
+essreduce-25.5.3.dist-info/RECORD,,
essreduce-25.5.1.dist-info/WHEEL → essreduce-25.5.3.dist-info/WHEEL CHANGED
@@ -1,5 +1,5 @@
 Wheel-Version: 1.0
-Generator: setuptools (80.4.0)
+Generator: setuptools (80.8.0)
 Root-Is-Purelib: true
 Tag: py3-none-any
 
ess/reduce/time_of_flight/to_events.py DELETED
@@ -1,111 +0,0 @@
-# SPDX-License-Identifier: BSD-3-Clause
-# Copyright (c) 2025 Scipp contributors (https://github.com/scipp)
-
-from functools import reduce
-
-import numpy as np
-import scipp as sc
-
-
-def to_events(
-    da: sc.DataArray, event_dim: str, events_per_bin: int = 500
-) -> sc.DataArray:
-    """
-    Convert a histogrammed data array to an event list.
-    The generated events have a uniform distribution within each bin.
-    Each dimension with a bin-edge coordinate is converted to an event coordinate.
-    The contract is that if we re-histogram the event list with the same bin edges,
-    we should get the original counts back.
-    Masks on non-bin-edge dimensions are preserved.
-    If there are masks on bin-edge dimensions, the masked values are zeroed out in the
-    original data before the conversion to events.
-
-    Parameters
-    ----------
-    da:
-        DataArray to convert to events.
-    event_dim:
-        Name of the new event dimension.
-    events_per_bin:
-        Number of events to generate per bin.
-    """
-    if da.bins is not None:
-        raise ValueError("Cannot convert a binned DataArray to events.")
-    rng = np.random.default_rng()
-    event_coords = {}
-    edge_dims = []
-    midp_dims = set(da.dims)
-    midp_coord_names = []
-    # Separate bin-edge and midpoints coords
-    for name in da.coords:
-        dims = da.coords[name].dims
-        is_edges = False if not dims else da.coords.is_edges(name)
-        if is_edges:
-            if name in dims:
-                edge_dims.append(name)
-                midp_dims -= {name}
-        else:
-            midp_coord_names.append(name)
-
-    edge_sizes = {dim: da.sizes[da.coords[dim].dim] for dim in edge_dims}
-    for dim in edge_dims:
-        coord = da.coords[dim]
-        left = sc.broadcast(coord[dim, :-1], sizes=edge_sizes).values
-        right = sc.broadcast(coord[dim, 1:], sizes=edge_sizes).values
-
-        # The numpy.random.uniform function below does not support NaNs, so we need to
-        # replace them with zeros, and then replace them back after the random numbers
-        # have been generated.
-        nans = np.isnan(left) | np.isnan(right)
-        left = np.where(nans, 0.0, left)
-        right = np.where(nans, 0.0, right)
-        # Ensure left <= right
-        left, right = np.minimum(left, right), np.maximum(left, right)
-
-        # In each bin, we generate a number of events with a uniform distribution.
-        events = rng.uniform(
-            left, right, size=(events_per_bin, *list(edge_sizes.values()))
-        )
-        events[..., nans] = np.nan
-        event_coords[dim] = sc.array(
-            dims=[event_dim, *edge_dims], values=events, unit=coord.unit
-        )
-
-    # Find and apply masks that are on a bin-edge dimension
-    event_masks = {}
-    other_masks = {}
-    edge_dims_set = set(edge_dims)
-    for key, mask in da.masks.items():
-        if set(mask.dims) & edge_dims_set:
-            event_masks[key] = mask
-        else:
-            other_masks[key] = mask
-
-    data = da.data
-    if event_masks:
-        inv_mask = (~reduce(lambda a, b: a | b, event_masks.values())).to(dtype=int)
-        inv_mask.unit = ""
-        data = data * inv_mask
-
-    # Create the data counts, which are the original counts divided by the number of
-    # events per bin
-    sizes = {event_dim: events_per_bin} | da.sizes
-    val = sc.broadcast(sc.values(data) / float(events_per_bin), sizes=sizes)
-    kwargs = {"dims": sizes.keys(), "values": val.values, "unit": data.unit}
-    if data.variances is not None:
-        # Note here that all the events are correlated.
-        # If we later histogram the events with different edges than the original
-        # histogram, then neighboring bins will be correlated, and the error obtained
-        # will be too small. It is however not clear what can be done to improve this.
-        kwargs["variances"] = sc.broadcast(
-            sc.variances(data) / float(events_per_bin), sizes=sizes
-        ).values
-    new_data = sc.array(**kwargs)
-
-    new = sc.DataArray(data=new_data, coords=event_coords)
-    new = new.transpose((*midp_dims, *edge_dims, event_dim)).flatten(
-        dims=[*edge_dims, event_dim], to=event_dim
-    )
-    return new.assign_coords(
-        {dim: da.coords[dim].copy() for dim in midp_coord_names}
-    ).assign_masks({key: mask.copy() for key, mask in other_masks.items()})
ess/reduce/utils.py DELETED
@@ -1,36 +0,0 @@
-# SPDX-License-Identifier: BSD-3-Clause
-# Copyright (c) 2025 Scipp contributors (https://github.com/scipp)
-
-from collections.abc import Iterable
-from typing import Any
-
-import sciline
-
-from .nexus.types import MonitorType, RunType
-
-
-def prune_type_vars(
-    workflow: sciline.Pipeline,
-    *,
-    run_types: Iterable[sciline.typing.Key] | None,
-    monitor_types: Iterable[sciline.typing.Key] | None,
-) -> None:
-    # Remove all nodes that use a run type or monitor types that is
-    # not listed in the function arguments.
-    excluded_run_types = excluded_type_args(RunType, run_types)
-    excluded_monitor_types = excluded_type_args(MonitorType, monitor_types)
-    excluded_types = excluded_run_types | excluded_monitor_types
-
-    graph = workflow.underlying_graph
-    to_remove = [
-        node for node in graph if excluded_types & set(getattr(node, "__args__", set()))
-    ]
-    graph.remove_nodes_from(to_remove)
-
-
-def excluded_type_args(
-    type_var: Any, keep: Iterable[sciline.typing.Key] | None
-) -> set[sciline.typing.Key]:
-    if keep is None:
-        return set()
-    return set(type_var.__constraints__) - set(keep)