essreduce 25.5.2__py3-none-any.whl → 25.7.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,478 @@
1
+ # SPDX-License-Identifier: BSD-3-Clause
2
+ # Copyright (c) 2025 Scipp contributors (https://github.com/scipp)
3
+ """
4
+ Utilities for computing time-of-flight lookup tables from neutron simulations.
5
+ """
6
+
7
+ from dataclasses import dataclass
8
+ from typing import NewType
9
+
10
+ import numpy as np
11
+ import sciline as sl
12
+ import scipp as sc
13
+
14
+ from ..nexus.types import DiskChoppers
15
+ from .types import TimeOfFlightLookupTable
16
+
17
+
18
@dataclass
class SimulationResults:
    """
    Results of a time-of-flight simulation used to create a lookup table.

    The results (apart from ``distance``) should be flat lists (1d arrays) of length N
    where N is the number of neutrons, containing the properties of the neutrons in the
    simulation.

    Parameters
    ----------
    time_of_arrival:
        Time of arrival of the neutrons at the position where the events were recorded
        (1d array of size N).
    speed:
        Speed of the neutrons, typically derived from the wavelength of the neutrons
        (1d array of size N).
    wavelength:
        Wavelength of the neutrons (1d array of size N).
    weight:
        Weight/probability of the neutrons (1d array of size N).
    distance:
        Distance from the source to the position where the events were recorded
        (single value; we assume all neutrons were recorded at the same position).
        For a ``tof`` simulation, this is just the position of the detector where the
        events are recorded. For a ``McStas`` simulation, this is the distance between
        the source and the event monitor.
    """

    # 1d array (length N): arrival time of each neutron at the recording position.
    time_of_arrival: sc.Variable
    # 1d array (length N): neutron speeds.
    speed: sc.Variable
    # 1d array (length N): neutron wavelengths.
    wavelength: sc.Variable
    # 1d array (length N): neutron weights/probabilities.
    weight: sc.Variable
    # Scalar: common source-to-recording-position distance for all neutrons.
    distance: sc.Variable
52
+
53
+
54
NumberOfSimulatedNeutrons = NewType("NumberOfSimulatedNeutrons", int)
"""
Number of neutrons simulated in the simulation that is used to create the lookup table.
This is typically a large number, e.g., 1e6 or 1e7.
"""

LtotalRange = NewType("LtotalRange", tuple[sc.Variable, sc.Variable])
"""
Range (min, max) of the total length of the flight path from the source to the detector.
This is used to create the lookup table to compute the neutron time-of-flight.
Note that the resulting table will extend slightly beyond this range, as the supplied
range is not necessarily a multiple of the distance resolution.

Note also that the range of total flight paths is supplied manually to the workflow
instead of being read from the input data, as it allows us to compute the expensive part
of the workflow in advance (the lookup table) and does not need to be repeated for each
run, or for new data coming in, in the case of live data collection.
"""

DistanceResolution = NewType("DistanceResolution", sc.Variable)
"""
Step size of the distance axis in the lookup table.
Should be a single scalar value with a unit of length.
This is typically of the order of 1-10 cm.
"""

TimeResolution = NewType("TimeResolution", sc.Variable)
"""
Step size of the event_time_offset axis in the lookup table.
This is basically the 'time-of-flight' resolution of the detector.
Should be a single scalar value with a unit of time.
This is typically of the order of 0.1-0.5 ms.

Since the event_time_offset range needs to span exactly one pulse period, the final
resolution in the lookup table will be at least the supplied value here, but may be
smaller if the pulse period is not an integer multiple of the time resolution.
"""


LookupTableRelativeErrorThreshold = NewType("LookupTableRelativeErrorThreshold", float)
"""
Threshold for the relative standard deviation (coefficient of variation) of the
projected time-of-flight above which values are masked.
"""

PulsePeriod = NewType("PulsePeriod", sc.Variable)
"""
Period of the source pulses, i.e., time between consecutive pulse starts.
"""

PulseStride = NewType("PulseStride", int)
"""
Stride of used pulses. Usually 1, but may be a small integer when pulse-skipping.
"""

SourcePosition = NewType("SourcePosition", sc.Variable)
"""
Position of the neutron source in the coordinate system of the choppers.
"""

SimulationSeed = NewType("SimulationSeed", int | None)
"""
Seed for the random number generator used in the simulation.
"""

SimulationFacility = NewType("SimulationFacility", str)
"""
Facility where the experiment is performed, e.g., 'ess'.
"""
122
+
123
+
124
def _mask_large_uncertainty(table: sc.DataArray, error_threshold: float) -> None:
    """
    Mask regions with large uncertainty with NaNs.
    The values are modified in place in the input table.

    Parameters
    ----------
    table:
        Lookup table with time-of-flight as a function of distance and time-of-arrival.
    error_threshold:
        Threshold for the relative standard deviation (coefficient of variation) of the
        projected time-of-flight above which values are masked.
    """
    # Coefficient of variation: stddev / mean of each table cell.
    relative_error = sc.stddevs(table.data) / sc.values(table.data)
    mask = relative_error > sc.scalar(error_threshold)
    # Use numpy for indexing as table is 2D
    table.values[mask.values] = np.nan
142
+
143
+
144
def _compute_mean_tof_in_distance_range(
    simulation: SimulationResults,
    distance_bins: sc.Variable,
    time_bins: sc.Variable,
    distance_unit: str,
    time_unit: str,
    frame_period: sc.Variable,
    time_bins_half_width: sc.Variable,
) -> sc.DataArray:
    """
    Compute the mean time-of-flight inside event_time_offset bins for a given range of
    distances.

    Parameters
    ----------
    simulation:
        Results of a time-of-flight simulation used to create a lookup table.
    distance_bins:
        Bin edges for the distance axis in the lookup table.
    time_bins:
        Bin edges for the event_time_offset axis in the lookup table.
    distance_unit:
        Unit of the distance axis.
    time_unit:
        Unit of the event_time_offset axis.
    frame_period:
        Period of the source pulses, i.e., time between consecutive pulse starts.
    time_bins_half_width:
        Half width of the time bins in the event_time_offset axis.
    """
    simulation_distance = simulation.distance.to(unit=distance_unit)
    distances = sc.midpoints(distance_bins)
    # Compute arrival and flight times for all neutrons
    toas = simulation.time_of_arrival + (distances / simulation.speed).to(
        unit=time_unit, copy=False
    )
    dist = distances + simulation_distance
    # Time-of-flight over the full path length from the wavelength:
    # t = L * m_n * lambda / h (de Broglie).
    tofs = dist * (sc.constants.m_n / sc.constants.h) * simulation.wavelength

    data = sc.DataArray(
        data=sc.broadcast(simulation.weight, sizes=toas.sizes),
        coords={
            "toa": toas,
            "tof": tofs.to(unit=time_unit, copy=False),
            "distance": dist,
        },
    ).flatten(to="event")

    # Add the event_time_offset coordinate, wrapped to the frame_period
    data.coords['event_time_offset'] = data.coords['toa'] % frame_period

    # Because we staggered the mesh by half a bin width, we want the values above
    # the last bin edge to wrap around to the first bin.
    # Technically, those values should end up between -0.5*bin_width and 0, but
    # a simple modulo also works here because even if they end up between 0 and
    # 0.5*bin_width, we are (below) computing the mean between -0.5*bin_width and
    # 0.5*bin_width and it yields the same result.
    data.coords['event_time_offset'] %= frame_period - time_bins_half_width

    binned = data.bin(
        distance=distance_bins + simulation_distance, event_time_offset=time_bins
    )

    # Weighted mean of tof inside each bin
    mean_tof = (
        binned.bins.data * binned.bins.coords["tof"]
    ).bins.sum() / binned.bins.sum()
    # Compute the variance of the tofs to track regions with large uncertainty
    variance = (
        binned.bins.data * (binned.bins.coords["tof"] - mean_tof) ** 2
    ).bins.sum() / binned.bins.sum()

    mean_tof.variances = variance.values
    return mean_tof
219
+
220
+
221
def make_tof_lookup_table(
    simulation: SimulationResults,
    ltotal_range: LtotalRange,
    distance_resolution: DistanceResolution,
    time_resolution: TimeResolution,
    pulse_period: PulsePeriod,
    pulse_stride: PulseStride,
    error_threshold: LookupTableRelativeErrorThreshold,
) -> TimeOfFlightLookupTable:
    """
    Compute a lookup table for time-of-flight as a function of distance and
    time-of-arrival.

    Parameters
    ----------
    simulation:
        Results of a time-of-flight simulation used to create a lookup table.
        The results should be a flat table with columns for time-of-arrival, speed,
        wavelength, and weight.
    ltotal_range:
        Range of total flight path lengths from the source to the detector.
    distance_resolution:
        Resolution of the distance axis in the lookup table.
    time_resolution:
        Resolution of the event_time_offset axis in the lookup table.
        Should be a scalar with a unit of time.
    pulse_period:
        Period of the source pulses, i.e., time between consecutive pulse starts.
    pulse_stride:
        Stride of used pulses. Usually 1, but may be a small integer when
        pulse-skipping.
    error_threshold:
        Threshold for the relative standard deviation (coefficient of variation) of the
        projected time-of-flight above which values are masked.

    Notes
    -----

    Below are some details about the binning and wrapping around frame period in the
    time dimension.

    We have some simulated ``toa`` (events) from a Tof/McStas simulation.
    Those are absolute ``toa``, unwrapped.
    First we compute the usual ``event_time_offset = toa % frame_period``.

    Now, we want to ensure periodic boundaries. If we make a bin centered around 0,
    and a bin centered around 71ms: the first bin will use events between 0 and
    ``0.5 * dt`` (where ``dt`` is the bin width).
    The last bin will use events between ``frame_period - 0.5*dt`` and
    ``frame_period + 0.5 * dt``. So when we compute the mean inside those two bins,
    they will not yield the same results.
    It is as if the first bin is missing the events it should have between
    ``-0.5 * dt`` and 0 (because of the modulo we computed above).

    To fix this, we do not make a last bin around 71ms (the bins stop at
    ``frame_period - 0.5*dt``). Instead, we compute modulo a second time,
    but this time using ``event_time_offset %= (frame_period - 0.5*dt)``.
    (we cannot directly do ``event_time_offset = toa % (frame_period - 0.5*dt)`` in a
    single step because it would introduce a gradual shift,
    as the pulse number increases).

    This second modulo effectively takes all the events that would have gone in the
    last bin (between ``frame_period - 0.5*dt`` and ``frame_period``) and puts them in
    the first bin. Instead of placing them between ``-0.5*dt`` and 0,
    it places them between 0 and ``0.5*dt``, but this does not really matter,
    because we then take the mean inside the first bin.
    Whether the events are on the left or right side of zero does not matter.

    Finally, we make a copy of the left edge, and append it to the right of the table,
    thus ensuring that the values on the right edge are strictly the same as on the
    left edge.
    """
    distance_unit = "m"
    # Work in the time unit of the simulated events to avoid repeated conversions.
    time_unit = simulation.time_of_arrival.unit
    res = distance_resolution.to(unit=distance_unit)
    pulse_period = pulse_period.to(unit=time_unit)
    # The frame period spans `pulse_stride` pulses (relevant for pulse-skipping).
    frame_period = pulse_period * pulse_stride

    # Distances in the table are relative to the simulation recording position.
    min_dist, max_dist = (
        x.to(unit=distance_unit) - simulation.distance.to(unit=distance_unit)
        for x in ltotal_range
    )
    # We need to bin the data below, to compute the weighted mean of the wavelength.
    # This results in data with bin edges.
    # However, the 2d interpolator expects bin centers.
    # We want to give the 2d interpolator a table that covers the requested range,
    # hence we need to extend the range by at least half a resolution in each direction.
    # Then, we make the choice that the resolution in distance is the quantity that
    # should be preserved. Because the difference between min and max distance is
    # not necessarily an integer multiple of the resolution, we need to add a pad to
    # ensure that the last bin is not cut off. We want the upper edge to be higher than
    # the maximum distance, hence we pad with an additional 1.5 x resolution.
    pad = 2.0 * res
    distance_bins = sc.arange('distance', min_dist - pad, max_dist + pad, res)

    # Create some time bins for event_time_offset.
    # We want our final table to strictly cover the range [0, frame_period].
    # However, binning the data associates mean values inside the bins to the bin
    # centers. Instead, we stagger the mesh by half a bin width so we are computing
    # values for the final mesh edges (the bilinear interpolation needs values on the
    # edges/corners).
    nbins = int(frame_period / time_resolution.to(unit=time_unit)) + 1
    time_bins = sc.linspace(
        'event_time_offset', 0.0, frame_period.value, nbins + 1, unit=pulse_period.unit
    )
    time_bins_half_width = 0.5 * (time_bins[1] - time_bins[0])
    time_bins -= time_bins_half_width

    # To avoid a too large RAM usage, we compute the table in chunks, and piece them
    # together at the end. Each chunk covers `chunk_size` distance bins; the number
    # of chunks is chosen so one chunk holds roughly `max_size` events at most.
    ndist = len(distance_bins) - 1
    max_size = 2e7
    total_size = ndist * len(simulation.time_of_arrival)
    nchunks = total_size / max_size
    chunk_size = int(ndist / nchunks) + 1
    pieces = []
    for i in range(int(nchunks) + 1):
        # Slice bin *edges*: one more edge than bins, hence the +1.
        dist_edges = distance_bins[i * chunk_size : (i + 1) * chunk_size + 1]

        pieces.append(
            _compute_mean_tof_in_distance_range(
                simulation=simulation,
                distance_bins=dist_edges,
                time_bins=time_bins,
                distance_unit=distance_unit,
                time_unit=time_unit,
                frame_period=frame_period,
                time_bins_half_width=time_bins_half_width,
            )
        )

    table = sc.concat(pieces, 'distance')
    # Convert bin edges to centers for the 2d interpolator.
    table.coords["distance"] = sc.midpoints(table.coords["distance"])
    table.coords["event_time_offset"] = sc.midpoints(table.coords["event_time_offset"])

    # Copy the left edge to the right to create periodic boundary conditions
    table = sc.DataArray(
        data=sc.concat(
            [table.data, table.data['event_time_offset', 0]], dim='event_time_offset'
        ),
        coords={
            "distance": table.coords["distance"],
            "event_time_offset": sc.concat(
                [table.coords["event_time_offset"], frame_period],
                dim='event_time_offset',
            ),
            "pulse_period": pulse_period,
            "pulse_stride": sc.scalar(pulse_stride, unit=None),
            # Record the effective resolutions so consumers of the table do not
            # need to know the parameters used to build it.
            "distance_resolution": table.coords["distance"][1]
            - table.coords["distance"][0],
            "time_resolution": table.coords["event_time_offset"][1]
            - table.coords["event_time_offset"][0],
            "error_threshold": sc.scalar(error_threshold),
        },
    )

    # In-place masking for better performance
    _mask_large_uncertainty(table, error_threshold)

    return TimeOfFlightLookupTable(table)
380
+
381
+
382
def simulate_chopper_cascade_using_tof(
    choppers: DiskChoppers,
    source_position: SourcePosition,
    neutrons: NumberOfSimulatedNeutrons,
    pulse_stride: PulseStride,
    seed: SimulationSeed,
    facility: SimulationFacility,
) -> SimulationResults:
    """
    Simulate a pulse of neutrons propagating through a chopper cascade using the
    ``tof`` package (https://tof.readthedocs.io).

    Parameters
    ----------
    choppers:
        A dict of DiskChopper objects representing the choppers in the beamline. See
        https://scipp.github.io/scippneutron/user-guide/chopper/processing-nexus-choppers.html#Build-DiskChopper
        for more information.
    source_position:
        A scalar variable with ``dtype=vector3`` that defines the source position.
        Must be in the same coordinate system as the choppers' axle positions.
    neutrons:
        Number of neutrons to simulate.
    pulse_stride:
        The pulse stride; we need to simulate at least enough pulses to cover the
        requested stride.
    seed:
        Seed for the random number generator used in the simulation.
    facility:
        Facility where the experiment is performed.
    """
    # Local import so the (optional) tof package is only required when simulating.
    import tof

    tof_choppers = [
        tof.Chopper(
            # The sign of the frequency encodes the rotation direction.
            frequency=abs(ch.frequency),
            direction=tof.AntiClockwise
            if (ch.frequency.value > 0.0)
            else tof.Clockwise,
            open=ch.slit_begin,
            close=ch.slit_end,
            phase=abs(ch.phase),
            # Chopper distance measured from the source position.
            distance=sc.norm(
                ch.axle_position - source_position.to(unit=ch.axle_position.unit)
            ),
            name=name,
        )
        for name, ch in choppers.items()
    ]
    source = tof.Source(
        facility=facility, neutrons=neutrons, pulses=pulse_stride, seed=seed
    )
    if not tof_choppers:
        # No choppers: return the raw source pulse, recorded at the source itself
        # (distance zero), using the birth times as arrival times.
        events = source.data.squeeze().flatten(to='event')
        return SimulationResults(
            time_of_arrival=events.coords["birth_time"],
            speed=events.coords["speed"],
            wavelength=events.coords["wavelength"],
            weight=events.data,
            distance=0.0 * sc.units.m,
        )
    model = tof.Model(source=source, choppers=tof_choppers)
    results = model.run()
    # Find name of the furthest chopper in tof_choppers
    furthest_chopper = max(tof_choppers, key=lambda c: c.distance)
    events = results[furthest_chopper.name].data.squeeze().flatten(to='event')
    # Drop neutrons blocked by any chopper in the cascade.
    events = events[
        ~(events.masks["blocked_by_others"] | events.masks["blocked_by_me"])
    ]
    return SimulationResults(
        time_of_arrival=events.coords["toa"],
        speed=events.coords["speed"],
        wavelength=events.coords["wavelength"],
        weight=events.data,
        distance=furthest_chopper.distance,
    )
458
+
459
+
460
def TofLookupTableWorkflow():
    """
    Build a sciline workflow that computes a time-of-flight lookup table from a
    simulation of neutrons propagating through a chopper cascade.

    The returned pipeline wires together the chopper-cascade simulation and the
    lookup-table computation, pre-populated with sensible default parameters
    (ESS 14 Hz source, 1M simulated neutrons, 10 cm / 250 us resolution).
    """
    default_params = {
        PulsePeriod: 1.0 / sc.scalar(14.0, unit="Hz"),
        PulseStride: 1,
        DistanceResolution: sc.scalar(0.1, unit="m"),
        TimeResolution: sc.scalar(250.0, unit='us'),
        LookupTableRelativeErrorThreshold: 0.1,
        NumberOfSimulatedNeutrons: 1_000_000,
        SimulationSeed: None,
        SimulationFacility: 'ess',
    }
    return sl.Pipeline(
        (make_tof_lookup_table, simulate_chopper_cascade_using_tof),
        params=default_params,
    )
@@ -0,0 +1,97 @@
1
+ # SPDX-License-Identifier: BSD-3-Clause
2
+ # Copyright (c) 2025 Scipp contributors (https://github.com/scipp)
3
+
4
+
5
+ import numpy as np
6
+ import scipp as sc
7
+
8
+
9
+ def find_strictly_increasing_sections(var: sc.Variable) -> list[slice]:
10
+ """
11
+ Find strictly increasing sections in a coordinate dimension (minimum length 2).
12
+
13
+ Parameters
14
+ ----------
15
+ var:
16
+ The variable to analyze, which should be one-dimensional.
17
+
18
+ Returns
19
+ -------
20
+ sections:
21
+ Slice objects that can be used extract strictly increasing sections.
22
+ """
23
+ values = var.values
24
+ finite = np.isfinite(values)
25
+ increasing = (np.sign(np.diff(values)) > 0) & finite[:-1] & finite[1:]
26
+ # 1 marks the start of an increasing section, -1 marks the end
27
+ transitions = np.diff(np.concatenate(([False], increasing, [False])).astype(int))
28
+ section_starts = np.where(transitions == 1)[0]
29
+ section_ends = np.where(transitions == -1)[0] + np.array(1)
30
+ return [
31
+ slice(start, end)
32
+ for start, end in zip(section_starts, section_ends, strict=True)
33
+ if end - start >= 2 # Ensure section has at least 2 points
34
+ ]
35
+
36
+
37
def get_min_max(
    var: sc.Variable, *, dim: str, slices: list[slice]
) -> tuple[sc.Variable, sc.Variable]:
    """
    Return the minimum and maximum of ``var`` over the given sections.

    Parameters
    ----------
    var:
        Variable to reduce.
    dim:
        Dimension along which the slices apply.
    slices:
        Sections of ``var`` to combine before taking min and max.

    Raises
    ------
    ValueError
        If ``slices`` is empty.
    """
    if not slices:
        raise ValueError("No strictly increasing sections found.")
    # Use a name that does not shadow the builtin ``slice``.
    combined = sc.concat([var[dim, s] for s in slices], dim)
    return combined.min(), combined.max()
44
+
45
+
46
def make_regular_grid(
    var: sc.Variable, *, dim: str, slices: list[slice]
) -> sc.Variable:
    """
    Create a regular grid variable based on the min and max of the slices.

    The grid is constructed such that it includes the minimum and maximum values
    of the strictly increasing sections, with a step size equal to the difference
    between the first two values of the section with the minimum start value (which is
    not necessarily the first section).
    """
    min_val, max_val = get_min_max(var, dim=dim, slices=slices)
    first: sc.Variable | None = None
    # Locate the section whose first value equals the global minimum; its first
    # two values define the grid step.
    for s in slices:
        first = var[dim, s]
        if sc.identical(first[0], min_val):
            break
    # NOTE(review): if no section starts exactly at min_val, the loop falls
    # through with ``first`` set to the *last* section (not None), so this guard
    # does not fire in that case. Presumably unreachable for slices produced by
    # find_strictly_increasing_sections — confirm if called with other input.
    if first is None:
        # This should not happen if slices are correctly identified and passed from
        # find_strictly_increasing_sections.
        raise ValueError("Section is not strictly increasing.")
    step = first[1] - first[0]
    return sc.arange(
        dim=dim,
        start=min_val.value,
        stop=max_val.value + step.value,  # Ensure the last bin edge is included
        step=step.value,
        unit=step.unit,
        dtype=step.dtype,
    )
76
+
77
+
78
def rebin_strictly_increasing(da: sc.DataArray, dim: str) -> sc.DataArray:
    """
    Find strictly monotonic sections in a coordinate dimension and rebin the data array
    into a regular grid based on these sections.

    Parameters
    ----------
    da:
        Data array whose ``dim`` coordinate may contain several strictly
        increasing sections (e.g. a sawtooth coordinate).
    dim:
        Name of the coordinate to analyze.

    Raises
    ------
    ValueError
        If the coordinate contains no strictly increasing section.
    """
    # Ensure the dimension is named like the coordinate.
    da = da.rename_dims({da.coords[dim].dim: dim})
    slices = find_strictly_increasing_sections(da.coords[dim])
    if len(slices) == 1:
        # Single increasing section: no rebinning needed, just slice it out.
        return da[dim, slices[0]]
    if not slices:
        raise ValueError("No strictly increasing sections found.")
    if da.coords[dim].dtype not in (sc.DType.float64, sc.DType.float32):
        # rebin does not like integer coords.
        da = da.assign_coords({dim: da.coords[dim].to(dtype='float64')})
    # Slices refer to the indices in the coord, which are bin edges. For slicing data
    # we need to stop at the last index minus one.
    sections = [da[dim, section.start : section.stop - 1] for section in slices]
    edges = make_regular_grid(da.coords[dim], dim=dim, slices=slices)
    # Rebin each section onto the common regular grid, then sum the overlapping
    # contributions into a single data array.
    return sc.reduce([sc.rebin(section, {dim: edges}) for section in sections]).sum()