reboost-0.8.3-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- reboost/__init__.py +14 -0
- reboost/_version.py +34 -0
- reboost/build_evt.py +134 -0
- reboost/build_glm.py +305 -0
- reboost/build_hit.py +466 -0
- reboost/cli.py +194 -0
- reboost/core.py +526 -0
- reboost/daq/__init__.py +5 -0
- reboost/daq/core.py +262 -0
- reboost/daq/utils.py +28 -0
- reboost/hpge/__init__.py +0 -0
- reboost/hpge/psd.py +847 -0
- reboost/hpge/surface.py +284 -0
- reboost/hpge/utils.py +79 -0
- reboost/iterator.py +226 -0
- reboost/log_utils.py +29 -0
- reboost/math/__init__.py +0 -0
- reboost/math/functions.py +175 -0
- reboost/math/stats.py +119 -0
- reboost/optmap/__init__.py +5 -0
- reboost/optmap/cli.py +246 -0
- reboost/optmap/convolve.py +325 -0
- reboost/optmap/create.py +423 -0
- reboost/optmap/evt.py +141 -0
- reboost/optmap/mapview.py +208 -0
- reboost/optmap/numba_pdg.py +26 -0
- reboost/optmap/optmap.py +328 -0
- reboost/profile.py +82 -0
- reboost/shape/__init__.py +0 -0
- reboost/shape/cluster.py +260 -0
- reboost/shape/group.py +189 -0
- reboost/shape/reduction.py +0 -0
- reboost/spms/__init__.py +5 -0
- reboost/spms/pe.py +178 -0
- reboost/units.py +107 -0
- reboost/utils.py +503 -0
- reboost-0.8.3.dist-info/METADATA +82 -0
- reboost-0.8.3.dist-info/RECORD +42 -0
- reboost-0.8.3.dist-info/WHEEL +5 -0
- reboost-0.8.3.dist-info/entry_points.txt +3 -0
- reboost-0.8.3.dist-info/licenses/LICENSE +674 -0
- reboost-0.8.3.dist-info/top_level.txt +1 -0
reboost/daq/core.py
ADDED
@@ -0,0 +1,262 @@
from __future__ import annotations

import awkward as ak
import numba
import numpy as np

from .utils import print_random_crash_msg


def run_daq_non_sparse(
    evt: ak.Array,
    n_sim_events: int,
    source_activity: float,
    *,
    tau_preamp: float = 500,
    noise_threshold: float = 5,
    baseline_slope_threshold: float = 0.01,
    trigger_threshold: float = 25,
    waveform_length: float = 100,
    trigger_position: float = 50,
):
    r"""Run the DAQ in non-sparse mode.

    Pipe simulated HPGe events through the DAQ system in non-sparse mode.
    Return a table where each row represents an event that was actually
    recorded by the DAQ. For each event and each channel, determine the
    characteristics of the waveform.

    Warning
    -------
    This code assumes that the simulated events are time-independent.

    The returned Awkward array (the table) has the following fields:

    - ``evtid`` (int): event ID in the simulation.
    - ``timestamp`` (float): timestamp of the event.
    - ``has_trigger`` (array of bools): this waveform triggered the DAQ.
    - ``has_pre_pulse`` (array of bools): the waveform has a signal below
      ``trigger_threshold`` in the first part of the waveform, before the
      trigger position.
    - ``has_post_pulse`` (array of bools): the waveform has a signal in the
      second part of the waveform, after the trigger position.
    - ``has_slope`` (array of bools): the waveform sits on the decaying tail
      of earlier signals that came before this waveform.

    The table is returned in a tuple, together with a list of the channel
    identifiers, in the same order as in the data array.

    Parameters
    ----------
    evt
        simulated events.
    n_sim_events
        total number of simulated events.
    source_activity
        source activity in Bq.
    tau_preamp
        pre-amplifier RC constant in microseconds. The signal model is a
        decaying exponential:

        .. math::

            f(t) = E_i \, e^{-(t - t_i) / \tau}

        where :math:`E_i` is the energy of the signal and :math:`t_i` is the
        time it occurred.
    noise_threshold
        threshold (in keV) for a signal to be "visible" above noise. In
        LEGEND-200, the "energy" of forced-trigger events is
        Gaussian-distributed around 0.5 keV with a standard deviation of
        about 0.5 keV.
    baseline_slope_threshold
        threshold (in keV/us) on the baseline slope above which it is tagged
        as not flat. In LEGEND-200, the slope of waveforms in forced-trigger
        events is Gaussian-distributed around 0 with a standard deviation of
        about 2 keV/ms.
    trigger_threshold
        amplitude (in keV) needed for the DAQ to trigger on a signal.
    waveform_length
        length of the waveform stored on disk, in microseconds.
    trigger_position
        location (offset) in microseconds of the triggered signal in the
        waveform.
    """
    # random engine
    rng = np.random.default_rng()

    # simulate ORCA
    print_random_crash_msg(rng)

    # add the time of each simulated event, in microseconds, according to the
    # expected event rate
    detected_rate = source_activity * len(evt) / n_sim_events
    evt["t0"] = np.cumsum(rng.exponential(scale=1e6 / detected_rate, size=len(evt)))

    # get rawids of detectors present in the simulation
    channel_ids = np.sort(np.unique(ak.flatten(evt.geds_rawid_active))).to_list()

    daq_records = _run_daq_non_sparse_impl(
        evt,
        channel_ids,
        tau_preamp,
        noise_threshold,
        baseline_slope_threshold,
        trigger_threshold,
        waveform_length,
        trigger_position,
    )

    fields = ["evtid", "timestamp", "has_trigger", "has_pre_pulse", "has_post_pulse", "has_slope"]

    daq_data = ak.Array(dict(zip(fields, daq_records, strict=False)))

    return daq_data, channel_ids


@numba.njit(cache=True)
def _run_daq_non_sparse_impl(
    evt: ak.Array,
    chids: list,
    tau_preamp: float,
    noise_threshold: float,
    baseline_slope_threshold: float,
    trigger_threshold: float,
    waveform_length: float,
    trigger_position: float,
):
    """Numba-accelerated implementation of :func:`run_daq_non_sparse`."""
    o_evtid = np.full(len(evt), dtype=np.int64, fill_value=-1)
    o_timestamp = np.full(len(evt), dtype=np.float64, fill_value=-1)

    def _init_data(dtype=np.bool_):
        return np.zeros((len(evt), len(chids)), dtype=dtype)

    o_has_trigger = _init_data()
    o_has_pre_pulse = _init_data()
    o_has_post_pulse = _init_data()
    o_has_slope = _init_data()

    # this is the index of the current daq record
    r_idx = -1

    # list of event indices (FIFO) to keep track of past events that still
    # have an effect on the baseline
    evt_idx_buffer = []

    # TODO: this will need to be updated
    # {
    #     evtid: 5.37e+03,
    #     geds_energy_active: [756, 152], in keV, "geds" means "HPGe detectors"
    #     geds_multiplicity_active: 2,
    #     geds_rawid_active: [1110400, 1112000],
    #     geds_t0_active: [1.53e+12, 1.53e+12]
    # }

    # loop over simulated events
    for s_idx, ev in enumerate(evt):
        # walk the event buffer and remove events that occurred more than
        # 10 times tau_preamp ago
        cutoff = ev.t0 - 10 * tau_preamp
        while evt_idx_buffer:
            first_idx = evt_idx_buffer[0]
            if evt[first_idx].t0 < cutoff:
                evt_idx_buffer.pop(0)
            else:
                break

        # add the current event to the buffer for later baseline analysis
        evt_idx_buffer.append(s_idx)

        # don't do any of this if there is no last record yet
        if r_idx != -1:
            # check if the last trigger was less than (waveform_length - trigger_position)
            # ago. if yes, this is not a trigger but a hard post-pile-up on the
            # previous trigger. so we need to check for each channel if the
            # energy deposited is above the noise_threshold. if yes, set the
            # has_post_pulse flag of that channel in the last record to True.
            # then continue to the next event.
            dt = ev.t0 - o_timestamp[r_idx]
            if dt < (waveform_length - trigger_position):
                for rawid, ene in zip(ev.geds_rawid_active, ev.geds_energy_active):  # noqa: B905
                    if ene >= noise_threshold:
                        o_has_post_pulse[r_idx, chids.index(rawid)] = True
                continue

            # check if the last trigger was less than waveform_length but more than
            # (waveform_length - trigger_position) ago. if yes, this event is not
            # recorded by the daq (dead time), so do nothing and just continue to
            # the next event.
            if dt > (waveform_length - trigger_position) and dt < waveform_length:
                continue

        # if we are here it means that we can actively look for new triggers.
        # check if we have a trigger by checking the energy in each detector
        # against the trigger_threshold. if not, continue to the next event
        triggered_rawids = []
        for rawid, ene in zip(ev.geds_rawid_active, ev.geds_energy_active):  # noqa: B905
            if ene >= trigger_threshold:
                triggered_rawids.append(int(rawid))

        if not triggered_rawids:
            continue

        # if we are here, it means we found a trigger. first, we close the last
        # daq record and initialize a new one
        r_idx += 1

        o_evtid[r_idx] = ev.evtid
        o_timestamp[r_idx] = ev.t0

        # then, log which channels triggered the daq in the daq record
        for rawid in triggered_rawids:
            o_has_trigger[r_idx, chids.index(rawid)] = True

        # time of the start of the waveform
        t0_start = ev.t0 - trigger_position
        # now we need to peek into the event buffer to check if the baseline is
        # affected by the tails of previous events (soft pile-up) or includes a
        # small in-trace signal (hard pre-pile-up). to take the decision, we use
        # the baseline slope threshold
        for rawid in chids:
            abs_baseline_slope = 0
            for j in evt_idx_buffer:
                _ev = evt[j]

                # for each event in the buffer get its timestamp and energy
                tj = _ev.t0
                ej = 0
                for k, e in zip(_ev.geds_rawid_active, _ev.geds_energy_active):  # noqa: B905
                    if k == rawid:
                        ej = e
                        break

                # if the event occurred before the current waveform window, we
                # account for its tail in the baseline of the current waveform.
                # the baseline slope is evaluated at the start of the waveform
                if tj <= t0_start:
                    abs_baseline_slope += ej / tau_preamp * np.exp(-(t0_start - tj) / tau_preamp)

                # if any energy above the noise threshold was deposited in this
                # channel between the start of the waveform and the trigger,
                # this channel has a hard pre-pile-up in the current waveform
                elif tj < ev.t0 and ej >= noise_threshold:
                    o_has_pre_pulse[r_idx, chids.index(rawid)] = True

            # now that the baseline slope is computed, check it against the
            # baseline slope threshold to flag a significantly non-flat baseline
            if abs_baseline_slope >= baseline_slope_threshold:
                o_has_slope[r_idx, chids.index(rawid)] = True

    # the timestamp should refer to the start of the waveform, like in our DAQ
    o_timestamp -= trigger_position

    # the last event was recorded too
    r_idx += 1

    return (
        o_evtid[:r_idx],
        o_timestamp[:r_idx],
        o_has_trigger[:r_idx],
        o_has_pre_pulse[:r_idx],
        o_has_post_pulse[:r_idx],
        o_has_slope[:r_idx],
    )
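As a quick orientation, here is a minimal usage sketch (not part of the package diff): it fabricates a toy event table with the fields that run_daq_non_sparse reads (evtid, geds_rawid_active, geds_energy_active), using invented detector IDs and energies, and then inspects the returned DAQ records. Whether the numba-compiled inner loop accepts the awkward records as-is depends on the installed awkward/numba versions.

import awkward as ak
import numpy as np

from reboost.daq.core import run_daq_non_sparse

rng = np.random.default_rng(42)
n_events = 1000

# toy events: one or two HPGe channels hit per event, energies in keV
rawids, energies = [], []
for _ in range(n_events):
    hit = [1110400] if rng.random() < 0.5 else [1110400, 1112000]
    rawids.append(hit)
    energies.append(list(rng.uniform(1, 2000, size=len(hit))))

evt = ak.Array(
    {
        "evtid": np.arange(n_events),
        "geds_rawid_active": rawids,
        "geds_energy_active": energies,
    }
)

# 1 kBq source, 10^6 simulated primaries -> the function derives the event rate
daq_data, channel_ids = run_daq_non_sparse(evt, n_sim_events=1_000_000, source_activity=1000)

print(channel_ids)                   # detector rawids, e.g. [1110400, 1112000]
print(daq_data.fields)               # evtid, timestamp, has_trigger, ...
print(ak.sum(daq_data.has_trigger))  # number of triggered channel-waveforms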
reboost/daq/utils.py
ADDED
@@ -0,0 +1,28 @@
from __future__ import annotations


def print_random_crash_msg(rng):
    """Print a randomly chosen fake crash message (mock ORCA behavior)."""
    msgs = [
        "Segmentation fault (core dumped)",
        "zsh: segmentation fault ./orca.out",
        "Segmentation fault: 11",
        "Bus error (core dumped)",
        "Bus error: 10",
        "*** stack smashing detected ***: terminated",
        "free(): double free detected in tcache 2",
        "free(): invalid pointer",
        "malloc(): corrupted top size",
        "malloc(): memory corruption",
        "malloc(): unaligned tcache chunk detected",
        "Killed",
        "Out of memory: Killed process 4321 (orca.out)",
        "Illegal instruction (core dumped)",
        "Illegal instruction: 4",
        "general protection fault: 0000 [#1] SMP",
        "fish: Job 1, './orca.out' terminated by signal SIGSEGV (Address boundary error)",
        "==24567==ERROR: AddressSanitizer: heap-use-after-free on address 0x602000000010",
        "Abort trap: 6",
        "Trace/BPT trap: 5",
    ]

    print(msgs[rng.choice(len(msgs))])  # noqa: T201
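The helper above is invoked once per run_daq_non_sparse call (the "simulate ORCA" step); a trivial way to exercise it on its own, assuming only NumPy, is sketched here:

import numpy as np

from reboost.daq.utils import print_random_crash_msg

# prints one randomly chosen fake crash message to stdout
print_random_crash_msg(np.random.default_rng())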
reboost/hpge/__init__.py
ADDED
File without changes