reboost 0.6.2-py3-none-any.whl → 0.8.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- reboost/_version.py +16 -3
- reboost/build_hit.py +102 -58
- reboost/cli.py +1 -0
- reboost/core.py +18 -9
- reboost/daq/__init__.py +5 -0
- reboost/daq/core.py +262 -0
- reboost/daq/utils.py +28 -0
- reboost/hpge/psd.py +444 -94
- reboost/hpge/surface.py +34 -1
- reboost/hpge/utils.py +2 -1
- reboost/iterator.py +4 -1
- reboost/math/stats.py +2 -2
- reboost/optmap/cli.py +40 -101
- reboost/optmap/convolve.py +206 -233
- reboost/optmap/create.py +41 -124
- reboost/optmap/evt.py +5 -2
- reboost/optmap/mapview.py +9 -7
- reboost/optmap/optmap.py +13 -14
- reboost/shape/cluster.py +4 -4
- reboost/spms/__init__.py +5 -0
- reboost/spms/pe.py +178 -0
- reboost/units.py +40 -8
- reboost/utils.py +65 -3
- {reboost-0.6.2.dist-info → reboost-0.8.0.dist-info}/METADATA +7 -5
- reboost-0.8.0.dist-info/RECORD +42 -0
- reboost-0.6.2.dist-info/RECORD +0 -37
- {reboost-0.6.2.dist-info → reboost-0.8.0.dist-info}/WHEEL +0 -0
- {reboost-0.6.2.dist-info → reboost-0.8.0.dist-info}/entry_points.txt +0 -0
- {reboost-0.6.2.dist-info → reboost-0.8.0.dist-info}/licenses/LICENSE +0 -0
- {reboost-0.6.2.dist-info → reboost-0.8.0.dist-info}/top_level.txt +0 -0
reboost/_version.py
CHANGED
```diff
@@ -1,7 +1,14 @@
 # file generated by setuptools-scm
 # don't change, don't track in version control
 
-__all__ = ["__version__", "__version_tuple__", "version", "version_tuple"]
+__all__ = [
+    "__version__",
+    "__version_tuple__",
+    "version",
+    "version_tuple",
+    "__commit_id__",
+    "commit_id",
+]
 
 TYPE_CHECKING = False
 if TYPE_CHECKING:
@@ -9,13 +16,19 @@ if TYPE_CHECKING:
     from typing import Union
 
     VERSION_TUPLE = Tuple[Union[int, str], ...]
+    COMMIT_ID = Union[str, None]
 else:
     VERSION_TUPLE = object
+    COMMIT_ID = object
 
 version: str
 __version__: str
 __version_tuple__: VERSION_TUPLE
 version_tuple: VERSION_TUPLE
+commit_id: COMMIT_ID
+__commit_id__: COMMIT_ID
 
-__version__ = version = '0.6.2'
-__version_tuple__ = version_tuple = (0, 6, 2)
+__version__ = version = '0.8.0'
+__version_tuple__ = version_tuple = (0, 8, 0)
+
+__commit_id__ = commit_id = None
```
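The regenerated file follows the newer setuptools-scm template, which exposes a commit identifier alongside the version. A minimal check of the new metadata, assuming the 0.8.0 wheel is installed:

```python
# quick sanity check of the regenerated version metadata
from reboost import _version

print(_version.__version__)        # '0.8.0'
print(_version.__version_tuple__)  # (0, 8, 0)
print(_version.__commit_id__)      # None in this release build
```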
reboost/build_hit.py
CHANGED
```diff
@@ -7,11 +7,16 @@ A :func:`build_hit` to parse the following configuration file:
 # dictionary of objects useful for later computation. they are constructed with
 # auxiliary data (e.g. metadata). They can be accessed later as OBJECTS (all caps)
 objects:
-  lmeta: LegendMetadata(ARGS.legendmetadata)
+  lmeta: legendmeta.LegendMetadata(ARGS.legendmetadata)
   geometry: pyg4ometry.load(ARGS.gdml)
   user_pars: dbetto.TextDB(ARGS.par)
   dataprod_pars: dbetto.TextDB(ARGS.dataprod_cycle)
 
+  _spms: OBJECTS.lmeta.channelmap(on=ARGS.timestamp)
+    .group("system").spms
+    .map("name")
+  spms: "{name: spm.daq.rawid for name, spm in OBJECTS._spms.items()}"
+
 # processing chain is defined to act on a group of detectors
 processing_groups:
 
@@ -107,9 +112,10 @@
     outputs:
       - evtid
       - tot_edep_wlsr
+      - num_scint_ph_lar
 
     operations:
-      tot_edep_wlsr: ak.sum(HITS[(HITS.
+      tot_edep_wlsr: ak.sum(HITS.edep[np.abs(HITS.zloc) < 3000], axis=-1)
 
   - name: spms
 
@@ -117,11 +123,14 @@
     # same name as the current detector. This can be overridden for special processors
 
     detector_mapping:
-      - output: OBJECTS.
-
-
-
-
+      - output: OBJECTS.spms.keys()
+        input: lar
+
+    hit_table_layout: reboost.shape.group_by_time(STEPS, window=10)
+
+    pre_operations:
+      num_scint_ph_lar: reboost.spms.emitted_scintillation_photons(HITS.edep, HITS.particle, "lar")
+      # num_scint_ph_pen: ...
 
     outputs:
       - t0
@@ -130,22 +139,23 @@
 
     detector_objects:
       meta: pygeomtools.get_sensvol_metadata(OBJECTS.geometry, DETECTOR)
-
-
-
-      hit_table_layout: reboost.shape.group_by_time(STEPS, window=10)
+      spm_uid: OBJECTS.spms[DETECTOR]
+      optmap_lar: reboost.spms.load_optmap(ARGS.optmap_path_pen, DETECTOR_OBJECTS.spm_uid)
+      optmap_pen: reboost.spms.load_optmap(ARGS.optmap_path_lar, DETECTOR_OBJECTS.spm_uid)
 
     operations:
      pe_times_lar: reboost.spms.detected_photoelectrons(
-
+        HITS.num_scint_ph_lar, HITS.particle, HITS.time, HITS.xloc, HITS.yloc, HITS.zloc,
         DETECTOR_OBJECTS.optmap_lar,
-
+        "lar",
+        DETECTOR_OBJECTS.spm_uid
       )
 
      pe_times_pen: reboost.spms.detected_photoelectrons(
-
+        HITS.num_scint_ph_pen, HITS.particle, HITS.time, HITS.xloc, HITS.yloc, HITS.zloc,
         DETECTOR_OBJECTS.optmap_pen,
-
+        "pen",
+        DETECTOR_OBJECTS.spm_uid
      )
 
       pe_times: ak.concatenate([HITS.pe_times_lar, HITS.pe_times_pen], axis=-1)
```
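The new `objects` entries reduce the channel map to a plain name → rawid dictionary, which the `detector_mapping` expression `OBJECTS.spms.keys()` then expands into one output table per SiPM, all fed from the single `lar` input. A rough sketch of what the two expressions evaluate to in plain Python (the channel names and rawids are invented; the real objects come from `legendmeta`):

```python
from types import SimpleNamespace

# stand-in for OBJECTS._spms, i.e. lmeta.channelmap(...).group("system").spms.map("name")
_spms = {
    "S002": SimpleNamespace(daq=SimpleNamespace(rawid=1057)),
    "S003": SimpleNamespace(daq=SimpleNamespace(rawid=1059)),
}

# the "spms" object: a plain name -> rawid mapping
spms = {name: spm.daq.rawid for name, spm in _spms.items()}
print(spms)  # {'S002': 1057, 'S003': 1059}

# "output: OBJECTS.spms.keys()" expands to one output table per name
print(list(spms.keys()))  # ['S002', 'S003']
```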
```diff
@@ -189,6 +199,7 @@ def build_hit(
     out_field: str = "hit",
     buffer: int = int(5e6),
     overwrite: bool = False,
+    allow_missing_inputs=True,
 ) -> None | ak.Array:
     """Build the hit tier from the remage step files.
 
@@ -215,6 +226,8 @@
         buffer size for use in the `LH5Iterator`.
     overwrite
         flag to overwrite the existing output.
+    allow_missing_inputs
+        Flag to allow an input table to be missing, generally when there were no events.
     """
     # extract the config file
     if isinstance(config, str):
@@ -238,7 +251,7 @@
     output_tables_names = set()
 
     # iterate over files
-    for file_idx, (stp_file, glm_file) in enumerate(zip(files.stp, files.glm)):
+    for file_idx, (stp_file, glm_file) in enumerate(zip(files.stp, files.glm, strict=True)):
         msg = (
             f"starting processing of {stp_file} to {files.hit[file_idx]} "
             if files.hit[file_idx] is not None
@@ -257,7 +270,7 @@
 
         # extract the output detectors and the mapping to input detectors
         detectors_mapping = core.get_detector_mapping(
-            proc_group.get("detector_mapping"), global_objects
+            proc_group.get("detector_mapping"), global_objects, args
         )
 
         # loop over detectors
@@ -276,20 +289,30 @@
 
            lh5_group = proc_group.get("lh5_group", "stp")
            if lh5_group is None:
-                lh5_group = "
+                lh5_group = ""
+                table = in_detector
+            else:
+                table = f"{lh5_group}/{in_detector}"
 
            # begin iterating over the glm
-
-
-
-
-
-
-
-
-
-
-
+            # check if the in_detector is in the file
+            if table in lh5.ls(stp_file, lh5_group + "/"):
+                iterator = GLMIterator(
+                    glm_file,
+                    stp_file,
+                    lh5_group=in_detector,
+                    start_row=start_evtid,
+                    stp_field=lh5_group,
+                    n_rows=n_evtid,
+                    buffer=buffer,
+                    time_dict=time_dict[proc_name],
+                    reshaped_files="hit_table_layout" not in proc_group,
+                )
+            elif allow_missing_inputs:
+                continue
+            else:
+                msg = f"Requested input detector {in_detector} is not present in the group {lh5_group} and missing inputs were not allowed"
+                raise ValueError(msg)
 
            for stps, chunk_idx, _ in iterator:
                # converting to awkward
```
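The guard around the iterator is the user-visible effect of the new `allow_missing_inputs` flag: a detector absent from the step file is silently skipped by default, and only raises when the flag is set to `False`. The branch logic in isolation (a sketch; `existing` stands in for the result of `lh5.ls(stp_file, lh5_group + "/")`):

```python
def check_input(table: str, existing: list[str], *, allow_missing_inputs: bool = True) -> bool:
    # True -> build the GLMIterator and process this detector
    if table in existing:
        return True
    # False -> skip this detector, e.g. because it saw no events
    if allow_missing_inputs:
        return False
    msg = f"Requested input detector {table} is not present"
    raise ValueError(msg)

print(check_input("stp/det001", ["stp/det001", "stp/det002"]))  # True
print(check_input("stp/det099", ["stp/det001"]))                # False
```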
```diff
@@ -305,6 +328,21 @@
                if time_dict is not None:
                    time_dict[proc_name].update_field("conv", start_time)
 
+                if "hit_table_layout" in proc_group:
+                    hit_table_layouted = core.evaluate_hit_table_layout(
+                        copy.deepcopy(ak_obj),
+                        expression=proc_group["hit_table_layout"],
+                        time_dict=time_dict[proc_name],
+                    )
+                else:
+                    hit_table_layouted = copy.deepcopy(stps)
+
+                local_dict = {"OBJECTS": global_objects}
+                for field, info in proc_group.get("pre_operations", {}).items():
+                    _evaluate_operation(
+                        hit_table_layouted, field, info, local_dict, time_dict[proc_name]
+                    )
+
                # produce the hit table
                for out_det_idx, out_detector in enumerate(out_detectors):
                    # loop over the rows
@@ -314,14 +352,12 @@
                    # get the attributes
                    attrs = utils.copy_units(stps)
 
-                    if
-
-
-
-
-
-                    else:
-                        hit_table = copy.deepcopy(stps)
+                    # if we have more than one output detector, make an independent copy.
+                    hit_table = (
+                        copy.deepcopy(hit_table_layouted)
+                        if len(out_detectors) > 1
+                        else hit_table_layouted
+                    )
 
                    local_dict = {
                        "DETECTOR_OBJECTS": det_objects[out_detector],
@@ -330,28 +366,10 @@
                    }
                    # add fields
                    for field, info in proc_group.get("operations", {}).items():
-                        if isinstance(info, str):
-                            expression = info
-                            units = None
-                        else:
-                            expression = info["expression"]
-                            units = info.get("units", None)
-
-                        # evaluate the expression
-                        col = core.evaluate_output_column(
-                            hit_table,
-                            table_name="HITS",
-                            expression=expression,
-                            local_dict=local_dict,
-                            time_dict=time_dict[proc_name],
-                            name=field,
+                        _evaluate_operation(
+                            hit_table, field, info, local_dict, time_dict[proc_name]
                        )
 
-                        if units is not None:
-                            col.attrs["units"] = units
-
-                        core.add_field_with_nesting(hit_table, field, col)
-
                    # remove unwanted fields
                    if "outputs" in proc_group:
                        hit_table = core.remove_columns(
@@ -414,9 +432,35 @@
            raise RuntimeError(msg) from e
 
    # return output table or nothing
-    log.
+    log.info(time_dict)
 
    if output_tables == {}:
        output_tables = None
 
    return output_tables, time_dict
+
+
+def _evaluate_operation(
+    hit_table, field: str, info: str | dict, local_dict: dict, time_dict: ProfileDict
+) -> None:
+    if isinstance(info, str):
+        expression = info
+        units = None
+    else:
+        expression = info["expression"]
+        units = info.get("units", None)
+
+    # evaluate the expression
+    col = core.evaluate_output_column(
+        hit_table,
+        table_name="HITS",
+        expression=expression,
+        local_dict=local_dict,
+        time_dict=time_dict,
+        name=field,
+    )
+
+    if units is not None:
+        col.attrs["units"] = units
+
+    core.add_field_with_nesting(hit_table, field, col)
```
reboost/cli.py
CHANGED
reboost/core.py
CHANGED
```diff
@@ -1,5 +1,6 @@
 from __future__ import annotations
 
+import contextlib
 import logging
 import time
 from typing import Any
@@ -145,11 +146,14 @@ def evaluate_output_column(
    if globals_dict == {}:
        globals_dict = None
 
+    ctx = contextlib.nullcontext()
    if globals_dict is not None and "pyg4ometry" in globals_dict:
-
-
-
-    res = hit_table.eval(
+        ctx = utils.filter_logging(logging.CRITICAL)
+
+    with ctx:
+        res = hit_table.eval(
+            func_call, local_dict, modules=globals_dict, library="ak", with_units=True
+        )
 
    # how long did it take
    if time_dict is not None:
@@ -227,7 +231,9 @@ def get_global_objects(
    return AttrsDict(res)
 
 
-def get_detector_mapping(detector_mapping: dict, global_objects: AttrsDict) -> dict:
+def get_detector_mapping(
+    detector_mapping: dict, global_objects: AttrsDict, args: AttrsDict
+) -> dict:
    """Get all the detector mapping using :func:`get_one_detector_mapping`.
 
    Parameters
@@ -236,6 +242,8 @@ def get_detector_mapping(detector_mapping: dict, global_objects: AttrsDict) -> d
        dictionary of detector mapping
    global_objects
        dictionary of global objects to use in evaluating the mapping.
+    args
+        any arguments the expression can depend on, is passed as `locals` to `eval()`.
    """
    return utils.merge_dicts(
        [
@@ -243,6 +251,7 @@ def get_detector_mapping(detector_mapping: dict, global_objects: AttrsDict) -> d
                mapping["output"],
                input_detector_name=mapping.get("input", None),
                objects=global_objects,
+                args=args,
            )
            for mapping in detector_mapping
        ]
@@ -253,6 +262,7 @@ def get_one_detector_mapping(
    output_detector_expression: str | list,
    objects: AttrsDict | None = None,
    input_detector_name: str | None = None,
+    args: AttrsDict | None = None,
 ) -> dict:
    """Extract the output detectors and the list of input to outputs by parsing the expressions.
 
@@ -282,7 +292,8 @@ def get_one_detector_mapping(
        dictionary of objects that can be referenced in the expression.
    input_detector_name
        Optional input detector name for all the outputs.
-
+    args
+        any arguments the expression can depend on, is passed as `locals` to `eval()`.
 
    Returns
    -------
@@ -314,11 +325,9 @@ def get_one_detector_mapping(
    out_list = list(output_detector_expression)
 
    for expression_tmp in out_list:
-        func, globs = utils.get_function_string(expression_tmp)
-
        # if no package was imported its just a name
        try:
-            objs = evaluate_object(expression_tmp, local_dict={"OBJECTS": objects})
+            objs = evaluate_object(expression_tmp, local_dict={"ARGS": args, "OBJECTS": objects})
            out_names.extend(objs)
        except Exception:
            out_names.append(expression_tmp)
```
reboost/daq/__init__.py
ADDED
reboost/daq/core.py
ADDED
```python
from __future__ import annotations

import awkward as ak
import numba
import numpy as np

from .utils import print_random_crash_msg


def run_daq_non_sparse(
    evt: ak.Array,
    n_sim_events: int,
    source_activity: float,
    *,
    tau_preamp: float = 500,
    noise_threshold: float = 5,
    baseline_slope_threshold: float = 0.01,
    trigger_threshold: float = 25,
    waveform_length: float = 100,
    trigger_position: float = 50,
):
    r"""Run the DAQ in non-sparse mode.

    Pipe simulated HPGe events through the DAQ system in non-sparse mode.
    Return a table where each row represents an event that was actually
    recorded by the DAQ. for each event and each channel, determine the
    characteristics of the waveform.

    Warning
    -------
    This code assumes that the simulated events are time-independent.

    The returned Awkward array (the table) has the following fields:

    - ``evtid`` (int): event ID in the simulation.
    - ``timestamp`` (float): timestamp of the event.
    - ``has_trigger`` (array of bools): this waveform triggered the DAQ.
    - ``has_pre_pulse`` (array of bools): the waveform has a signal below
      trigger_threshold in the first part of the waveform, before the trigger
      position.
    - ``has_post_pulse`` (array of bools): the waveform has a signal in the
      second part of the waveform, after the trigger position.
    - ``has_slope`` (array of bools): waveform has decaying tail of earlier
      signals, that came before this waveform.

    The table sits in a tuple, together with a list of the channel
    identifiers, with the same order as in the data array.

    Parameters
    ----------
    evt
        simulated events.
    source_activity
        source activity in Bq.
    n_sim_events
        total number of simulated events.
    tau_preamp
        pre-amplification RC constant in microseconds. the signal model is an
        exponential:

        .. math::

            f(t) = E_i * e^{((t - t_i) / \tau)}

        where :math:`E_i` is the energy of the signal and :math:`t_i` is the
        time it occurred.
    noise_threshold
        threshold (in keV) for a signal to be "visible" above noise. In
        LEGEND-200, the "energy" of forced trigger events is
        gauss-distributed around 0.5 keV with a standard deviation of about
        0.5 keV.
    baseline_slope_threshold
        threshold (in keV/us) on the baseline slope to be tagged as not flat.
        in LEGEND-200, the slope of waveforms in force-triggered events is
        gauss-distributed around 0 with a standard deviation of about 2 keV/ms.
    trigger_threshold
        amplitude (in keV) needed for the DAQ to trigger on a signal.
    waveform_length
        length of the waveform in microseconds stored on disk.
    trigger_position
        location (offset) in microseconds of the triggered signal in the
        waveform.
    """
    # random engine
    rng = np.random.default_rng()

    # simulate ORCA
    print_random_crash_msg(rng)

    # add time of each simulated event, in microseconds, according to the expected event rate
    detected_rate = source_activity * len(evt) / n_sim_events
    evt["t0"] = np.cumsum(rng.exponential(scale=1e6 / detected_rate, size=len(evt)))

    # get rawids of detectors present in the simulation
    channel_ids = np.sort(np.unique(ak.flatten(evt.geds_rawid_active))).to_list()

    daq_records = _run_daq_non_sparse_impl(
        evt,
        channel_ids,
        tau_preamp,
        noise_threshold,
        baseline_slope_threshold,
        trigger_threshold,
        waveform_length,
        trigger_position,
    )

    fields = ["evtid", "timestamp", "has_trigger", "has_pre_pulse", "has_post_pulse", "has_slope"]

    daq_data = ak.Array(dict(zip(fields, daq_records, strict=False)))

    return daq_data, channel_ids


@numba.njit(cache=True)
def _run_daq_non_sparse_impl(
    evt: ak.Array,
    chids: list,
    tau_preamp: float,
    noise_threshold: float,
    baseline_slope_threshold: float,
    trigger_threshold: float,
    waveform_length: float,
    trigger_position: float,
):
    """Numba-accelerated implementation of :func:`run_daq_non_sparse`."""
    o_evtid = np.full(len(evt), dtype=np.int64, fill_value=-1)
    o_timestamp = np.full(len(evt), dtype=np.float64, fill_value=-1)

    def _init_data(dtype=np.bool_):
        return np.zeros((len(evt), len(chids)), dtype=dtype)

    o_has_trigger = _init_data()
    o_has_pre_pulse = _init_data()
    o_has_post_pulse = _init_data()
    o_has_slope = _init_data()

    # this is the index of the current daq record
    r_idx = -1

    # list of event indices (fifo) to keep track of past events that have still
    # an effect on the baseline
    evt_idx_buffer = []

    # TODO: this will need to be updated
    # {
    #     evtid: 5.37e+03,
    #     geds_energy_active: [756, 152], in keV, "geds" means "HPGe detectors"
    #     geds_multiplicity_active: 2,
    #     geds_rawid_active: [1110400, 1112000],
    #     geds_t0_active: [1.53e+12, 1.53e+12]
    # }

    # loop over simulated events
    for s_idx, ev in enumerate(evt):
        # loop over the event_buffer and remove events that occurred more
        # than 10 times the tau_preamp ago
        cutoff = ev.t0 - 10 * tau_preamp
        while evt_idx_buffer:
            first_idx = evt_idx_buffer[0]
            if evt[first_idx].t0 < cutoff:
                evt_idx_buffer.pop(0)
            else:
                break

        # add current event to the buffer for later baseline analysis
        evt_idx_buffer.append(s_idx)

        # don't do any of this if there is no last record yet
        if r_idx != -1:
            # check if the last trigger was less than (waveform_length - trigger_position)
            # ago. if yes, this is not a trigger but there is a hard post-pile-up
            # on the previous trigger. so we need to check for each channel if the
            # energy deposited is above the noise_threshold. if yes, get the last
            # trigger and set the hard_post_pileup flag of that channel to true.
            # then continue to the next event.
            dt = ev.t0 - o_timestamp[r_idx]
            if dt < (waveform_length - trigger_position):
                for rawid, ene in zip(ev.geds_rawid_active, ev.geds_energy_active):  # noqa: B905
                    if ene >= noise_threshold:
                        o_has_post_pulse[r_idx, chids.index(rawid)] = True
                continue

            # check if the last trigger was less than waveform_length but more than
            # (waveform_length - trigger_position) ago. if yes, this event is not
            # recorded by the daq (dead time), so do nothing and just continue to the
            # next event.
            if dt > (waveform_length - trigger_position) and dt < waveform_length:
                continue

        # if we are here it means that we can actively look for new triggers.
        # check if we have a trigger by checking energy in each detector against
        # the trigger_threshold. if not, continue to the next event
        triggered_rawids = []
        for rawid, ene in zip(ev.geds_rawid_active, ev.geds_energy_active):  # noqa: B905
            if ene >= trigger_threshold:
                triggered_rawids.append(int(rawid))

        if not triggered_rawids:
            continue

        # if we are here, it means we found a trigger. first, we save the last
        # daq record to disk and we initialize a new one
        r_idx += 1

        o_evtid[r_idx] = ev.evtid
        o_timestamp[r_idx] = ev.t0

        # then, let's log which channels triggered the daq in the daq_record
        for rawid in triggered_rawids:
            o_has_trigger[r_idx, chids.index(rawid)] = True

        # time of the start of the waveform
        t0_start = ev.t0 - trigger_position
        # now we need to peek into the event_buffer to check if the baseline is
        # affected by the tails of previous events (soft pile-up) or includes a
        # small in-trace signal (pre-hard-pileup). to take a decision, we use the
        # baseline slope threshold
        for rawid in chids:
            abs_baseline_slope = 0
            for j in evt_idx_buffer:
                _ev = evt[j]

                # for each event in the buffer get timestamp and energy
                tj = _ev.t0
                ej = 0
                for k, e in zip(_ev.geds_rawid_active, _ev.geds_energy_active):  # noqa: B905
                    if k == rawid:
                        ej = e
                        break

                # if the event occurred before the current waveform window, we
                # account for its tail in the baseline of the current waveform
                # the baseline slope is calculated at the start of the waveform
                if tj <= t0_start:
                    abs_baseline_slope += ej / tau_preamp * np.exp(-(t0_start - tj) / tau_preamp)

                # if there was any energy in a channel that occurred less than
                # (timestamp - trigger_position) ago, this channel has a hard
                # pre-pile-up in the current waveform
                elif tj < ev.t0 and ej >= noise_threshold:
                    o_has_pre_pulse[r_idx, chids.index(rawid)] = True

            # now we have computed the baseline and we can check against the the
            # noise threshold if it's significantly non-flat
            if abs_baseline_slope >= baseline_slope_threshold:
                o_has_slope[r_idx, chids.index(rawid)] = True

    # the timestamp should refer to the start of the waveform, like in our DAQ
    o_timestamp -= trigger_position

    # the last event was recorded too
    r_idx += 1

    return (
        o_evtid[:r_idx],
        o_timestamp[:r_idx],
        o_has_trigger[:r_idx],
        o_has_pre_pulse[:r_idx],
        o_has_post_pulse[:r_idx],
        o_has_slope[:r_idx],
    )
```