reboost 0.8.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- reboost/__init__.py +14 -0
- reboost/_version.py +34 -0
- reboost/build_evt.py +134 -0
- reboost/build_glm.py +305 -0
- reboost/build_hit.py +466 -0
- reboost/cli.py +194 -0
- reboost/core.py +526 -0
- reboost/daq/__init__.py +5 -0
- reboost/daq/core.py +262 -0
- reboost/daq/utils.py +28 -0
- reboost/hpge/__init__.py +0 -0
- reboost/hpge/psd.py +847 -0
- reboost/hpge/surface.py +284 -0
- reboost/hpge/utils.py +79 -0
- reboost/iterator.py +226 -0
- reboost/log_utils.py +29 -0
- reboost/math/__init__.py +0 -0
- reboost/math/functions.py +175 -0
- reboost/math/stats.py +119 -0
- reboost/optmap/__init__.py +5 -0
- reboost/optmap/cli.py +246 -0
- reboost/optmap/convolve.py +325 -0
- reboost/optmap/create.py +423 -0
- reboost/optmap/evt.py +141 -0
- reboost/optmap/mapview.py +208 -0
- reboost/optmap/numba_pdg.py +26 -0
- reboost/optmap/optmap.py +328 -0
- reboost/profile.py +82 -0
- reboost/shape/__init__.py +0 -0
- reboost/shape/cluster.py +260 -0
- reboost/shape/group.py +189 -0
- reboost/shape/reduction.py +0 -0
- reboost/spms/__init__.py +5 -0
- reboost/spms/pe.py +178 -0
- reboost/units.py +107 -0
- reboost/utils.py +503 -0
- reboost-0.8.3.dist-info/METADATA +82 -0
- reboost-0.8.3.dist-info/RECORD +42 -0
- reboost-0.8.3.dist-info/WHEEL +5 -0
- reboost-0.8.3.dist-info/entry_points.txt +3 -0
- reboost-0.8.3.dist-info/licenses/LICENSE +674 -0
- reboost-0.8.3.dist-info/top_level.txt +1 -0
reboost/units.py
ADDED
@@ -0,0 +1,107 @@
+from __future__ import annotations
+
+import logging
+from typing import Any
+
+import awkward as ak
+import pint
+import pyg4ometry as pg4
+from lgdo import LGDO
+
+log = logging.getLogger(__name__)
+
+ureg = pint.get_application_registry()
+"""The physical units registry."""
+
+# default pretty printing of physical units
+ureg.formatter.default_format = "~P"
+
+
+def pg4_to_pint(obj: pint.Quantity | pg4.gdml.Defines.VectorBase) -> pint.Quantity:
+    """Convert pyg4ometry object to pint Quantity."""
+    if isinstance(obj, pint.Quantity):
+        return obj
+    if isinstance(obj, pg4.gdml.Defines.VectorBase):
+        return [getattr(obj, field).eval() for field in ("x", "y", "z")] * ureg(obj.unit)
+    msg = f"I don't know how to convert object of type {type(obj)} to pint object"
+    raise ValueError(msg)
+
+
+def units_convfact(data: Any | LGDO | ak.Array, target_units: pint.Unit | str) -> float:
+    """Calculate numeric conversion factor to reach `target_units`.
+
+    Parameters
+    ----------
+    data
+        starting data structure. If an :class:`LGDO` or :class:`ak.Array`, try to
+        determine units by peeking into its attributes. Otherwise, just return 1.
+    target_units
+        units you wish to convert data to.
+    """
+    if isinstance(data, LGDO) and "units" in data.attrs:
+        return ureg(data.attrs["units"]).to(target_units).magnitude
+    if isinstance(data, ak.Array) and "units" in ak.parameters(data):
+        return ureg(ak.parameters(data)["units"]).to(target_units).magnitude
+    return 1
+
+
+def units_conv_ak(data: Any | LGDO | ak.Array, target_units: pint.Unit | str) -> ak.Array:
+    """Calculate the numeric conversion factor to reach `target_units` and apply it to the data, converted to an awkward array.
+
+    Parameters
+    ----------
+    data
+        starting data structure. If an :class:`LGDO` or :class:`ak.Array`, try to
+        determine units by peeking into its attributes. Otherwise, return the data
+        unchanged.
+    target_units
+        units you wish to convert data to.
+    """
+    fact = units_convfact(data, target_units)
+    if isinstance(data, LGDO) and fact != 1:
+        return ak.without_parameters(data.view_as("ak") * fact)
+    if isinstance(data, ak.Array) and fact != 1:
+        return ak.without_parameters(data * fact)
+    return data.view_as("ak") if isinstance(data, LGDO) else data
+
+
+def unwrap_lgdo(data: Any | LGDO | ak.Array, library: str = "ak") -> tuple[Any, pint.Unit | None]:
+    """Return a view of the data held by the LGDO and its physical units.
+
+    Parameters
+    ----------
+    data
+        the data container. If not an :class:`LGDO` or :class:`ak.Array`, it will be
+        returned as is with ``None`` units.
+    library
+        forwarded to :meth:`LGDO.view_as`.
+
+    Returns
+    -------
+    A tuple of the un-lgdo'd data and the data units.
+    """
+    ret_data = data
+    ret_units = None
+    if isinstance(data, LGDO):
+        ret_data = data.view_as(library)
+        if "units" in data.attrs:
+            ret_units = ureg(data.attrs["units"]).u
+
+    if isinstance(data, ak.Array):
+        if library != "ak":
+            msg = "cannot unwrap an awkward array as a non-awkward type"
+            raise ValueError(msg)
+
+        if "units" in ak.parameters(data):
+            ret_units = ureg(ak.parameters(data)["units"]).u
+            ret_data = ak.without_parameters(data)
+
+    return ret_data, ret_units
+
+
+def unit_to_lh5_attr(unit: pint.Unit) -> str:
+    """Convert a Pint unit to a string that can be used as ``attrs["units"]`` in an LGDO."""
+    # TODO: we should check if this can always be parsed by Unitful.jl
+    if isinstance(unit, pint.Unit):
+        return f"{unit:~C}"
+    return unit
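
A minimal usage sketch of the helpers above, assuming an awkward array tagged with a `units` parameter (the values and names are illustrative, not from the package):

```python
import awkward as ak

from reboost.units import unit_to_lh5_attr, units_conv_ak, units_convfact, unwrap_lgdo

# tag an awkward array with units metadata
energies = ak.with_parameter(ak.Array([[10.0, 20.0], [5.0]]), "units", "keV")

# numeric conversion factor keV -> MeV: 0.001
fact = units_convfact(energies, "MeV")

# convert the data itself; the result carries no parameters anymore
in_mev = units_conv_ak(energies, "MeV")

# split the raw data from its pint units
data, units = unwrap_lgdo(energies)

# and serialize the units back to an LGDO-style attribute string
attr = unit_to_lh5_attr(units)
```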
reboost/utils.py
ADDED
@@ -0,0 +1,503 @@
+from __future__ import annotations
+
+import importlib
+import itertools
+import logging
+import re
+import time
+from collections.abc import Iterable, Mapping
+from contextlib import contextmanager
+from pathlib import Path
+
+import h5py
+from dbetto import AttrsDict
+from lgdo import lh5
+from lgdo.types import Struct, Table, VectorOfVectors
+
+from .profile import ProfileDict
+
+log = logging.getLogger(__name__)
+
+
+def get_table_names(tcm: VectorOfVectors) -> dict:
+    """Extract table names from ``tcm.attrs["tables"]`` and return them as a dictionary."""
+    raw = tcm.attrs["tables"]
+    cleaned = raw.strip("[]").replace(" ", "").replace("'", "")
+    tables = cleaned.split(",")
+    tables = [tab.split("/")[-1] for tab in tables]
+
+    return {name: idx for idx, name in enumerate(tables)}
+
+
+def get_wo_mode(
+    group: int, out_det: int, in_det: int, chunk: int, new_hit_file: bool, overwrite: bool = False
+) -> str:
+    """Get the mode for lh5 file writing.
+
+    If all indices are 0 and we are writing a new output file,
+    the mode "overwrite_file" is used if the overwrite flag
+    is set, otherwise the mode "write_safe" is used.
+
+    Otherwise the code chooses between "append_column", if this is the
+    first time a group is being written to the file, and "append".
+
+    Parameters
+    ----------
+    group
+        the index of the processing group
+    out_det
+        the index of the output detector
+    in_det
+        the index of the input detector
+    chunk
+        the chunk index
+    new_hit_file
+        a flag of whether we are writing a new hit file. This does not indicate whether
+        the file already exists on disk, but whether the file name is different from the
+        last written chunk for this detector.
+    overwrite
+        a flag of whether to overwrite the old file.
+
+    Returns
+    -------
+    the mode for IO
+    """
+    indices = [group, out_det, in_det, chunk]
+
+    good_idx = all(i == 0 for i in indices)
+
+    if good_idx and new_hit_file:
+        return "overwrite_file" if overwrite else "write_safe"
+
+    # if this is not the first detector and we are at chunk 0, append a column
+    is_ac = ((in_det > 0) or (out_det > 0)) and (chunk == 0)
+    is_ac = is_ac or (in_det == 0 and out_det == 0 and chunk == 0 and (group > 0))
+
+    if is_ac and new_hit_file:
+        return "append_column"
+    return "append"
+
+
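
To make the branching above concrete, a few illustrative calls and the modes they select (index values are hypothetical):

```python
from reboost.utils import get_wo_mode

# first chunk, first detectors, fresh output file
get_wo_mode(group=0, out_det=0, in_det=0, chunk=0, new_hit_file=True)
# -> "write_safe" (or "overwrite_file" if overwrite=True)

# first chunk of a later output detector in the same new file
get_wo_mode(group=0, out_det=1, in_det=0, chunk=0, new_hit_file=True)
# -> "append_column"

# any later chunk just appends to the existing table
get_wo_mode(group=0, out_det=1, in_det=0, chunk=3, new_hit_file=False)
# -> "append"
```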
+def get_wo_mode_forwarded(
+    written_tables: set[str], new_hit_file: bool, overwrite: bool = False
+) -> str:
+    """Get the mode for lh5 file writing for forwarded tables that will be copied without chunking.
+
+    If we are writing a new output file and no other tables had been written yet, then
+    the mode "overwrite_file" is used if the overwrite flag is set, otherwise the mode
+    "write_safe" is used.
+
+    Otherwise "append" is used.
+
+    Parameters
+    ----------
+    written_tables
+        a set of already written table names, also including other table names of
+        non-forwarded (i.e. processed) tables.
+    new_hit_file
+        a flag of whether we are writing a new hit file. This does not indicate whether
+        the file already exists on disk, but whether the file name is different from the
+        last written chunk for this forwarded table.
+    overwrite
+        a flag of whether to overwrite the old file.
+
+    Returns
+    -------
+    the mode for IO
+    """
+    if not new_hit_file:
+        return "append"
+    if overwrite and len(written_tables) == 0:
+        return "overwrite_file"
+    return "write_safe"
+
+
+def is_new_hit_file(files: AttrsDict, file_idx: int) -> bool:
+    """Return whether the hit file with the given index is a "new" hit file.
+
+    A new file is either the first file written, or one whose name differs from
+    the file at the previous index.
+    """
+    return (file_idx == 0) or (files.hit[file_idx] != files.hit[file_idx - 1])
+
+
+def get_file_dict(
+    stp_files: list[str] | str,
+    glm_files: list[str] | str | None,
+    hit_files: list[str] | str | None = None,
+) -> AttrsDict:
+    """Get the file info as an AttrsDict.
+
+    Creates a :class:`dbetto.AttrsDict` with keys ``stp``,
+    ``glm`` and ``hit``. Each key contains a list of
+    file-paths (or `None`).
+
+    Parameters
+    ----------
+    stp_files
+        string or list of strings of the stp files.
+    glm_files
+        string or list of strings of the glm files, or None, in which
+        case the glm will be created in memory.
+    hit_files
+        string or list of strings of the hit files; if None the output
+        files will be created in memory.
+    """
+    # make a list of the right length
+    if isinstance(stp_files, str):
+        stp_files = [stp_files]
+
+    glm_files_list = [None] * len(stp_files) if glm_files is None else glm_files
+
+    # make a list of files in case
+    # 1) hit_files is a str and stp_files is a list
+    # 2) hit_files and stp_files are both lists of different length
+
+    hit_is_list = isinstance(hit_files, list)
+
+    if not hit_is_list:
+        hit_files_list = [hit_files] * len(stp_files)
+    elif hit_is_list and len(hit_files) == 1 and len(stp_files) > 1:
+        hit_files_list = [hit_files[0]] * len(stp_files)
+    else:
+        hit_files_list = hit_files
+
+    files = {}
+
+    for file_type, file_list in zip(
+        ["stp", "glm", "hit"], [stp_files, glm_files_list, hit_files_list], strict=True
+    ):
+        if isinstance(file_list, str):
+            files[file_type] = [file_list]
+        else:
+            files[file_type] = file_list
+
+    return AttrsDict(files)
+
+
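
For example, a single hit file can be fanned out over several stp files (the file names here are hypothetical):

```python
from reboost.utils import get_file_dict

files = get_file_dict(
    stp_files=["sim_0.lh5", "sim_1.lh5"],
    glm_files=None,        # glm will be created in memory
    hit_files="hits.lh5",  # one output file shared by all inputs
)

assert files.stp == ["sim_0.lh5", "sim_1.lh5"]
assert files.glm == [None, None]
assert files.hit == ["hits.lh5", "hits.lh5"]
```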
+def get_file_list(path: str | None, threads: int | None = None) -> list[str] | str | None:
+    """Get a list of files accounting for the multithread index.
+
+    If `threads` or `path` is None, the path is returned unchanged.
+    """
+    if threads is None or path is None:
+        return path
+    return [f"{(Path(path).with_suffix(''))}_t{idx}.lh5" for idx in range(threads)]
+
+
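
A quick sketch of the multithread expansion (the path is hypothetical):

```python
from reboost.utils import get_file_list

get_file_list("out.lh5", threads=3)
# -> ["out_t0.lh5", "out_t1.lh5", "out_t2.lh5"]

get_file_list("out.lh5")
# threads is None -> the path is returned unchanged
```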
+def copy_units(tab: Table) -> dict:
+    """Extract a dictionary of attributes (i.e. units).
+
+    Parameters
+    ----------
+    tab
+        Table to get the units from.
+
+    Returns
+    -------
+    a dictionary with the units for each field
+    in the table.
+    """
+    units = {}
+
+    for field in tab:
+        if "units" in tab[field].attrs:
+            units[field] = tab[field].attrs["units"]
+
+    return units
+
+
+def assign_units(tab: Table, units: Mapping) -> Table:
+    """Copy the attributes from the map of attributes to the table.
+
+    Parameters
+    ----------
+    tab
+        Table to add attributes to.
+    units
+        mapping (dictionary-like) of units of each field
+
+    Returns
+    -------
+    an updated table with LGDO attributes.
+    """
+    for field in tab:
+        if field in units:
+            if not isinstance(tab[field], VectorOfVectors):
+                tab[field].attrs["units"] = units[field]
+            else:
+                tab[field].flattened_data.attrs["units"] = units[field]
+
+    return tab
+
+
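
A round-trip sketch of the two helpers above, assuming `lgdo`'s `Array` and `Table` constructors accept `attrs` and `col_dict` as in recent legend-pydataobj releases (field names and values are illustrative):

```python
import numpy as np
from lgdo.types import Array, Table

from reboost.utils import assign_units, copy_units

tab = Table(col_dict={"energy": Array(np.array([1.0, 2.0]), attrs={"units": "keV"})})

units = copy_units(tab)  # {"energy": "keV"}

# after a processing step that dropped the attributes ...
new_tab = Table(col_dict={"energy": Array(np.array([10.0, 20.0]))})
new_tab = assign_units(new_tab, units)
```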
+def _search_string(string: str):
+    """Capture the characters matching the pattern for a function call."""
+    pattern = r"\b([a-zA-Z_][a-zA-Z0-9_\.]*)\s*\("
+    return re.findall(pattern, string)
+
+
+def get_function_string(expr: str, aliases: dict | None = None) -> tuple[str, dict]:
+    """Get a function call to evaluate.
+
+    Search for any patterns matching the pattern for a function call.
+    We also detect any cases of aliases being used, by default
+    just for `numpy` as `np` and `awkward` as `ak`. In this
+    case, the full name is replaced with the alias in the expression
+    and also in the output globals dictionary.
+
+    It is possible to chain together functions, e.g.:
+
+    .. code-block:: python
+
+        ak.num(np.array([1, 2]))
+
+    and all packages will be imported.
+
+    Parameters
+    ----------
+    expr
+        expression to evaluate.
+    aliases
+        dictionary of package aliases for names used in the expression. These allow
+        giving shorter names to packages. This is combined with two defaults, `ak` for
+        `awkward` and `np` for `numpy`. If `None` is supplied only these are used.
+
+    Returns
+    -------
+    a tuple of the call string and a dictionary of the imported global packages.
+    """
+    # aliases for easier lookup
+    aliases = (
+        {"numpy": "np", "awkward": "ak"}
+        if aliases is None
+        else aliases | {"numpy": "np", "awkward": "ak"}
+    )
+
+    # move to only alias names
+    for name, short_name in aliases.items():
+        expr = expr.replace(name, short_name)
+
+    globs = {}
+    # search on the whole expression
+
+    funcs = _search_string(expr.strip())
+    for func_call in funcs:
+        # without a "." this can't be a module
+        if "." not in func_call:
+            continue
+
+        subpackage, _func = func_call.rsplit(".", 1)
+        package = subpackage.split(".")[0]
+
+        # import the subpackage
+        for name, short_name in aliases.items():
+            subpackage = subpackage.replace(short_name, name)
+
+        # handle the aliases
+        package_import = package
+        for name, short_name in aliases.items():
+            if package == short_name:
+                package_import = name
+
+        # build globals
+        try:
+            importlib.import_module(subpackage, package=__package__)
+
+            globs = globs | {
+                package: importlib.import_module(package_import),
+            }
+        except Exception as e:
+            # for imports of our own package, raise the error back to the user
+            if package_import == "reboost":
+                raise e
+            msg = f"Package {package_import} cannot be imported"
+            log.debug(msg)
+            continue
+
+    return expr, globs
+
+
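
For instance (a sketch; the expression and variable names are illustrative):

```python
from reboost.utils import get_function_string

expr, globs = get_function_string("numpy.sum(awkward.num(data))")

# full names are replaced by the default aliases:
# expr  == "np.sum(ak.num(data))"
# globs == {"np": <module numpy>, "ak": <module awkward>}

# the pair is then ready for evaluation:
# eval(expr, globs, {"data": some_array})
```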
+def get_channels_from_groups(names: list | str | None, groupings: dict | None = None) -> list:
+    """Get a list of channels from a list of groups.
+
+    Parameters
+    ----------
+    names
+        group name or list of group names
+    groupings
+        dictionary of the groupings of channels
+
+    Returns
+    -------
+    list of channels
+    """
+    if names is None:
+        channels_e = []
+    elif isinstance(names, str):
+        channels_e = groupings[names]
+    elif isinstance(names, list):
+        channels_e = list(itertools.chain.from_iterable([groupings[e] for e in names]))
+    else:
+        msg = f"names {names} must be list or str or `None`"
+        raise ValueError(msg)
+
+    return channels_e
+
+
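
For example, with a hypothetical grouping of channels:

```python
from reboost.utils import get_channels_from_groups

groupings = {"geds": ["det001", "det002"], "spms": ["spm001"]}

get_channels_from_groups("geds", groupings)            # ["det001", "det002"]
get_channels_from_groups(["geds", "spms"], groupings)  # ["det001", "det002", "spm001"]
get_channels_from_groups(None)                         # []
```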
+def merge_dicts(dict_list: list) -> dict:
+    """Merge a list of dictionaries, concatenating the items where they exist.
+
+    Parameters
+    ----------
+    dict_list
+        list of dictionaries to merge
+
+    Returns
+    -------
+    a new dictionary after merging.
+
+    Examples
+    --------
+    >>> merge_dicts([{"a": [1, 2, 3], "b": [2]}, {"a": [4, 5, 6], "c": [2]}])
+    {'a': [1, 2, 3, 4, 5, 6], 'b': [2], 'c': [2]}
+    """
+    merged = {}
+
+    for tmp_dict in dict_list:
+        for key, item in tmp_dict.items():
+            if key in merged:
+                merged[key].extend(item)
+            else:
+                merged[key] = item
+
+    return merged
+
+
+@contextmanager
+def filter_logging(level):
+    """Temporarily set the root logger level within the context."""
+    logger = logging.getLogger("root")
+    old_level = logger.getEffectiveLevel()
+    logger.setLevel(level)
+    try:
+        yield
+    finally:
+        logger.setLevel(old_level)
+
+
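
A short usage sketch: temporarily raise the root logger threshold around a noisy call (the function name is hypothetical):

```python
import logging

from reboost.utils import filter_logging

with filter_logging(logging.WARNING):
    run_noisy_step()  # hypothetical; DEBUG/INFO records are suppressed here
# the previous level is restored on exit, even on exceptions
```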
+def _check_input_file(parser, file: str | Iterable[str], descr: str = "input") -> None:
+    file = (file,) if isinstance(file, str) else file
+    not_existing = [f for f in file if not Path(f).exists()]
+    if not_existing:
+        parser.error(f"{descr} file(s) {', '.join(not_existing)} missing")
+
+
+def _check_output_file(parser, file: str | Iterable[str] | None, optional: bool = False) -> None:
+    if file is None and optional:
+        return
+
+    file = (file,) if isinstance(file, str) else file
+    for f in file:
+        if Path(f).exists():
+            parser.error(f"output file {f} already exists")
+
+
+def write_lh5(
+    hit_table: Table,
+    file: str,
+    time_dict: ProfileDict,
+    out_field: str,
+    out_detector: str,
+    wo_mode: str,
+):
+    """Write the lh5 file. This function first writes the data as a struct and then appends to it.
+
+    Parameters
+    ----------
+    hit_table
+        the table to write
+    file
+        the file to write to
+    time_dict
+        the dictionary of timing information to update.
+    out_field
+        output field
+    out_detector
+        output detector name
+    wo_mode
+        the mode to pass to `lh5.write`
+    """
+    if time_dict is not None:
+        start_time = time.time()
+
+    if wo_mode not in ("a", "append"):
+        lh5.write(
+            Struct({out_detector: hit_table}),
+            out_field,
+            file,
+            wo_mode=wo_mode,
+        )
+    else:
+        lh5.write(
+            hit_table,
+            f"{out_field}/{out_detector}",
+            file,
+            wo_mode=wo_mode,
+        )
+    if time_dict is not None:
+        time_dict.update_field("write", start_time)
+
+
+def get_remage_detector_uids(h5file: str | Path) -> dict:
+    """Get mapping of detector names to UIDs from a remage output file.
+
+    The remage LH5 output files contain a link structure that lets the user
+    access detector tables by UID. For example:
+
+    .. code-block:: text
+
+        ├── stp · struct{det1,det2,optdet1,optdet2,scint1,scint2}
+        └── __by_uid__ · struct{det001,det002,det011,det012,det101,det102}
+            ├── det001 -> /stp/scint1
+            ├── det002 -> /stp/scint2
+            ├── det011 -> /stp/det1
+            ├── det012 -> /stp/det2
+            ├── det101 -> /stp/optdet1
+            └── det102 -> /stp/optdet2
+
+    This function analyzes this structure and returns:
+
+    .. code-block:: text
+
+        {1: 'scint1',
+         2: 'scint2',
+         11: 'det1',
+         12: 'det2',
+         101: 'optdet1',
+         102: 'optdet2'}
+
+    Parameters
+    ----------
+    h5file
+        path to remage output file.
+    """
+    if isinstance(h5file, Path):
+        h5file = h5file.as_posix()
+
+    out = {}
+    with h5py.File(h5file, "r") as f:
+        g = f["/stp/__by_uid__"]
+        # loop over links
+        for key in g:
+            # is this a link?
+            link = g.get(key, getlink=True)
+            if isinstance(link, h5py.SoftLink):
+                m = re.fullmatch(r"det(\d+)", key)
+                if m is None:
+                    msg = rf"'{key}' is not formatted as expected, i.e. 'det(\d+)', skipping"
+                    log.warning(msg)
+                    continue
+
+                # get the name of the link target without the leading groups
+                # (i.e. remove /stp)
+                name = link.path.split("/")[-1]
+
+                out[int(m.group(1))] = name
+    return out
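
A usage sketch (the file name is hypothetical):

```python
from reboost.utils import get_remage_detector_uids

uids = get_remage_detector_uids("remage-output.lh5")
# e.g. {1: "scint1", 2: "scint2", 11: "det1", ...} for the structure shown above
```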
reboost-0.8.3.dist-info/METADATA
ADDED
@@ -0,0 +1,82 @@
+Metadata-Version: 2.4
+Name: reboost
+Version: 0.8.3
+Summary: New LEGEND Monte-Carlo simulation post-processing
+Author-email: Manuel Huber <info@manuelhu.de>, Toby Dixon <toby.dixon.23@ucl.ac.uk>, Luigi Pertoldi <gipert@pm.me>
+Maintainer: The LEGEND Collaboration
+License-Expression: GPL-3.0
+Project-URL: Homepage, https://github.com/legend-exp/reboost
+Project-URL: Bug Tracker, https://github.com/legend-exp/reboost/issues
+Project-URL: Discussions, https://github.com/legend-exp/reboost/discussions
+Project-URL: Changelog, https://github.com/legend-exp/reboost/releases
+Classifier: Intended Audience :: Developers
+Classifier: Intended Audience :: Science/Research
+Classifier: Operating System :: MacOS
+Classifier: Operating System :: POSIX
+Classifier: Operating System :: Unix
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3 :: Only
+Classifier: Topic :: Scientific/Engineering
+Requires-Python: >=3.10
+Description-Content-Type: text/markdown
+License-File: LICENSE
+Requires-Dist: hdf5plugin
+Requires-Dist: colorlog
+Requires-Dist: numpy
+Requires-Dist: scipy
+Requires-Dist: numba>=0.60
+Requires-Dist: legend-pydataobj>=1.15.1
+Requires-Dist: legend-pygeom-optics>=0.15.0
+Requires-Dist: legend-pygeom-tools>=0.0.25
+Requires-Dist: legend-pygeom-hpges
+Requires-Dist: hist
+Requires-Dist: dbetto
+Requires-Dist: particle
+Requires-Dist: pandas
+Requires-Dist: matplotlib
+Requires-Dist: pygama
+Requires-Dist: pyg4ometry
+Provides-Extra: all
+Requires-Dist: reboost[docs,test]; extra == "all"
+Provides-Extra: docs
+Requires-Dist: furo; extra == "docs"
+Requires-Dist: myst-parser; extra == "docs"
+Requires-Dist: sphinx; extra == "docs"
+Requires-Dist: sphinx-copybutton; extra == "docs"
+Provides-Extra: test
+Requires-Dist: pre-commit; extra == "test"
+Requires-Dist: pytest>=6.0; extra == "test"
+Requires-Dist: pytest-cov; extra == "test"
+Requires-Dist: legend-pygeom-hpges; extra == "test"
+Requires-Dist: pylegendtestdata>=0.6; extra == "test"
+Dynamic: license-file
+
+# reboost
+
+[PyPI](https://pypi.org/project/reboost/) · [conda-forge](https://anaconda.org/conda-forge/reboost) · [CI](https://github.com/legend-exp/reboost/actions) · [pre-commit](https://github.com/pre-commit/pre-commit) · [black](https://github.com/psf/black) · [Codecov](https://app.codecov.io/gh/legend-exp/reboost) · [documentation](https://reboost.readthedocs.io)
+
+_reboost_ is a package to post-process
+[remage](https://remage.readthedocs.io/en/stable/) simulations. Post-processing
+is the step of applying a detector response model to the (idealised) _remage_ /
+_Geant4_ simulations to "boost" them, allowing comparison to data.
+
+_reboost_ provides tools to:
+
+- apply an HPGe detector response model to the simulations,
+- generate optical maps with dedicated tools,
+- control the full post-processing chain with configuration files.
+
+For more information see our dedicated
+[documentation](https://reboost.readthedocs.io/en/stable/)!