reboost 0.9.1__tar.gz → 0.10.0a1__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {reboost-0.9.1/src/reboost.egg-info → reboost-0.10.0a1}/PKG-INFO +2 -2
- {reboost-0.9.1 → reboost-0.10.0a1}/pyproject.toml +1 -1
- {reboost-0.9.1 → reboost-0.10.0a1}/src/reboost/_version.py +3 -3
- {reboost-0.9.1 → reboost-0.10.0a1}/src/reboost/build_hit.py +13 -22
- {reboost-0.9.1 → reboost-0.10.0a1}/src/reboost/core.py +5 -1
- {reboost-0.9.1 → reboost-0.10.0a1}/src/reboost/hpge/psd.py +37 -36
- {reboost-0.9.1 → reboost-0.10.0a1}/src/reboost/hpge/surface.py +29 -16
- {reboost-0.9.1 → reboost-0.10.0a1}/src/reboost/math/functions.py +20 -31
- {reboost-0.9.1 → reboost-0.10.0a1}/src/reboost/math/stats.py +3 -6
- {reboost-0.9.1 → reboost-0.10.0a1}/src/reboost/optmap/evt.py +0 -23
- {reboost-0.9.1 → reboost-0.10.0a1}/src/reboost/shape/cluster.py +36 -48
- {reboost-0.9.1 → reboost-0.10.0a1}/src/reboost/shape/group.py +34 -8
- {reboost-0.9.1 → reboost-0.10.0a1}/src/reboost/units.py +75 -3
- {reboost-0.9.1 → reboost-0.10.0a1}/src/reboost/utils.py +1 -47
- {reboost-0.9.1 → reboost-0.10.0a1/src/reboost.egg-info}/PKG-INFO +2 -2
- {reboost-0.9.1 → reboost-0.10.0a1}/src/reboost.egg-info/requires.txt +1 -1
- {reboost-0.9.1 → reboost-0.10.0a1}/tests/hit/configs/hit_config.yaml +1 -1
- {reboost-0.9.1 → reboost-0.10.0a1}/tests/hit/test_build_hit.py +41 -24
- {reboost-0.9.1 → reboost-0.10.0a1}/tests/hpge/test_current.py +65 -36
- {reboost-0.9.1 → reboost-0.10.0a1}/tests/hpge/test_dt_heuristic.py +27 -36
- {reboost-0.9.1 → reboost-0.10.0a1}/tests/hpge/test_r90.py +30 -3
- reboost-0.10.0a1/tests/hpge/test_surface.py +118 -0
- {reboost-0.9.1 → reboost-0.10.0a1}/tests/test_core.py +1 -1
- {reboost-0.9.1 → reboost-0.10.0a1}/tests/test_math.py +8 -27
- {reboost-0.9.1 → reboost-0.10.0a1}/tests/test_optmap.py +0 -11
- {reboost-0.9.1 → reboost-0.10.0a1}/tests/test_shape.py +66 -40
- {reboost-0.9.1 → reboost-0.10.0a1}/tests/test_units.py +22 -1
- {reboost-0.9.1 → reboost-0.10.0a1}/tests/test_utils.py +7 -32
- reboost-0.9.1/tests/hpge/test_surface.py +0 -76
- {reboost-0.9.1 → reboost-0.10.0a1}/LICENSE +0 -0
- {reboost-0.9.1 → reboost-0.10.0a1}/README.md +0 -0
- {reboost-0.9.1 → reboost-0.10.0a1}/setup.cfg +0 -0
- {reboost-0.9.1 → reboost-0.10.0a1}/src/reboost/__init__.py +0 -0
- {reboost-0.9.1 → reboost-0.10.0a1}/src/reboost/__main__.py +0 -0
- {reboost-0.9.1 → reboost-0.10.0a1}/src/reboost/build_evt.py +0 -0
- {reboost-0.9.1 → reboost-0.10.0a1}/src/reboost/build_glm.py +0 -0
- {reboost-0.9.1 → reboost-0.10.0a1}/src/reboost/cli.py +0 -0
- {reboost-0.9.1 → reboost-0.10.0a1}/src/reboost/daq/__init__.py +0 -0
- {reboost-0.9.1 → reboost-0.10.0a1}/src/reboost/daq/core.py +0 -0
- {reboost-0.9.1 → reboost-0.10.0a1}/src/reboost/daq/utils.py +0 -0
- {reboost-0.9.1 → reboost-0.10.0a1}/src/reboost/hpge/__init__.py +0 -0
- {reboost-0.9.1 → reboost-0.10.0a1}/src/reboost/hpge/utils.py +0 -0
- {reboost-0.9.1 → reboost-0.10.0a1}/src/reboost/iterator.py +0 -0
- {reboost-0.9.1 → reboost-0.10.0a1}/src/reboost/log_utils.py +0 -0
- {reboost-0.9.1 → reboost-0.10.0a1}/src/reboost/math/__init__.py +0 -0
- {reboost-0.9.1 → reboost-0.10.0a1}/src/reboost/optmap/__init__.py +0 -0
- {reboost-0.9.1 → reboost-0.10.0a1}/src/reboost/optmap/__main__.py +0 -0
- {reboost-0.9.1 → reboost-0.10.0a1}/src/reboost/optmap/cli.py +0 -0
- {reboost-0.9.1 → reboost-0.10.0a1}/src/reboost/optmap/convolve.py +0 -0
- {reboost-0.9.1 → reboost-0.10.0a1}/src/reboost/optmap/create.py +0 -0
- {reboost-0.9.1 → reboost-0.10.0a1}/src/reboost/optmap/mapview.py +0 -0
- {reboost-0.9.1 → reboost-0.10.0a1}/src/reboost/optmap/numba_pdg.py +0 -0
- {reboost-0.9.1 → reboost-0.10.0a1}/src/reboost/optmap/optmap.py +0 -0
- {reboost-0.9.1 → reboost-0.10.0a1}/src/reboost/profile.py +0 -0
- {reboost-0.9.1 → reboost-0.10.0a1}/src/reboost/shape/__init__.py +0 -0
- {reboost-0.9.1 → reboost-0.10.0a1}/src/reboost/shape/reduction.py +0 -0
- {reboost-0.9.1 → reboost-0.10.0a1}/src/reboost/spms/__init__.py +0 -0
- {reboost-0.9.1 → reboost-0.10.0a1}/src/reboost/spms/pe.py +0 -0
- {reboost-0.9.1 → reboost-0.10.0a1}/src/reboost.egg-info/SOURCES.txt +0 -0
- {reboost-0.9.1 → reboost-0.10.0a1}/src/reboost.egg-info/dependency_links.txt +0 -0
- {reboost-0.9.1 → reboost-0.10.0a1}/src/reboost.egg-info/entry_points.txt +0 -0
- {reboost-0.9.1 → reboost-0.10.0a1}/src/reboost.egg-info/not-zip-safe +0 -0
- {reboost-0.9.1 → reboost-0.10.0a1}/src/reboost.egg-info/top_level.txt +0 -0
- {reboost-0.9.1 → reboost-0.10.0a1}/tests/conftest.py +0 -0
- {reboost-0.9.1 → reboost-0.10.0a1}/tests/evt/test_evt.py +0 -0
- {reboost-0.9.1 → reboost-0.10.0a1}/tests/glm/test_build_glm.py +0 -0
- {reboost-0.9.1 → reboost-0.10.0a1}/tests/hit/configs/args.yaml +0 -0
- {reboost-0.9.1 → reboost-0.10.0a1}/tests/hit/configs/basic.yaml +0 -0
- {reboost-0.9.1 → reboost-0.10.0a1}/tests/hit/configs/foward_only.yaml +0 -0
- {reboost-0.9.1 → reboost-0.10.0a1}/tests/hit/configs/geom.gdml +0 -0
- {reboost-0.9.1 → reboost-0.10.0a1}/tests/hit/configs/pars.yaml +0 -0
- {reboost-0.9.1 → reboost-0.10.0a1}/tests/hit/configs/reshape.yaml +0 -0
- {reboost-0.9.1 → reboost-0.10.0a1}/tests/hit/configs/spms.yaml +0 -0
- {reboost-0.9.1 → reboost-0.10.0a1}/tests/hpge/simulation/gammas.mac +0 -0
- {reboost-0.9.1 → reboost-0.10.0a1}/tests/hpge/simulation/geometry.gdml +0 -0
- {reboost-0.9.1 → reboost-0.10.0a1}/tests/hpge/simulation/make_dt_map.jl +0 -0
- {reboost-0.9.1 → reboost-0.10.0a1}/tests/hpge/simulation/make_geom.py +0 -0
- {reboost-0.9.1 → reboost-0.10.0a1}/tests/hpge/test_files/drift_time_maps.lh5 +0 -0
- {reboost-0.9.1 → reboost-0.10.0a1}/tests/hpge/test_files/internal_electron.lh5 +0 -0
- {reboost-0.9.1 → reboost-0.10.0a1}/tests/hpge/test_hpge_map.py +0 -0
- {reboost-0.9.1 → reboost-0.10.0a1}/tests/spms/test_pe.py +0 -0
- {reboost-0.9.1 → reboost-0.10.0a1}/tests/test_cli.py +0 -0
- {reboost-0.9.1 → reboost-0.10.0a1}/tests/test_optmap_dets.gdml +0 -0
- {reboost-0.9.1 → reboost-0.10.0a1}/tests/test_profile.py +0 -0
```diff
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: reboost
-Version: 0.9.1
+Version: 0.10.0a1
 Summary: New LEGEND Monte-Carlo simulation post-processing
 Author-email: Manuel Huber <info@manuelhu.de>, Toby Dixon <toby.dixon.23@ucl.ac.uk>, Luigi Pertoldi <gipert@pm.me>
 Maintainer: The LEGEND Collaboration
@@ -26,7 +26,7 @@ Requires-Dist: colorlog
 Requires-Dist: numpy
 Requires-Dist: scipy
 Requires-Dist: numba>=0.60
-Requires-Dist: legend-pydataobj>=1.
+Requires-Dist: legend-pydataobj>=1.17.2
 Requires-Dist: legend-pygeom-optics>=0.15.0
 Requires-Dist: legend-pygeom-tools>=0.0.26
 Requires-Dist: legend-pygeom-hpges
@@ -28,7 +28,7 @@ version_tuple: VERSION_TUPLE
 commit_id: COMMIT_ID
 __commit_id__: COMMIT_ID
 
-__version__ = version = '0.9.1'
-__version_tuple__ = version_tuple = (0, 9, 1)
+__version__ = version = '0.10.0a1'
+__version_tuple__ = version_tuple = (0, 10, 0, 'a1')
 
-__commit_id__ = commit_id = '
+__commit_id__ = commit_id = 'g6793c98a5'
```
```diff
@@ -51,14 +51,15 @@ A :func:`build_hit` to parse the following configuration file:
 
       t0: ak.fill_none(ak.firsts(HITS.time, axis=-1), np.nan)
 
-      evtid: ak.fill_none(ak.firsts(HITS.
+      evtid: ak.fill_none(ak.firsts(HITS.evtid, axis=-1), np.nan)
 
       # distance to the nplus surface in mm
       distance_to_nplus_surface_mm: reboost.hpge.distance_to_surface(
-        HITS.
+        HITS.xloc, HITS.yloc, HITS.zloc,
         DETECTOR_OBJECTS.pyobj,
         DETECTOR_OBJECTS.phyvol.position.eval(),
-        surface_type='nplus'
+        surface_type='nplus',
+        unit='m')
 
       # activness based on FCCD (no TL)
       activeness: ak.where(
@@ -75,7 +76,7 @@ A :func:`build_hit` to parse the following configuration file:
       )
 
       # summed energy of the hit accounting for activeness
-      energy_raw: ak.sum(HITS.
+      energy_raw: ak.sum(HITS.edep * HITS.activeness, axis=-1)
 
       # energy with smearing
       energy: reboost.math.sample_convolve(
@@ -92,7 +93,7 @@ A :func:`build_hit` to parse the following configuration file:
       )
 
       # example of low level reduction on clusters
-      energy_clustered: ak.sum(ak.unflatten(HITS.
+      energy_clustered: ak.sum(ak.unflatten(HITS.edep, HITS.clusters_lengths), axis=-1)
 
       # example of using a reboost helper
       steps_clustered: reboost.shape.reduction.energy_weighted_average(HITS, HITS.clusters_lengths)
@@ -115,7 +116,7 @@ A :func:`build_hit` to parse the following configuration file:
       - num_scint_ph_lar
 
     operations:
-      tot_edep_wlsr: ak.sum(HITS.edep[np.abs(HITS.zloc) <
+      tot_edep_wlsr: ak.sum(HITS.edep[np.abs(HITS.zloc) < 3], axis=-1)
 
   - name: spms
 
@@ -180,6 +181,8 @@ from dbetto import AttrsDict
 from lgdo import lh5
 from lgdo.lh5.exceptions import LH5EncodeError
 
+from reboost import units
+
 from . import core, utils
 from .iterator import GLMIterator
 from .profile import ProfileDict
@@ -323,7 +326,7 @@ def build_hit(
         if time_dict is not None:
             start_time = time.time()
 
-        ak_obj = stps.view_as("ak")
+        ak_obj = stps.view_as("ak", with_units=True)
 
         if time_dict is not None:
             time_dict[proc_name].update_field("conv", start_time)
@@ -334,6 +337,7 @@ def build_hit(
                 expression=proc_group["hit_table_layout"],
                 time_dict=time_dict[proc_name],
             )
+
         else:
             hit_table_layouted = copy.deepcopy(stps)
 
@@ -349,9 +353,6 @@ def build_hit(
             if out_detector not in output_tables and files.hit[file_idx] is None:
                 output_tables[out_detector] = None
 
-            # get the attributes
-            attrs = utils.copy_units(stps)
-
            # if we have more than one output detector, make an independent copy.
             hit_table = (
                 copy.deepcopy(hit_table_layouted)
@@ -376,9 +377,6 @@ def build_hit(
                 hit_table, outputs=proc_group["outputs"]
             )
 
-            # assign units in the output table
-            hit_table = utils.assign_units(hit_table, attrs)
-
             # now write
             if files.hit[file_idx] is not None:
                 # get modes to write with
@@ -443,12 +441,7 @@ def build_hit(
 def _evaluate_operation(
     hit_table, field: str, info: str | dict, local_dict: dict, time_dict: ProfileDict
 ) -> None:
-    if isinstance(info, str):
-        expression = info
-        units = None
-    else:
-        expression = info["expression"]
-        units = info.get("units", None)
+    expression = info if isinstance(info, str) else info["expression"]
 
     # evaluate the expression
     col = core.evaluate_output_column(
@@ -459,8 +452,6 @@ def _evaluate_operation(
         time_dict=time_dict,
         name=field,
     )
-
-    if units is not None:
-        col.attrs["units"] = units
+    units.move_units_to_flattened_data(col)
 
     core.add_field_with_nesting(hit_table, field, col)
```
```diff
@@ -11,7 +11,7 @@ from dbetto import AttrsDict
 from lgdo import lh5
 from lgdo.types import LGDO, Table
 
-from . import utils
+from . import units, utils
 from .profile import ProfileDict
 
 log = logging.getLogger(__name__)
@@ -456,6 +456,10 @@ def evaluate_hit_table_layout(
 
     res = eval(group_func, globs, locs)
 
+    if isinstance(res, Table):
+        for data in res.values():
+            units.move_units_to_flattened_data(data)
+
     if time_dict is not None:
         time_dict.update_field(name="hit_layout", time_start=time_start)
 
```
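The removed `utils.copy_units`/`utils.assign_units` round-trip is replaced at both call sites above by the new `reboost.units.move_units_to_flattened_data` helper. Its implementation is not part of this diff; the snippet below is only a sketch of what its name and usage suggest (pushing a top-level `units` attribute down onto a `VectorOfVectors`' flattened data, where LH5 I/O expects it), not the actual code.

```python
import awkward as ak
from lgdo import VectorOfVectors

# hypothetical column whose units were attached at the top level by an
# upstream, unit-aware conversion step
col = VectorOfVectors(ak.Array([[10.0, 20.0], [5.0]]), attrs={"units": "keV"})

# what a helper like units.move_units_to_flattened_data(col) plausibly does:
# relocate the attribute next to the actual values before writing to LH5
if "units" in col.attrs:
    col.flattened_data.attrs["units"] = col.attrs.pop("units")
```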
```diff
@@ -18,7 +18,7 @@ from .utils import HPGePulseShapeLibrary, HPGeRZField
 log = logging.getLogger(__name__)
 
 
-def r90(edep: ak.Array, xloc: ak.Array, yloc: ak.Array, zloc: ak.Array) -> Array:
+def r90(edep: ak.Array, xloc: ak.Array, yloc: ak.Array, zloc: ak.Array) -> ak.Array:
     """R90 HPGe pulse shape heuristic.
 
     Parameters
@@ -31,18 +31,22 @@ def r90(edep: ak.Array, xloc: ak.Array, yloc: ak.Array, zloc: ak.Array) -> Array
         array of y coordinate position.
     zloc
         array of z coordinate position.
+
+    Returns
+    -------
+    calculated r90 for each hit
     """
+    pos = [units.units_conv_ak(pos, "mm") for pos in [xloc, yloc, zloc]]
+
+    edep = units.units_conv_ak(edep, "keV")
+
     tot_energy = ak.sum(edep, axis=-1, keepdims=True)
 
     def eweight_mean(field, energy):
         return ak.sum(energy * field, axis=-1, keepdims=True) / tot_energy
 
     # Compute distance of each edep to the weighted mean
-    dist = np.sqrt(
-        (xloc - eweight_mean(edep, xloc)) ** 2
-        + (yloc - eweight_mean(edep, yloc)) ** 2
-        + (zloc - eweight_mean(edep, zloc)) ** 2
-    )
+    dist = np.sqrt(sum((p - eweight_mean(edep, p)) ** 2 for p in pos))
 
     # Sort distances and corresponding edep within each event
     sorted_indices = ak.argsort(dist, axis=-1)
@@ -76,16 +80,16 @@ def r90(edep: ak.Array, xloc: ak.Array, yloc: ak.Array, zloc: ak.Array) -> Array
     r90_indices = ak.argmax(cumsum_edep_corrected >= threshold, axis=-1, keepdims=True)
     r90 = sorted_dist[r90_indices]
 
-    return Array(ak.flatten(r90)
+    return units.attach_units(ak.Array(ak.flatten(r90)), "mm")
 
 
 def drift_time(
-    xloc:
-    yloc:
-    zloc:
+    xloc: ak.Array | VectorOfVectors,
+    yloc: ak.Array | VectorOfVectors,
+    zloc: ak.Array | VectorOfVectors,
     dt_map: HPGeRZField,
     coord_offset: pint.Quantity | pyg4ometry.gdml.Position = (0, 0, 0) * u.m,
-) ->
+) -> ak.Array:
     """Calculates drift times for each step (cluster) in an HPGe detector.
 
     Parameters
```
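The r90 heuristic itself is unchanged by this release: it is the radius around the energy-weighted mean position that contains 90% of the deposited energy, now computed in fixed units (mm, keV). A self-contained numpy sketch of the underlying computation for a single event, with made-up values (the package version operates per event on ragged awkward arrays):

```python
import numpy as np

# one event: per-step energies (keV) and positions (mm), illustrative values
edep = np.array([10.0, 80.0, 10.0])
pos = np.array([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [5.0, 0.0, 0.0]])

# energy-weighted mean position of the event
centre = (edep[:, None] * pos).sum(axis=0) / edep.sum()

# distance of each step to the centre, sorted, with energy accumulated outwards
dist = np.linalg.norm(pos - centre, axis=1)
order = np.argsort(dist)
cum_e = np.cumsum(edep[order])

# r90: smallest radius containing at least 90% of the deposited energy
r90 = dist[order][np.argmax(cum_e >= 0.9 * edep.sum())]
print(r90)  # 1.3 (mm)
```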
```diff
@@ -106,7 +110,7 @@ def drift_time(
     # sanitize coord_offset
     coord_offset = units.pg4_to_pint(coord_offset)
 
-    # unit handling (for
+    # unit handling (
     xu, yu = [units.units_convfact(data, dt_map.r_units) for data in (xloc, yloc)]
     zu = units.units_convfact(zloc, dt_map.z_units)
 
@@ -122,7 +126,6 @@
 
         return None
 
-    # transform coordinates
     xloc = xu * xloc - coord_offset[0].to(dt_map.r_units).m
     yloc = yu * yloc - coord_offset[1].to(dt_map.r_units).m
     zloc = zu * zloc - coord_offset[2].to(dt_map.z_units).m
@@ -133,16 +136,13 @@
         np.sqrt(xloc**2 + yloc**2),
         zloc,
     )
-    return
-        dt_values,
-        attrs={"units": units.unit_to_lh5_attr(dt_map.φ_units)},
-    )
+    return units.attach_units(ak.Array(dt_values), units.unit_to_lh5_attr(dt_map.φ_units))
 
 
 def drift_time_heuristic(
-    drift_time:
-    edep:
-) -> Array:
+    drift_time: ak.Array | VectorOfVectors,
+    edep: ak.Array | VectorOfVectors,
+) -> ak.Array:
     """HPGe drift-time-based pulse-shape heuristic.
 
     See :func:`_drift_time_heuristic_impl` for a description of the algorithm.
@@ -160,11 +160,11 @@ def drift_time_heuristic(
     edep, e_units = units.unwrap_lgdo(edep)
 
     # we want to attach the right units to the dt heuristic, if possible
-    attrs = {}
     if t_units is not None and e_units is not None:
-
+        unit = units.unit_to_lh5_attr(t_units / e_units)
+        return units.attach_units(ak.Array(_drift_time_heuristic_impl(drift_time, edep)), unit)
 
-    return Array(_drift_time_heuristic_impl(drift_time, edep)
+    return ak.Array(_drift_time_heuristic_impl(drift_time, edep))
 
 
 @numba.njit(cache=True)
```
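`drift_time_heuristic` now derives the output unit by dividing the pint units unwrapped from its inputs instead of building an `attrs` dict by hand. The division itself is plain pint unit arithmetic; a minimal illustration (the reboost-specific `unit_to_lh5_attr` formatting step is omitted):

```python
import pint

ureg = pint.UnitRegistry()

t_units = ureg.ns   # drift-time units unwrapped from the input
e_units = ureg.keV  # energy units unwrapped from the input

# the heuristic scales like time per energy, so its natural unit is the ratio
print(t_units / e_units)  # e.g. "nanosecond / kiloelectron_volt"
```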
```diff
@@ -828,15 +828,15 @@ def maximum_current(
     activeness_surface: ArrayLike | None = None,
     surface_step_in_um: float = 10,
     return_mode: str = "current",
-) -> Array:
+) -> ak.Array:
     """Estimate the maximum current in the HPGe detector based on :func:`_estimate_current_impl`.
 
     Parameters
     ----------
     edep
-
+        Energies for each step.
     drift_time
-
+        Drift times for each step.
     dist_to_nplus
         Distance to n-plus electrode, only needed if surface heuristics are enabled.
     r
@@ -844,7 +844,7 @@ def maximum_current(
     z
         z coordinate (only needed if a full PSS library is used).
     template
-
+        Array of the bulk pulse template
     times
         time-stamps for the bulk pulse template
     fccd_in_um
@@ -864,12 +864,12 @@ def maximum_current(
     -------
     An Array of the maximum current/ time / energy for each hit.
     """
-    #
-    drift_time
-    edep
-    dist_to_nplus
-    r
-    z
+    # convert to target units
+    drift_time = units.units_conv_ak(drift_time, "ns")
+    edep = units.units_conv_ak(edep, "keV")
+    dist_to_nplus = units.units_conv_ak(dist_to_nplus, "mm")
+    r = units.units_conv_ak(r, "mm")
+    z = units.units_conv_ak(z, "mm")
 
     # prepare inputs for surface sims
     include_surface_effects, dist_to_nplus, templates_surface, activeness_surface = (
@@ -901,11 +901,12 @@ def maximum_current(
 
     # return
     if return_mode == "max_time":
-        return Array(time)
+        return units.attach_units(ak.Array(time), "ns")
     if return_mode == "current":
-
+        # current has no unit (depends on the template)
+        return ak.Array(curr)
     if return_mode == "energy":
-        return Array(energy)
+        return units.attach_units(ak.Array(energy), "keV")
 
     msg = f"Return mode {return_mode} is not implemented."
     raise ValueError(msg)
@@ -5,27 +5,32 @@ import logging
 import awkward as ak
 import numba
 import numpy as np
+import pint
+import pyg4ometry
 import pygeomhpges
 from lgdo import VectorOfVectors
 from lgdo.types import LGDO
-from numpy.typing import
+from numpy.typing import NDArray
 from scipy import stats
 
+from reboost.units import ureg as u
+
+from .. import units
+
 log = logging.getLogger(__name__)
 
 
 def distance_to_surface(
-    positions_x: VectorOfVectors,
-    positions_y: VectorOfVectors,
-    positions_z: VectorOfVectors,
+    positions_x: ak.Array | VectorOfVectors,
+    positions_y: ak.Array | VectorOfVectors,
+    positions_z: ak.Array | VectorOfVectors,
     hpge: pygeomhpges.base.HPGe,
-    det_pos:
+    det_pos: pint.Quantity | pyg4ometry.gdml.Position | tuple = (0, 0, 0) * u.m,
     *,
     surface_type: str | None = None,
-
-    distances_precompute: VectorOfVectors | None = None,
+    distances_precompute: ak.Array | None = None,
     precompute_cutoff: float | None = None,
-) ->
+) -> ak.Array:
     """Computes the distance from each step to the detector surface.
 
     The calculation can be performed for any surface type `nplus`, `pplus`,
```
```diff
@@ -44,32 +49,36 @@ def distance_to_surface(
     hpge
         HPGe object.
     det_pos
-        position of the detector origin, must be a 3 component array corresponding to `(x,y,z)`.
+        position of the detector origin, must be a 3 component array corresponding to `(x,y,z)`. If no units
+        are specified mm is assumed.
     surface_type
         string of which surface to use, can be `nplus`, `pplus` `passive` or None (in which case the distance to any surface is calculated).
     unit
         unit for the hit tier positions table.
     distances_precompute
-
+        Distance to any surface computed by remage.
     precompute_cutoff
         cutoff on distances_precompute to not compute the distance for (in mm)
 
     Returns
     -------
-
+    Distance to the surface for each hit with the same shape as `positions_x/y/z`.
 
     Note
     ----
     `positions_x/positions_y/positions_z` must all have the same shape.
     """
-    factor = np.array([1, 100, 1000])[unit == np.array(["mm", "cm", "m"])][0]
-
     # compute local positions
     pos = []
     sizes = []
 
+    if not isinstance(det_pos, pint.Quantity | pyg4ometry.gdml.Position):
+        det_pos = det_pos * u.mm
+
+    det_pos = units.pg4_to_pint(det_pos)
+
     for idx, pos_tmp in enumerate([positions_x, positions_y, positions_z]):
-        local_pos_tmp =
+        local_pos_tmp = units.units_conv_ak(pos_tmp, "mm") - det_pos[idx].to("mm").m
         local_pos_flat_tmp = ak.flatten(local_pos_tmp).to_numpy()
         pos.append(local_pos_flat_tmp)
         sizes.append(ak.num(local_pos_tmp, axis=1))
```
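The hard-coded `factor` lookup keyed on a `unit` string is gone: `det_pos` is now normalised with pint, assuming millimetres for bare tuples and converting explicitly otherwise. Below is a pint-only sketch of that normalisation step; the helper name `normalise_det_pos` is made up for illustration, and the reboost-specific `pg4_to_pint` handling of `pyg4ometry` positions is not reproduced.

```python
import pint

ureg = pint.UnitRegistry()

def normalise_det_pos(det_pos):
    """Return the detector origin components in mm, assuming mm for bare tuples."""
    if not isinstance(det_pos, pint.Quantity):
        det_pos = det_pos * ureg.mm  # no units given: assume mm
    return [comp.to("mm").m for comp in det_pos]

print(normalise_det_pos((0.0, 0.0, 20.0)))           # [0.0, 0.0, 20.0]
print(normalise_det_pos((0.0, 0.0, 0.02) * ureg.m))  # [0.0, 0.0, 20.0]
```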
```diff
@@ -106,7 +115,7 @@ def distance_to_surface(
         local_positions[indices], surface_indices=surface_indices
     )
 
-    return
+    return units.attach_units(ak.Array(ak.unflatten(distances, size)), "mm")
 
 
 @numba.njit(cache=True)
@@ -222,7 +231,7 @@ def get_surface_response(
     factor: float = 0.29,
     nsteps: int = 10000,
     delta_x: float = 10,
-):
+) -> NDArray:
     """Extract the surface response current pulse based on diffusion.
 
     This extracts the amount of charge arrived (cumulative) at the p-n
@@ -248,6 +257,10 @@ def get_surface_response(
         the number of time steps.
     delta_x
         the width of each position bin.
+
+    Returns
+    -------
+    2D array of the amount of charge arriving at the p-n junction as a function of time for each depth.
     """
     # number of position steps
     nx = int(fccd / delta_x)
@@ -5,14 +5,13 @@ import logging
 import awkward as ak
 import numpy as np
 from lgdo import Array, VectorOfVectors
-
+
+from .. import units
 
 log = logging.getLogger(__name__)
 
 
-def piecewise_linear_activeness(
-    distances: VectorOfVectors | ak.Array, fccd: float, dlf: float
-) -> VectorOfVectors | Array:
+def piecewise_linear_activeness(distances: ak.Array, fccd_in_mm: float, dlf: float) -> ak.Array:
     r"""Piecewise linear HPGe activeness model.
 
     Based on:
@@ -38,11 +37,10 @@
     Parameters
     ----------
     distances
-        the distance from each step to the detector surface.
-        `numpy` or `awkward` array, or a LGDO `VectorOfVectors` or `Array`. The computation
+        the distance from each step to the detector surface. The computation
         is performed for each element and the shape preserved in the output.
-
+    fccd_in_mm
         the value of the FCCD
     dlf
         the fraction of the FCCD which is fully inactive.
@@ -52,14 +50,9 @@
     a :class:`VectorOfVectors` or :class:`Array` of the activeness
     """
     # convert to ak
-
-        distances_ak = distances.view_as("ak")
-    elif not isinstance(distances, ak.Array):
-        distances_ak = ak.Array(distances)
-    else:
-        distances_ak = distances
+    distances_ak = units.units_conv_ak(distances, "mm")
 
-    dl =
+    dl = fccd_in_mm * dlf
     distances_flat = (
         ak.flatten(distances_ak).to_numpy() if distances_ak.ndim > 1 else distances_ak.to_numpy()
     )
@@ -68,24 +61,26 @@
     results = np.full_like(distances_flat, np.nan, dtype=np.float64)
     lengths = ak.num(distances_ak) if distances_ak.ndim > 1 else len(distances_ak)
 
-    mask1 = (distances_flat >
+    mask1 = (distances_flat > fccd_in_mm) | np.isnan(distances_flat)
     mask2 = (distances_flat <= dl) & (~mask1)
     mask3 = ~(mask1 | mask2)
 
     # assign the values
     results[mask1] = 1
     results[mask2] = 0
-    results[mask3] = (distances_flat[mask3] - dl) / (
+    results[mask3] = (distances_flat[mask3] - dl) / (fccd_in_mm - dl)
 
     # reshape
     results = ak.unflatten(ak.Array(results), lengths) if distances_ak.ndim > 1 else results
 
-
+    results = ak.Array(results)
+
+    return units.attach_units(results, "mm")
 
 
 def vectorised_active_energy(
-    distances:
-    edep:
+    distances: ak.Array,
+    edep: ak.Array,
     fccd: float | list,
     dlf: float | list,
 ) -> VectorOfVectors | Array:
```
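The three masks above encode the piecewise linear activeness model: 0 inside the fully dead layer (`dl = fccd_in_mm * dlf`), a linear ramp between `dl` and the FCCD, and 1 in the bulk (NaN distances are treated as bulk). A standalone numpy illustration of that mapping with made-up values:

```python
import numpy as np

fccd_in_mm = 1.0  # full charge collection depth (mm)
dlf = 0.4         # fraction of the FCCD that is fully dead
dl = fccd_in_mm * dlf

# distances of a few steps to the n-plus surface (mm)
d = np.array([0.1, 0.4, 0.7, 1.0, 2.5])

activeness = np.ones_like(d)            # bulk: fully active
activeness[d <= dl] = 0.0               # dead layer: no charge collected
ramp = (d > dl) & (d <= fccd_in_mm)     # transition layer: linear ramp
activeness[ramp] = (d[ramp] - dl) / (fccd_in_mm - dl)

print(activeness)  # 0, 0, 0.5, 1, 1
```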
```diff
@@ -115,7 +110,7 @@ def vectorised_active_energy(
 
     Returns
     -------
-
+    Activeness for each set of parameters
     """
     # add checks on fccd, dlf
     fccd = np.array(fccd)
@@ -133,20 +128,14 @@
 
     dl = fccd * dlf
 
-    def _convert(field):
+    def _convert(field, unit):
         # convert to ak
-
-            field_ak = field.view_as("ak")
-        elif not isinstance(field, ak.Array):
-            field_ak = ak.Array(field)
-        else:
-            msg = f"{field} must be an awkward array or VectorOfVectors"
-            raise TypeError(msg)
+        field_ak = units.units_conv_ak(field, unit)
 
         return field_ak, ak.flatten(field_ak).to_numpy()[:, np.newaxis]
 
-    distances_ak, distances_flat = _convert(distances)
-    _, edep_flat = _convert(edep)
+    distances_ak, distances_flat = _convert(distances, "mm")
+    _, edep_flat = _convert(edep, "keV")
     runs = ak.num(distances_ak, axis=-1)
 
     # vectorise fccd or tl
@@ -172,4 +161,4 @@
 
     energy = ak.sum(ak.unflatten(results * edep_flat, runs), axis=-2)
 
-    return
+    return units.attach_units(energy, "keV")
```
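`vectorised_active_energy` evaluates a whole grid of FCCD/DLF hypotheses at once: the per-step arrays are flattened to column vectors, broadcast against the parameter grid, and the active energy is summed back per event. A self-contained sketch of that broadcasting pattern, using a simple linear activeness (`dlf = 0`) and made-up values:

```python
import awkward as ak
import numpy as np

# per-step distances to the surface (mm) and energies (keV) for two events
dists = ak.Array([[0.3, 2.0], [1.5]])
edep = ak.Array([[10.0, 100.0], [50.0]])

fccd = np.array([1.0, 2.0])  # two FCCD hypotheses (mm), dlf = 0
runs = ak.num(dists, axis=-1)

# flatten to (n_steps, 1) so numpy broadcasts against the (n_fccd,) grid
d = ak.flatten(dists).to_numpy()[:, np.newaxis]
e = ak.flatten(edep).to_numpy()[:, np.newaxis]

activeness = np.clip(d / fccd, 0.0, 1.0)  # shape (n_steps, n_fccd)

# restore the event structure and sum the active energy over steps
energy = ak.sum(ak.unflatten(activeness * e, runs), axis=-2)
print(energy.tolist())  # [[103.0, 101.5], [50.0, 37.5]]
```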
```diff
@@ -6,7 +6,6 @@ from collections.abc import Callable
 import awkward as ak
 import numpy as np
 from lgdo import Array
-from numpy.typing import ArrayLike
 
 log = logging.getLogger(__name__)
 
@@ -72,7 +71,7 @@ def apply_energy_resolution(
     return ak.unflatten(energies_flat_smear, num)
 
 
-def gaussian_sample(mu:
+def gaussian_sample(mu: ak.Array, sigma: ak.Array | float, *, seed: int | None = None) -> ak.Array:
     r"""Generate samples from a gaussian.
 
     Based on:
@@ -99,9 +98,7 @@ def gaussian_sample(mu: ArrayLike, sigma: ArrayLike | float, *, seed: int | None
     """
     # convert inputs
 
-    if isinstance(mu, Array):
-        mu = mu.view_as("np")
-    elif isinstance(mu, ak.Array):
+    if isinstance(mu, ak.Array):
         mu = mu.to_numpy()
     elif not isinstance(mu, np.ndarray):
         mu = np.array(mu)
@@ -116,4 +113,4 @@ def gaussian_sample(mu: ArrayLike, sigma: ArrayLike | float, *, seed: int | None
 
     rng = np.random.default_rng(seed=seed)  # Create a random number generator
 
-    return Array(rng.normal(loc=mu, scale=sigma))
+    return ak.Array(rng.normal(loc=mu, scale=sigma))
```
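`gaussian_sample` now returns a plain awkward array instead of an LGDO `Array`; the sampling itself is ordinary numpy. A short illustration of the idea, smearing per-hit energies with a per-hit resolution (made-up numbers):

```python
import awkward as ak
import numpy as np

energies = np.array([1460.8, 2614.5])  # keV, true hit energies
sigma = np.array([1.0, 1.3])           # keV, per-hit resolution

rng = np.random.default_rng(seed=42)
smeared = ak.Array(rng.normal(loc=energies, scale=sigma))

print(smeared.tolist())  # two values scattered around 1460.8 and 2614.5
```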
```diff
@@ -3,10 +3,8 @@ from __future__ import annotations
 import logging
 from collections import OrderedDict
 from collections.abc import Generator, Iterable
-from pathlib import Path
 
 import numpy as np
-from lgdo import lh5
 from lgdo.lh5 import LH5Iterator
 from lgdo.types import Table
 
@@ -105,27 +103,6 @@ def generate_optmap_evt(
     assert had_last_chunk, "did not reach last chunk in optmap-evt building"
 
 
-def build_optmap_evt(
-    lh5_in_file: str, lh5_out_file: str, detectors: Iterable[str | int], buffer_len: int = int(5e6)
-) -> None:
-    """Create a faster map for lookup of the hits in each detector, for each primary event."""
-    lh5_out_file = Path(lh5_out_file)
-    lh5_out_file_tmp = lh5_out_file.with_stem(".evt-tmp." + lh5_out_file.stem)
-    if lh5_out_file_tmp.exists():
-        msg = f"temporary output file {lh5_out_file_tmp} already exists"
-        raise RuntimeError(msg)
-
-    for vert_it_count, chunk in enumerate(generate_optmap_evt(lh5_in_file, detectors, buffer_len)):
-        log.info("store evt file %s (%d)", lh5_out_file_tmp, vert_it_count - 1)
-        lh5.write(Table(chunk), name=EVT_TABLE_NAME, lh5_file=lh5_out_file_tmp, wo_mode="append")
-
-    # after finishing the output file, rename to the actual output file name.
-    if lh5_out_file.exists():
-        msg = f"output file {lh5_out_file_tmp} already exists after writing tmp output file"
-        raise RuntimeError(msg)
-    lh5_out_file_tmp.rename(lh5_out_file)
-
-
 def get_optical_detectors_from_geom(geom_fn) -> dict[int, str]:
     import pyg4ometry
     import pygeomtools
```