legend-dataflow-scripts 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- legend_dataflow_scripts-0.1.0.dist-info/METADATA +57 -0
- legend_dataflow_scripts-0.1.0.dist-info/RECORD +36 -0
- legend_dataflow_scripts-0.1.0.dist-info/WHEEL +5 -0
- legend_dataflow_scripts-0.1.0.dist-info/entry_points.txt +18 -0
- legend_dataflow_scripts-0.1.0.dist-info/top_level.txt +1 -0
- legenddataflowscripts/__init__.py +17 -0
- legenddataflowscripts/_version.py +21 -0
- legenddataflowscripts/par/__init__.py +0 -0
- legenddataflowscripts/par/geds/__init__.py +0 -0
- legenddataflowscripts/par/geds/dsp/__init__.py +0 -0
- legenddataflowscripts/par/geds/dsp/dplms.py +145 -0
- legenddataflowscripts/par/geds/dsp/eopt.py +398 -0
- legenddataflowscripts/par/geds/dsp/evtsel.py +400 -0
- legenddataflowscripts/par/geds/dsp/nopt.py +120 -0
- legenddataflowscripts/par/geds/dsp/pz.py +217 -0
- legenddataflowscripts/par/geds/dsp/svm.py +28 -0
- legenddataflowscripts/par/geds/dsp/svm_build.py +69 -0
- legenddataflowscripts/par/geds/hit/__init__.py +0 -0
- legenddataflowscripts/par/geds/hit/aoe.py +245 -0
- legenddataflowscripts/par/geds/hit/ecal.py +778 -0
- legenddataflowscripts/par/geds/hit/lq.py +213 -0
- legenddataflowscripts/par/geds/hit/qc.py +326 -0
- legenddataflowscripts/tier/__init__.py +0 -0
- legenddataflowscripts/tier/dsp.py +263 -0
- legenddataflowscripts/tier/hit.py +148 -0
- legenddataflowscripts/utils/__init__.py +15 -0
- legenddataflowscripts/utils/alias_table.py +28 -0
- legenddataflowscripts/utils/cfgtools.py +14 -0
- legenddataflowscripts/utils/convert_np.py +31 -0
- legenddataflowscripts/utils/log.py +77 -0
- legenddataflowscripts/utils/pulser_removal.py +16 -0
- legenddataflowscripts/workflow/__init__.py +20 -0
- legenddataflowscripts/workflow/execenv.py +327 -0
- legenddataflowscripts/workflow/filedb.py +107 -0
- legenddataflowscripts/workflow/pre_compile_catalog.py +24 -0
- legenddataflowscripts/workflow/utils.py +113 -0
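
Note: the relative imports used throughout the scripts below (e.g. `from ....utils import build_log`) resolve against the single top-level package listed in top_level.txt, i.e. they are equivalent to this sketch:

# Absolute form of the shared-helper imports used by the par scripts below;
# the exported names are taken from the import statements in those scripts.
from legenddataflowscripts.utils import (
    build_log,
    convert_dict_np_to_float,
    get_pulser_mask,
)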
@@ -0,0 +1,217 @@ legenddataflowscripts/par/geds/dsp/pz.py

from __future__ import annotations

import argparse
import copy
import pickle as pkl
from pathlib import Path

import numpy as np
from dbetto import TextDB
from dbetto.catalog import Props
from lgdo import lh5
from pygama.pargen.data_cleaning import get_cut_indexes
from pygama.pargen.dsp_optimize import run_one_dsp
from pygama.pargen.pz_correct import PZCorrect

from ....utils import (
    build_log,
    convert_dict_np_to_float,
    get_pulser_mask,
)

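# [editor's note] Sketch of the tau_config block this script reads via
# --configs -> snakemake_rules.pars_dsp_tau.inputs.tau_config.<channel>.
# The keys follow from the kwarg_dict accesses below; the values shown are
# illustrative only:
#
#   {
#       "run_tau": True,
#       "threshold": 100,        # daqenergy threshold for event selection
#       "n_events": 10000,       # number of events used for the fit
#       "wf_field": "waveform",  # input waveform field
#       "mode": "single",        # "single" or "double" pole-zero fit
#       "cut_parameters": {},    # optional quality cuts, or omit
#   }
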
def par_geds_dsp_pz() -> None:
    argparser = argparse.ArgumentParser()
    argparser.add_argument("--configs", help="configs path", type=str, required=True)
    argparser.add_argument("--log", help="log file", type=str)
    argparser.add_argument(
        "-p", "--no-pulse", help="no pulser present", action="store_true"
    )

    argparser.add_argument("--datatype", help="Datatype", type=str, required=True)
    argparser.add_argument("--timestamp", help="Timestamp", type=str, required=True)
    argparser.add_argument("--channel", help="Channel", type=str, required=True)
    argparser.add_argument(
        "--raw-table-name", help="raw table name", type=str, required=True
    )

    argparser.add_argument("--plot-path", help="plot path", type=str, required=False)
    argparser.add_argument("--output-file", help="output file", type=str, required=True)

    argparser.add_argument(
        "--pulser-file", help="pulser file", type=str, required=False
    )

    argparser.add_argument(
        "--raw-files", help="input raw files or filelist", nargs="*", type=str
    )
    argparser.add_argument(
        "--pz-files", help="input pz files or filelist", nargs="*", type=str
    )
    args = argparser.parse_args()
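    # [editor's note] Hypothetical invocation; the console-script name and
    # paths are assumptions, since entry_points.txt is not expanded in this
    # diff:
    #
    #   par-geds-dsp-pz --configs ./configs --datatype cal \
    #       --timestamp 20230101T000000Z --channel ch1027201 \
    #       --raw-table-name ch1027201/raw --output-file pars_pz.yaml \
    #       --raw-files cal.filelist --pulser-file pulser.pkl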

    configs = TextDB(args.configs, lazy=True).on(args.timestamp, system=args.datatype)
    config_dict = configs["snakemake_rules"]["pars_dsp_tau"]

    log = build_log(config_dict, args.log)

    channel_dict = config_dict["inputs"]["processing_chain"][args.channel]
    kwarg_dict = config_dict["inputs"]["tau_config"][args.channel]

    kwarg_dict = Props.read_from(kwarg_dict)

    if kwarg_dict["run_tau"] is True:
        dsp_config = Props.read_from(channel_dict)
        kwarg_dict.pop("run_tau")
        if args.pz_files is not None and len(args.pz_files) > 0:
            if (
                isinstance(args.pz_files, list)
                and args.pz_files[0].split(".")[-1] == "filelist"
            ):
                input_file = args.pz_files[0]
                with Path(input_file).open() as f:
                    input_file = f.read().splitlines()
            else:
                input_file = args.pz_files
        else:
            # fix: without this branch input_file is unbound when no pz files
            # are given, so the raw-file fallback below would raise NameError
            input_file = []
        if len(input_file) == 0:
            if (
                isinstance(args.raw_files, list)
                and args.raw_files[0].split(".")[-1] == "filelist"
            ):
                input_file = args.raw_files[0]
                with Path(input_file).open() as f:
                    input_file = f.read().splitlines()
            else:
                input_file = args.raw_files

        msg = f"Reading Data for {args.raw_table_name} from:"
        log.debug(msg)
        log.debug(input_file)

        data = lh5.read(
            args.raw_table_name,
            input_file,
            field_mask=["daqenergy", "timestamp", "t_sat_lo"],
        ).view_as("pd")
        threshold = kwarg_dict.pop("threshold")

        if args.no_pulse is False and (
            args.pz_files is None or len(args.pz_files) == 0
        ):
            mask = get_pulser_mask(args.pulser_file)
        else:
            mask = np.full(len(data), False)

        # flag events closely following a saturated (discharging) waveform as
        # still recovering (within 0.01 in timestamp units, i.e. 10 ms for
        # timestamps in seconds) so they can be excluded from the fit
        discharges = data["t_sat_lo"] > 0
        # fix: np.where(...)[0] returned positional indices here rather than
        # timestamps, which made the window comparison below meaningless
        discharge_timestamps = data["timestamp"][discharges].to_numpy()
        is_recovering = np.full(len(data), False, dtype=bool)
        for tstamp in discharge_timestamps:
            is_recovering = is_recovering | np.where(
                (
                    ((data["timestamp"] - tstamp) < 0.01)
                    & ((data["timestamp"] - tstamp) > 0)
                ),
                True,
                False,
            )
        cuts = np.where(
            (data.daqenergy.to_numpy() > threshold) & (~mask) & (~is_recovering)
        )[0]

        msg = f"{len(cuts)} events passed threshold and pulser cuts"
        log.debug(msg)
        log.debug(cuts)
        tb_data = lh5.read(
            args.raw_table_name,
            input_file,
            idx=cuts,
            n_rows=kwarg_dict["n_events"] * 2,
        )

        # tau1/tau2/frac are produced by the pole-zero fit itself, so they
        # are dropped from the processing-chain outputs for this first pass
        dsp_config_optimise_removed = copy.deepcopy(dsp_config)
        if "tau1" in dsp_config["outputs"]:
            dsp_config_optimise_removed["outputs"].remove("tau1")
        if "tau2" in dsp_config["outputs"]:
            dsp_config_optimise_removed["outputs"].remove("tau2")
        if "frac" in dsp_config["outputs"]:
            dsp_config_optimise_removed["outputs"].remove("frac")

        tb_out = run_one_dsp(tb_data, dsp_config_optimise_removed)
        log.debug("Processed Data")
        cut_parameters = kwarg_dict.get("cut_parameters", None)
        if cut_parameters is not None:
            idxs = get_cut_indexes(tb_out, cut_parameters=cut_parameters)
            log.debug("Applied cuts")
            msg = f"{len(idxs)} events passed cuts"
            log.debug(msg)
            tb_data = lh5.read(
                args.raw_table_name,
                input_file,
                idx=cuts[: 2 * kwarg_dict["n_events"]][idxs],
                n_rows=kwarg_dict.pop("n_events"),
            )

        tau = PZCorrect(
            dsp_config,
            kwarg_dict["wf_field"],
            debug_mode=kwarg_dict.get("debug_mode", False),
        )
        log.debug("Calculating pz constant")
        if kwarg_dict["mode"] == "single":
            tau.get_single_decay_constant(
                tb_data, kwarg_dict.get("slope_param", "tail_slope")
            )
            msg = f"Found tau: {tau.output_dict['pz']['tau']} +- {tau.output_dict['pz']['tau_err']}"
            log.debug(msg)
        elif kwarg_dict["mode"] == "double":
            tau.get_dpz_decay_constants(
                tb_data,
                kwarg_dict.get("percent_tau1_fit", 0.1),
                kwarg_dict.get("percent_tau2_fit", 0.2),
                kwarg_dict.get("offset_from_wf_max", 10),
                kwarg_dict.get("superpulse_bl_idx", 25),
                kwarg_dict.get("superpulse_window_width", 13),
            )
            log.debug("Found dpz constants:")
            for entry in ["tau1", "tau2", "frac"]:
                msg = f"{entry}: {tau.output_dict['pz'][entry]} +- {tau.output_dict['pz'][f'{entry}_err']}"
                log.debug(msg)
        else:
            msg = f"Unknown mode: {kwarg_dict['mode']}, must be either single or double"
            raise ValueError(msg)
        tau.dsp_config = dsp_config_optimise_removed

        if args.plot_path:
            Path(args.plot_path).parent.mkdir(parents=True, exist_ok=True)

            # fix: kwarg_dict("wf_pz_field", "wf_pz") called the dict, which
            # raises TypeError; .get() was clearly intended (here and below)
            plot_dict = tau.plot_waveforms_after_correction(
                tb_data,
                kwarg_dict.get("wf_pz_field", "wf_pz"),
                norm_param=kwarg_dict.get("norm_param", "pz_mean"),
                xlim=[0, len(tb_data[kwarg_dict["wf_field"]]["values"].nda[0])],
            )

            zoomed = tau.plot_waveforms_after_correction(
                tb_data,
                kwarg_dict.get("wf_pz_field", "wf_pz"),
                norm_param=kwarg_dict.get("norm_param", "pz_mean"),
                xlim=[400, len(tb_data[kwarg_dict["wf_field"]]["values"].nda[0])],
                ylim=[0.8, 1.1],
            )

            plot_dict.update({"waveforms_zoomed": zoomed["waveforms"]})

            plot_dict.update(
                tau.plot_slopes(
                    tb_data, kwarg_dict.get("final_slope_param", "pz_slope")
                )
            )
            plot_dict.update(
                tau.plot_slopes(
                    tb_data, kwarg_dict.get("final_slope_param", "pz_slope"), True
                )
            )

            with Path(args.plot_path).open("wb") as f:
                pkl.dump({"pz": plot_dict}, f, protocol=pkl.HIGHEST_PROTOCOL)
        out_dict = convert_dict_np_to_float(tau.output_dict)
    else:
        out_dict = {}

    Path(args.output_file).parent.mkdir(parents=True, exist_ok=True)
    Props.write_to(args.output_file, out_dict)

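For reference, the par file written by Props.write_to above carries the pole-zero results under a "pz" key. A sketch, with key names taken from the tau.output_dict accesses in the script and purely illustrative values:

# Shape of the output par file in "single" mode; in "double" mode the keys
# are tau1, tau2 and frac (each with a matching *_err entry).
out_dict = {
    "pz": {
        "tau": 245.0,    # decay constant from the tail-slope fit
        "tau_err": 0.5,  # fit uncertainty
    }
}
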
@@ -0,0 +1,28 @@ legenddataflowscripts/par/geds/dsp/svm.py

from __future__ import annotations

import argparse
from pathlib import Path

from dbetto.catalog import Props


def par_geds_dsp_svm() -> None:
    argparser = argparse.ArgumentParser()
    argparser.add_argument("--log", help="log file", type=str)
    argparser.add_argument(
        "--output-file", help="output par file", type=str, required=True
    )
    argparser.add_argument(
        "--input-file", help="input par file", type=str, required=True
    )
    argparser.add_argument("--svm-file", help="svm file", required=True)
    args = argparser.parse_args()

    par_data = Props.read_from(args.input_file)

    file = f"'$_/{Path(args.svm_file).name}'"

    par_data["svm"] = {"model_file": file}

    Path(args.output_file).parent.mkdir(parents=True, exist_ok=True)
    Props.write_to(args.output_file, par_data)

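svm.py only rewrites a par file to point at the pickled model; the "$_" in the stored path looks like a placeholder expanded relative to the par file's own location, though that convention is not shown in this diff. A minimal sketch of driving the entry function programmatically (program name and paths are hypothetical):

import sys
from legenddataflowscripts.par.geds.dsp.svm import par_geds_dsp_svm

sys.argv = [
    "par-geds-dsp-svm",               # placeholder program name
    "--input-file", "pars_dsp.yaml",  # hypothetical paths
    "--output-file", "pars_dsp_svm.yaml",
    "--svm-file", "models/svm_model.pkl",
]
# writes pars_dsp_svm.yaml with an added {"svm": {"model_file": ...}} entry
par_geds_dsp_svm()
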
@@ -0,0 +1,69 @@ legenddataflowscripts/par/geds/dsp/svm_build.py

from __future__ import annotations

import argparse
import pickle as pkl
from pathlib import Path

from dbetto import TextDB
from dbetto.catalog import Props
from lgdo import lh5
from sklearn.svm import SVC

from ....utils import build_log


def par_geds_dsp_svm_build() -> None:
    argparser = argparse.ArgumentParser()
    argparser.add_argument("--log", help="log file", type=str)
    argparser.add_argument("--configs", help="config file", type=str)

    argparser.add_argument("--datatype", help="Datatype", type=str, required=True)
    argparser.add_argument("--timestamp", help="Timestamp", type=str, required=True)

    argparser.add_argument(
        "--output-file", help="output SVM file", type=str, required=True
    )
    argparser.add_argument(
        "--train-data", help="input data file", nargs="*", default=None
    )
    argparser.add_argument(
        "--train-hyperpars", help="input hyperparameter file", nargs="*", default=None
    )
    args = argparser.parse_args()

    configs = TextDB(args.configs, lazy=True).on(args.timestamp, system=args.datatype)
    config_dict = configs["snakemake_rules"]["pars_dsp_build_svm"]

    log = build_log(config_dict, args.log)

    if args.train_data is not None and len(args.train_data) > 0:
        # Load files
        tb = lh5.read("ml_train/dsp", args.train_data)
        log.debug("loaded data")

        hyperpars = Props.read_from(args.train_hyperpars)

        # Define training inputs
        dwts_norm = tb["dwt_norm"].nda
        labels = tb["dc_label"].nda

        log.debug("training model")
        # Initialize and train SVM
        svm = SVC(
            random_state=int(hyperpars["random_state"]),
            kernel=hyperpars["kernel"],
            decision_function_shape=hyperpars["decision_function_shape"],
            class_weight=hyperpars["class_weight"],
            C=float(hyperpars["C"]),
            gamma=float(hyperpars["gamma"]),
            cache_size=1000,
        )

        svm.fit(dwts_norm, labels)
        log.debug("trained model")
    else:
        svm = None

    # Save trained model with pickle
    with Path(args.output_file).open("wb") as svm_file:
        pkl.dump(svm, svm_file, protocol=pkl.HIGHEST_PROTOCOL)

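The hyperparameter file read above feeds scikit-learn's SVC directly. A sketch of its contents, where the keys follow from the hyperpars lookups in the script and the values are illustrative only:

# Hypothetical training hyperparameters for the data-cleaning SVM.
hyperpars = {
    "random_state": 42,
    "kernel": "rbf",
    "decision_function_shape": "ovr",
    "class_weight": "balanced",
    "C": 1.0,
    "gamma": 0.001,
}
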
File without changes

@@ -0,0 +1,245 @@ legenddataflowscripts/par/geds/hit/aoe.py

from __future__ import annotations

import argparse
import pickle as pkl
import warnings
from pathlib import Path

import numpy as np
from dbetto import TextDB
from dbetto.catalog import Props
from pygama.pargen.AoE_cal import *  # noqa: F403
from pygama.pargen.AoE_cal import CalAoE, Pol1, SigmaFit, aoe_peak
from pygama.pargen.utils import load_data

from ....utils import (
    build_log,
    convert_dict_np_to_float,
    get_pulser_mask,
)

warnings.filterwarnings(action="ignore", category=RuntimeWarning)


def get_results_dict(aoe_class):
    return {
        "cal_energy_param": aoe_class.cal_energy_param,
        "dt_param": aoe_class.dt_param,
        "rt_correction": aoe_class.dt_corr,
        "1000-1300keV": aoe_class.timecorr_df.to_dict("index"),
        "correction_fit_results": aoe_class.energy_corr_res_dict,
        "low_cut": aoe_class.low_cut_val,
        "high_cut": aoe_class.high_cut_val,
        "low_side_sfs": aoe_class.low_side_sfs.to_dict("index"),
        "2_side_sfs": aoe_class.two_side_sfs.to_dict("index"),
    }


def fill_plot_dict(aoe_class, data, plot_options, plot_dict=None):
    # fix: the original only filled the dict when plot_dict was passed in and
    # returned {} otherwise, so the default call below produced no plots
    if plot_dict is None:
        plot_dict = {}
    if plot_options is not None:
        for key, item in plot_options.items():
            if item["options"] is not None:
                plot_dict[key] = item["function"](aoe_class, data, **item["options"])
            else:
                plot_dict[key] = item["function"](aoe_class, data)
    return plot_dict

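# [editor's note] Shape of a plot_options entry, implied by fill_plot_dict
# and the eval() of item["function"] further down; the entry name, function
# name and options are illustrative:
#
#   "plot_options": {
#       "aoe_vs_energy": {
#           "function": "plot_aoe_vs_energy",  # resolved with eval()
#           "options": {"n_bins": 100},        # or None for no kwargs
#       }
#   }
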
def par_geds_hit_aoe() -> None:
    argparser = argparse.ArgumentParser()
    argparser.add_argument("files", help="files", nargs="*", type=str)
    argparser.add_argument(
        "--pulser-file", help="pulser_file", type=str, required=False
    )
    argparser.add_argument(
        "--tcm-filelist", help="tcm_filelist", type=str, required=False
    )

    argparser.add_argument("--ecal-file", help="ecal_file", type=str, required=True)
    argparser.add_argument("--eres-file", help="eres_file", type=str, required=True)
    argparser.add_argument("--inplots", help="in_plot_path", type=str, required=False)

    argparser.add_argument("--configs", help="configs", type=str, required=True)
    argparser.add_argument("--log", help="log_file", type=str)

    argparser.add_argument("--datatype", help="Datatype", type=str, required=True)
    argparser.add_argument("--timestamp", help="Timestamp", type=str, required=True)
    argparser.add_argument("--channel", help="Channel", type=str, required=True)
    argparser.add_argument("--table-name", help="table name", type=str, required=True)

    argparser.add_argument("--plot-file", help="plot_file", type=str, required=False)
    argparser.add_argument("--hit-pars", help="hit_pars", type=str)
    argparser.add_argument("--aoe-results", help="aoe_results", type=str)

    argparser.add_argument("-d", "--debug", help="debug_mode", action="store_true")
    args = argparser.parse_args()

    configs = TextDB(args.configs, lazy=True).on(args.timestamp, system=args.datatype)
    config_dict = configs["snakemake_rules"]["pars_hit_aoecal"]

    log = build_log(config_dict, args.log)

    channel_dict = config_dict["inputs"]["aoecal_config"][args.channel]
    kwarg_dict = Props.read_from(channel_dict)

    ecal_dict = Props.read_from(args.ecal_file)
    cal_dict = ecal_dict["pars"]
    eres_dict = ecal_dict["results"]["ecal"]

    with Path(args.eres_file).open("rb") as o:
        object_dict = pkl.load(o)

    if kwarg_dict["run_aoe"] is True:
        kwarg_dict.pop("run_aoe")

        pdf = eval(kwarg_dict.pop("pdf")) if "pdf" in kwarg_dict else aoe_peak

        sigma_func = (
            eval(kwarg_dict.pop("sigma_func"))
            if "sigma_func" in kwarg_dict
            else SigmaFit
        )

        mean_func = (
            eval(kwarg_dict.pop("mean_func")) if "mean_func" in kwarg_dict else Pol1
        )

        if "plot_options" in kwarg_dict:
            for field, item in kwarg_dict["plot_options"].items():
                kwarg_dict["plot_options"][field]["function"] = eval(item["function"])

        with Path(args.files[0]).open() as f:
            files = f.read().splitlines()
        files = sorted(files)

        try:
            eres = eres_dict[kwarg_dict["cal_energy_param"]]["eres_linear"].copy()

            def eres_func(x):
                return eval(eres["expression"], dict(x=x, **eres["parameters"]))

        except KeyError:

            def eres_func(x):
                return x * np.nan

        params = [
            kwarg_dict["current_param"],
            "tp_0_est",
            "tp_99",
            kwarg_dict["energy_param"],
            kwarg_dict["cal_energy_param"],
            kwarg_dict["cut_field"],
            "timestamp",
        ]

        if "dt_param" in kwarg_dict:
            params += kwarg_dict["dt_param"]
        else:
            params.append("dt_eff")

        if "dt_cut" in kwarg_dict and kwarg_dict["dt_cut"] is not None:
            cal_dict.update(kwarg_dict["dt_cut"]["cut"])
            params.append(kwarg_dict["dt_cut"]["out_param"])

        # load data in
        data, threshold_mask = load_data(
            files,
            args.table_name,
            cal_dict,
            params=params,
            threshold=kwarg_dict.pop("threshold"),
            return_selection_mask=True,
        )
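
        # [editor's note] load_data returns the selection mask over all input
        # events, so the full-length pulser mask below has to be indexed with
        # threshold_mask to stay aligned with the threshold-selected data.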

        mask = get_pulser_mask(
            pulser_file=args.pulser_file,
        )

        data["is_pulser"] = mask[threshold_mask]

        data["AoE_Uncorr"] = (
            data[kwarg_dict["current_param"]] / data[kwarg_dict["energy_param"]]
        )
        aoe = CalAoE(
            cal_dicts=cal_dict,
            cal_energy_param=kwarg_dict["cal_energy_param"],
            eres_func=eres_func,
            pdf=pdf,
            mean_func=mean_func,
            sigma_func=sigma_func,
            selection_string=f"{kwarg_dict.pop('cut_field')}&(~is_pulser)",
            dt_corr=kwarg_dict.get("dt_corr", False),
            dep_correct=kwarg_dict.get("dep_correct", False),
            dt_cut=kwarg_dict.get("dt_cut", None),
            dt_param=kwarg_dict.get("dt_param", 3),
            high_cut_val=kwarg_dict.get("high_cut_val", 3),
            # fix: this looked up "debug_mode" (a boolean) for the Compton
            # band width; "compt_bands_width" was clearly intended
            compt_bands_width=kwarg_dict.get("compt_bands_width", 20),
            debug_mode=args.debug | kwarg_dict.get("debug_mode", False),
        )
        aoe.update_cal_dicts(
            {
                "AoE_Uncorr": {
                    "expression": f"{kwarg_dict['current_param']}/{kwarg_dict['energy_param']}",
                    "parameters": {},
                }
            }
        )
        aoe.calibrate(data, "AoE_Uncorr")

        log.info("Calibrated A/E")
        out_dict = get_results_dict(aoe)
        plot_dict = fill_plot_dict(aoe, data, kwarg_dict.get("plot_options", None))

        aoe.pdf = aoe.pdf.name

        # replace eres_func before pickling, since lambdas/closures can't be
        # pickled; store the serialisable fit-result dict instead
        try:
            aoe.eres_func = eres_dict[kwarg_dict["cal_energy_param"]][
                "eres_linear"
            ].copy()
        except KeyError:
            aoe.eres_func = {}
    else:
        out_dict = {}
        plot_dict = {}
        aoe = None

    if args.plot_file:
        common_dict = plot_dict.pop("common") if "common" in list(plot_dict) else None
        if args.inplots:
            with Path(args.inplots).open("rb") as r:
                out_plot_dict = pkl.load(r)
            out_plot_dict.update({"aoe": plot_dict})
        else:
            out_plot_dict = {"aoe": plot_dict}

        if "common" in list(out_plot_dict) and common_dict is not None:
            out_plot_dict["common"].update(common_dict)
        elif common_dict is not None:
            out_plot_dict["common"] = common_dict

        Path(args.plot_file).parent.mkdir(parents=True, exist_ok=True)
        with Path(args.plot_file).open("wb") as w:
            pkl.dump(out_plot_dict, w, protocol=pkl.HIGHEST_PROTOCOL)

    Path(args.hit_pars).parent.mkdir(parents=True, exist_ok=True)
    results_dict = dict(**ecal_dict["results"], aoe=out_dict)
    final_hit_dict = {
        "pars": {"operations": cal_dict},
        "results": results_dict,
    }

    final_hit_dict = convert_dict_np_to_float(final_hit_dict)

    Props.write_to(args.hit_pars, final_hit_dict)

    Path(args.aoe_results).parent.mkdir(parents=True, exist_ok=True)
    final_object_dict = dict(
        **object_dict,
        aoe=aoe,
    )
    with Path(args.aoe_results).open("wb") as w:
        pkl.dump(final_object_dict, w, protocol=pkl.HIGHEST_PROTOCOL)
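
The script ends by writing two artifacts: a par file (--hit-pars) with the calibration operations and results, and a pickled object file (--aoe-results) carrying the fitted CalAoE instance. A sketch of the par file layout, with the key structure taken from the code above and purely illustrative parameter names and numbers:

# Hypothetical shape of the --hit-pars output.
final_hit_dict = {
    "pars": {"operations": {"AoE_Uncorr": {"expression": "A_max/cuspEmax"}}},
    "results": {
        "ecal": {},  # carried over from the input ecal file
        "aoe": {
            "low_cut": -1.2,  # low-side A/E cut value
            "high_cut": 3.0,  # high-side A/E cut value
        },
    },
}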