legend-dataflow-scripts 0.1.0 (py3-none-any.whl)

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (36)
  1. legend_dataflow_scripts-0.1.0.dist-info/METADATA +57 -0
  2. legend_dataflow_scripts-0.1.0.dist-info/RECORD +36 -0
  3. legend_dataflow_scripts-0.1.0.dist-info/WHEEL +5 -0
  4. legend_dataflow_scripts-0.1.0.dist-info/entry_points.txt +18 -0
  5. legend_dataflow_scripts-0.1.0.dist-info/top_level.txt +1 -0
  6. legenddataflowscripts/__init__.py +17 -0
  7. legenddataflowscripts/_version.py +21 -0
  8. legenddataflowscripts/par/__init__.py +0 -0
  9. legenddataflowscripts/par/geds/__init__.py +0 -0
  10. legenddataflowscripts/par/geds/dsp/__init__.py +0 -0
  11. legenddataflowscripts/par/geds/dsp/dplms.py +145 -0
  12. legenddataflowscripts/par/geds/dsp/eopt.py +398 -0
  13. legenddataflowscripts/par/geds/dsp/evtsel.py +400 -0
  14. legenddataflowscripts/par/geds/dsp/nopt.py +120 -0
  15. legenddataflowscripts/par/geds/dsp/pz.py +217 -0
  16. legenddataflowscripts/par/geds/dsp/svm.py +28 -0
  17. legenddataflowscripts/par/geds/dsp/svm_build.py +69 -0
  18. legenddataflowscripts/par/geds/hit/__init__.py +0 -0
  19. legenddataflowscripts/par/geds/hit/aoe.py +245 -0
  20. legenddataflowscripts/par/geds/hit/ecal.py +778 -0
  21. legenddataflowscripts/par/geds/hit/lq.py +213 -0
  22. legenddataflowscripts/par/geds/hit/qc.py +326 -0
  23. legenddataflowscripts/tier/__init__.py +0 -0
  24. legenddataflowscripts/tier/dsp.py +263 -0
  25. legenddataflowscripts/tier/hit.py +148 -0
  26. legenddataflowscripts/utils/__init__.py +15 -0
  27. legenddataflowscripts/utils/alias_table.py +28 -0
  28. legenddataflowscripts/utils/cfgtools.py +14 -0
  29. legenddataflowscripts/utils/convert_np.py +31 -0
  30. legenddataflowscripts/utils/log.py +77 -0
  31. legenddataflowscripts/utils/pulser_removal.py +16 -0
  32. legenddataflowscripts/workflow/__init__.py +20 -0
  33. legenddataflowscripts/workflow/execenv.py +327 -0
  34. legenddataflowscripts/workflow/filedb.py +107 -0
  35. legenddataflowscripts/workflow/pre_compile_catalog.py +24 -0
  36. legenddataflowscripts/workflow/utils.py +113 -0
legenddataflowscripts/par/geds/dsp/evtsel.py
@@ -0,0 +1,400 @@
+ from __future__ import annotations
+
+ import argparse
+ import json
+ import time
+ import warnings
+ from bisect import bisect_left
+ from pathlib import Path
+
+ import lgdo
+ import numpy as np
+ import pygama.math.histogram as pgh
+ import pygama.pargen.energy_cal as pgc
+ from dbetto import TextDB
+ from dbetto.catalog import Props
+ from lgdo import lh5
+ from pygama.pargen.data_cleaning import generate_cuts, get_keys
+ from pygama.pargen.dsp_optimize import run_one_dsp
+
+ from ....utils import build_log, get_pulser_mask
+
+ warnings.filterwarnings(action="ignore", category=RuntimeWarning)
+
+
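+ # get_out_data: apply the DSP-level and raw-level cut expressions, keep events
+ # inside the peak energy window that pass the final cut, and package the
+ # selected waveforms and auxiliary columns into an output lgdo.Table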
+ def get_out_data(
+     raw_data,
+     dsp_data,
+     cut_dict,
+     e_lower_lim,
+     e_upper_lim,
+     ecal_pars,
+     raw_dict,
+     peak,
+     final_cut_field="is_valid_cal",
+     energy_param="trapTmax",
+ ):
+     for outname, info in cut_dict.items():
+         outcol = dsp_data.eval(info["expression"], info.get("parameters", None))
+         dsp_data.add_column(outname, outcol)
+
+     for outname, info in raw_dict.items():
+         outcol = raw_data.eval(info["expression"], info.get("parameters", None))
+         raw_data.add_column(outname, outcol)
+
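+     # select events inside the peak energy window that also pass the final cut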
+     final_mask = (
+         (dsp_data[energy_param].nda > e_lower_lim)
+         & (dsp_data[energy_param].nda < e_upper_lim)
+         & (dsp_data[final_cut_field].nda)
+     )
+
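+     # repackage the windowed and presummed waveforms of the selected events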
+     waveform_windowed = lgdo.WaveformTable(
+         t0=raw_data["waveform_windowed"]["t0"].nda[final_mask],
+         t0_units=raw_data["waveform_windowed"]["t0"].attrs["units"],
+         dt=raw_data["waveform_windowed"]["dt"].nda[final_mask],
+         dt_units=raw_data["waveform_windowed"]["dt"].attrs["units"],
+         values=raw_data["waveform_windowed"]["values"].nda[final_mask],
+     )
+     waveform_presummed = lgdo.WaveformTable(
+         t0=raw_data["waveform_presummed"]["t0"].nda[final_mask],
+         t0_units=raw_data["waveform_presummed"]["t0"].attrs["units"],
+         dt=raw_data["waveform_presummed"]["dt"].nda[final_mask],
+         dt_units=raw_data["waveform_presummed"]["dt"].attrs["units"],
+         values=raw_data["waveform_presummed"]["values"].nda[final_mask],
+     )
+
+     n_sel = int(final_mask.sum())
+     out_tbl = lgdo.Table(
+         col_dict={
+             "waveform_presummed": waveform_presummed,
+             "waveform_windowed": waveform_windowed,
+             "presum_rate": lgdo.Array(raw_data["presum_rate"].nda[final_mask]),
+             "timestamp": lgdo.Array(raw_data["timestamp"].nda[final_mask]),
+             "baseline": lgdo.Array(raw_data["baseline"].nda[final_mask]),
+             "daqenergy": lgdo.Array(raw_data["daqenergy"].nda[final_mask]),
+             "daqenergy_cal": lgdo.Array(raw_data["daqenergy_cal"].nda[final_mask]),
+             "trapTmax_cal": lgdo.Array(
+                 dsp_data["trapTmax"].nda[final_mask] * ecal_pars
+             ),
+             "peak": lgdo.Array(np.full(n_sel, int(peak))),
+         }
+     )
+     return out_tbl, n_sel
+
+
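+ # entry point: run the peak/event selection for a single channel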
+ def par_geds_dsp_evtsel() -> None:
+     argparser = argparse.ArgumentParser()
+     argparser.add_argument("--raw-filelist", help="raw_filelist", type=str)
+     argparser.add_argument(
+         "--pulser-file", help="pulser-file", type=str, required=False
+     )
+     argparser.add_argument(
+         "-p", "--no-pulse", help="no pulser present", action="store_true"
+     )
+
+     argparser.add_argument("--decay-const", help="decay_const", type=str, required=True)
+     argparser.add_argument(
+         "--raw-cal-curve",
+         help="raw calibration curve file(s)",
+         type=str,
+         nargs="*",
+         required=True,
+     )
+
+     argparser.add_argument("--log", help="log_file", type=str)
+     argparser.add_argument("--configs", help="configs", type=str, required=True)
+
+     argparser.add_argument("--datatype", help="Datatype", type=str, required=True)
+     argparser.add_argument("--timestamp", help="Timestamp", type=str, required=True)
+     argparser.add_argument("--channel", help="Channel", type=str, required=True)
+     argparser.add_argument(
+         "--raw-table-name", help="raw table name", type=str, required=True
+     )
+
+     argparser.add_argument("--peak-file", help="peak_file", type=str, required=True)
+     args = argparser.parse_args()
+
+     configs = TextDB(args.configs, lazy=True).on(args.timestamp, system=args.datatype)
+     config_dict = configs["snakemake_rules"]["pars_dsp_peak_selection"]
+
+     log = build_log(config_dict, args.log)
+
+     sto = lh5.LH5Store()
+     t0 = time.time()
+
+     dsp_config = config_dict["inputs"]["processing_chain"][args.channel]
+     peak_json = config_dict["inputs"]["peak_config"][args.channel]
+
+     peak_dict = Props.read_from(peak_json)
+     db_dict = Props.read_from(args.decay_const)
+
+     Path(args.peak_file).parent.mkdir(parents=True, exist_ok=True)
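+     # write to a uniquely named temporary file; it is renamed to the final
+     # output path once the selection has completed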
+     rng = np.random.default_rng()
+     rand_num = f"{rng.integers(0, 99999):05d}"
+     temp_output = f"{args.peak_file}.{rand_num}"
+     if peak_dict.pop("run_selection") is True:
+         log.debug("Starting peak selection")
+
+         with Path(args.raw_filelist).open() as f:
+             files = f.read().splitlines()
+         raw_files = sorted(files)
+
+         raw_dict = Props.read_from(args.raw_cal_curve)[args.channel]["pars"][
+             "operations"
+         ]
+
+         peaks_kev = peak_dict["peaks"]
+         kev_widths = peak_dict["kev_widths"]
+         cut_parameters = peak_dict["cut_parameters"]
+         n_events = peak_dict["n_events"]
+         final_cut_field = peak_dict["final_cut_field"]
+         energy_parameter = peak_dict.get("energy_parameter", "trapTmax")
+
+         lh5_path = args.raw_table_name
+
+         if not isinstance(kev_widths, list):
+             kev_widths = [kev_widths]
+
+         if lh5_path[-1] != "/":
+             lh5_path += "/"
+
+         tb = lh5.read(
+             lh5_path, raw_files, field_mask=["daqenergy", "t_sat_lo", "timestamp"]
+         )
+
+         if not args.no_pulse:
+             mask = get_pulser_mask(args.pulser_file)
+         else:
+             mask = np.full(len(tb), False)
+
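+         # flag events within 0.01 s after a discharge (t_sat_lo > 0) as
+         # "recovering"; these are excluded from the peak selection below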
+         discharges = tb["t_sat_lo"].nda > 0
+         discharge_timestamps = tb["timestamp"].nda[discharges]
+         is_recovering = np.full(len(tb), False, dtype=bool)
+         for tstamp in discharge_timestamps:
+             is_recovering = is_recovering | (
+                 ((tb["timestamp"].nda - tstamp) < 0.01)
+                 & ((tb["timestamp"].nda - tstamp) > 0)
+             )
+
+         for outname, info in raw_dict.items():
+             outcol = tb.eval(info["expression"], info.get("parameters", None))
+             tb.add_column(outname, outcol)
+
+         rough_energy = tb["daqenergy_cal"].nda
+
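+         # one index mask per calibration peak: events within 1.1 half-widths of
+         # the peak in rough energy, excluding pulser and recovering events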
+         masks = {}
+         for peak, kev_width in zip(peaks_kev, kev_widths, strict=False):
+             e_mask = (
+                 (rough_energy > peak - 1.1 * kev_width[0])
+                 & (rough_energy < peak + 1.1 * kev_width[1])
+                 & (~mask)
+             )
+             masks[peak] = np.where(e_mask & (~is_recovering))[0]
+             msg = f"{len(masks[peak])} events found in energy range for {peak}"
+             log.debug(msg)
+
+         input_data = lh5.read(
+             lh5_path, raw_files, n_rows=10000, idx=np.where(~mask)[0]
+         )
+
+         if isinstance(dsp_config, str):
+             dsp_config = Props.read_from(dsp_config)
+
+         dsp_config["outputs"] = [
+             *get_keys(dsp_config["outputs"], cut_parameters),
+             energy_parameter,
+         ]
+
+         log.debug("Processing data")
+         tb_data = run_one_dsp(input_data, dsp_config, db_dict=db_dict)
+
+         if cut_parameters is not None:
+             cut_dict = generate_cuts(tb_data, cut_parameters)
+             msg = f"Cuts are calculated: {json.dumps(cut_dict, indent=2)}"
+             log.debug(msg)
+         else:
+             cut_dict = None
+
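+         # per-peak read state: remaining row indices, rows read so far, and a
+         # reusable read buffer that is flushed once 10000 events accumulate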
+         pk_dicts = {}
+         for peak, kev_width in zip(peaks_kev, kev_widths, strict=False):
+             pk_dicts[peak] = {
+                 "idxs": (masks[peak],),
+                 "n_rows_read": 0,
+                 "obj_buf_start": 0,
+                 "obj_buf": None,
+                 "kev_width": kev_width,
+             }
+
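+         # stream through the raw files, reading only the selected indices for
+         # each peak; indices are re-based to each file as it is consumed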
+         for file in raw_files:
+             log.debug(Path(file).name)
+             for peak, peak_dict in pk_dicts.items():
+                 if peak_dict["idxs"] is not None:
+                     # idx is a long continuous array
+                     n_rows_i = sto.read_n_rows(lh5_path, file)
+                     # find the length of the subset of idx that contains indices
+                     # that are less than n_rows_i
+                     n_rows_to_read_i = bisect_left(peak_dict["idxs"][0], n_rows_i)
+                     # now split idx into idx_i and the remainder
+                     idx_i = (peak_dict["idxs"][0][:n_rows_to_read_i],)
+                     peak_dict["idxs"] = (
+                         peak_dict["idxs"][0][n_rows_to_read_i:] - n_rows_i,
+                     )
+                     if len(idx_i[0]) > 0:
+                         peak_dict["obj_buf"] = lh5.read(
+                             lh5_path,
+                             file,
+                             start_row=0,
+                             idx=idx_i,
+                             obj_buf=peak_dict["obj_buf"],
+                             obj_buf_start=peak_dict["obj_buf_start"],
+                         )
+                         n_rows_read_i = len(peak_dict["obj_buf"])
+
+                         peak_dict["n_rows_read"] += n_rows_read_i
+                         msg = f"{peak}: {peak_dict['n_rows_read']}"
+                         log.debug(msg)
+                         peak_dict["obj_buf_start"] += n_rows_read_i
+                     if peak_dict["n_rows_read"] >= 10000 or file == raw_files[-1]:
+                         if "e_lower_lim" not in peak_dict:
+                             tb_out = run_one_dsp(
+                                 peak_dict["obj_buf"], dsp_config, db_dict=db_dict
+                             )
+                             energy = tb_out[energy_parameter].nda
+
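+                             # Freedman-Diaconis initial bin width, capped at 2 ADC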
+                             init_bin_width = (
+                                 2
+                                 * (
+                                     np.nanpercentile(energy, 75)
+                                     - np.nanpercentile(energy, 25)
+                                 )
+                                 * len(energy) ** (-1 / 3)
+                             )
+                             init_bin_width = min(init_bin_width, 2)
+
+                             hist, bins, var = pgh.get_hist(
+                                 energy,
+                                 range=(
+                                     np.floor(np.nanpercentile(energy, 1)),
+                                     np.ceil(np.nanpercentile(energy, 99)),
+                                 ),
+                                 dx=init_bin_width,
+                             )
+                             peak_loc = pgh.get_bin_centers(bins)[np.nanargmax(hist)]
+
+                             peak_top_pars = pgc.hpge_fit_energy_peak_tops(
+                                 hist,
+                                 bins,
+                                 var,
+                                 [peak_loc],
+                                 n_to_fit=7,
+                             )[0][0]
+                             try:
+                                 mu = peak_top_pars[0]
+                                 if mu > np.nanmax(bins) or mu < np.nanmin(bins):
+                                     raise ValueError
+                             except Exception:
+                                 mu = np.nan
+                             if mu is None or np.isnan(mu):
+                                 log.debug("Fit failed, using max guess")
+                                 rough_adc_to_kev = peak / peak_loc
+                                 e_lower_lim = (
+                                     peak_loc
+                                     - (1.5 * peak_dict["kev_width"][0]) / rough_adc_to_kev
+                                 )
+                                 e_upper_lim = (
+                                     peak_loc
+                                     + (1.5 * peak_dict["kev_width"][1]) / rough_adc_to_kev
+                                 )
+                                 hist, bins, var = pgh.get_hist(
+                                     energy,
+                                     range=(int(e_lower_lim), int(e_upper_lim)),
+                                     dx=init_bin_width,
+                                 )
+                                 mu = pgh.get_bin_centers(bins)[np.nanargmax(hist)]
+
+                             updated_adc_to_kev = peak / mu
+                             e_lower_lim = mu - peak_dict["kev_width"][0] / updated_adc_to_kev
+                             e_upper_lim = mu + peak_dict["kev_width"][1] / updated_adc_to_kev
+                             msg = f"{peak}: lower lim is {e_lower_lim}, upper lim is {e_upper_lim}"
+                             log.info(msg)
+                             peak_dict["e_lower_lim"] = e_lower_lim
+                             peak_dict["e_upper_lim"] = e_upper_lim
+                             peak_dict["ecal_par"] = updated_adc_to_kev
+
+                             out_tbl, n_wfs = get_out_data(
+                                 peak_dict["obj_buf"],
+                                 tb_out,
+                                 cut_dict,
+                                 e_lower_lim,
+                                 e_upper_lim,
+                                 peak_dict["ecal_par"],
+                                 raw_dict,
+                                 int(peak),
+                                 final_cut_field=final_cut_field,
+                                 energy_param=energy_parameter,
+                             )
+                             lh5.write(
+                                 out_tbl,
+                                 name=lh5_path,
+                                 lh5_file=temp_output,
+                                 wo_mode="a",
+                             )
+                             peak_dict["obj_buf"] = None
+                             peak_dict["obj_buf_start"] = 0
+                             peak_dict["n_events"] = n_wfs
+                             msg = f"found {peak_dict['n_events']} events for {peak}"
+                             log.debug(msg)
+                         elif (
+                             peak_dict["obj_buf"] is not None
+                             and len(peak_dict["obj_buf"]) > 0
+                         ):
+                             tb_out = run_one_dsp(
+                                 peak_dict["obj_buf"], dsp_config, db_dict=db_dict
+                             )
+                             out_tbl, n_wfs = get_out_data(
+                                 peak_dict["obj_buf"],
+                                 tb_out,
+                                 cut_dict,
+                                 peak_dict["e_lower_lim"],
+                                 peak_dict["e_upper_lim"],
+                                 peak_dict["ecal_par"],
+                                 raw_dict,
+                                 int(peak),
+                                 final_cut_field=final_cut_field,
+                                 energy_param=energy_parameter,
+                             )
+                             peak_dict["n_events"] += n_wfs
+                             lh5.write(
+                                 out_tbl,
+                                 name=lh5_path,
+                                 lh5_file=temp_output,
+                                 wo_mode="a",
+                             )
+                             peak_dict["obj_buf"] = None
+                             peak_dict["obj_buf_start"] = 0
+                             msg = f"found {peak_dict['n_events']} events for {peak}"
+                             log.debug(msg)
+                         if peak_dict["n_events"] >= n_events:
+                             peak_dict["idxs"] = None
+                             msg = f"{peak} has reached the required number of events"
+                             log.debug(msg)
+
+     else:
+         Path(temp_output).touch()
+     msg = f"event selection completed in {time.time() - t0} seconds"
+     log.debug(msg)
+     Path(temp_output).rename(args.peak_file)
legenddataflowscripts/par/geds/dsp/nopt.py
@@ -0,0 +1,120 @@
+ from __future__ import annotations
+
+ import argparse
+ import pickle as pkl
+ import time
+ from pathlib import Path
+
+ import numpy as np
+ import pygama.pargen.noise_optimization as pno
+ from dbetto import TextDB
+ from dbetto.catalog import Props
+ from lgdo import lh5
+ from pygama.pargen.data_cleaning import generate_cuts, get_cut_indexes
+ from pygama.pargen.dsp_optimize import run_one_dsp
+
+ from ....utils import build_log
+
+
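+ # entry point: optimise noise-related DSP parameters for a single channel
+ # using baseline (zero-energy) events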
+ def par_geds_dsp_nopt() -> None:
+     argparser = argparse.ArgumentParser()
+     argparser.add_argument("--raw-filelist", help="raw_filelist", type=str)
+     argparser.add_argument("--database", help="database", type=str, required=True)
+     argparser.add_argument("--inplots", help="inplots", type=str)
+
+     argparser.add_argument("--configs", help="configs", type=str, required=True)
+     argparser.add_argument("--log", help="log_file", type=str)
+
+     argparser.add_argument("--datatype", help="Datatype", type=str, required=True)
+     argparser.add_argument("--timestamp", help="Timestamp", type=str, required=True)
+     argparser.add_argument("--channel", help="Channel", type=str, required=True)
+     argparser.add_argument(
+         "--raw-table-name", help="raw table name", type=str, required=True
+     )
+
+     argparser.add_argument("--dsp-pars", help="dsp_pars", type=str, required=True)
+     argparser.add_argument("--plot-path", help="plot_path", type=str)
+
+     args = argparser.parse_args()
+
+     configs = TextDB(args.configs, lazy=True).on(args.timestamp, system=args.datatype)
+     config_dict = configs["snakemake_rules"]["pars_dsp_nopt"]
+
+     log = build_log(config_dict, args.log)
+
+     t0 = time.time()
+
+     dsp_config = config_dict["inputs"]["processing_chain"][args.channel]
+     opt_json = config_dict["inputs"]["optimiser_config"][args.channel]
+
+     opt_dict = Props.read_from(opt_json)
+     db_dict = Props.read_from(args.database)
+
+     if opt_dict.pop("run_nopt") is True:
+         with Path(args.raw_filelist).open() as f:
+             files = f.read().splitlines()
+
+         raw_files = sorted(files)
+
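+         # daqenergy == 0 selects baseline (forced-trigger) events for the noise study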
+         energies = lh5.read_as(
+             f"{args.raw_table_name}/daqenergy", raw_files, library="np"
+         )
+         idxs = np.where(energies == 0)[0]
+         tb_data = lh5.read(
+             args.raw_table_name, raw_files, n_rows=opt_dict["n_events"], idx=idxs
+         )
+         t1 = time.time()
+         msg = f"Time to open raw files {t1 - t0:.2f} s, n. baselines {len(tb_data)}"
+         log.info(msg)
+
+         msg = f"Select baselines {len(tb_data)}"
+         log.info(msg)
+         dsp_data = run_one_dsp(tb_data, dsp_config)
+         cut_dict = generate_cuts(dsp_data, cut_dict=opt_dict.pop("cut_pars"))
+         cut_idxs = get_cut_indexes(dsp_data, cut_dict)
+         tb_data = lh5.read(
+             args.raw_table_name,
+             raw_files,
+             n_rows=opt_dict.pop("n_events"),
+             idx=idxs[cut_idxs],
+         )
+         msg = f"... {len(tb_data)} baselines after cuts"
+         log.info(msg)
+
+         if isinstance(dsp_config, str | list):
+             dsp_config = Props.read_from(dsp_config)
+
+         if args.plot_path:
+             out_dict, plot_dict = pno.noise_optimization(
+                 tb_data,
+                 dsp_config,
+                 db_dict.copy(),
+                 opt_dict,
+                 args.raw_table_name,
+                 display=1,
+             )
+         else:
+             out_dict = pno.noise_optimization(
+                 tb_data, dsp_config, db_dict.copy(), opt_dict, args.raw_table_name
+             )
+
+         t2 = time.time()
+         msg = f"Optimiser finished in {(t2 - t0) / 60} minutes"
+         log.info(msg)
+     else:
+         out_dict = {}
+         plot_dict = {}
+
+     if args.plot_path:
+         Path(args.plot_path).parent.mkdir(parents=True, exist_ok=True)
+         if args.inplots:
+             with Path(args.inplots).open("rb") as r:
+                 old_plot_dict = pkl.load(r)
+             plot_dict = dict(noise_optimisation=plot_dict, **old_plot_dict)
+         else:
+             plot_dict = {"noise_optimisation": plot_dict}
+         with Path(args.plot_path).open("wb") as f:
+             pkl.dump(plot_dict, f, protocol=pkl.HIGHEST_PROTOCOL)
+
+     Path(args.dsp_pars).parent.mkdir(parents=True, exist_ok=True)
+     Props.write_to(args.dsp_pars, dict(nopt_pars=out_dict, **db_dict))