lstosa 0.10.18__py3-none-any.whl → 0.11.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {lstosa-0.10.18.dist-info → lstosa-0.11.0.dist-info}/METADATA +4 -5
- lstosa-0.11.0.dist-info/RECORD +84 -0
- {lstosa-0.10.18.dist-info → lstosa-0.11.0.dist-info}/WHEEL +1 -1
- {lstosa-0.10.18.dist-info → lstosa-0.11.0.dist-info}/entry_points.txt +1 -0
- osa/_version.py +9 -4
- osa/configs/options.py +2 -0
- osa/configs/sequencer.cfg +21 -7
- osa/conftest.py +146 -6
- osa/high_level/significance.py +5 -3
- osa/high_level/tests/test_significance.py +3 -0
- osa/job.py +52 -26
- osa/nightsummary/extract.py +12 -3
- osa/nightsummary/tests/test_extract.py +5 -0
- osa/paths.py +111 -28
- osa/provenance/capture.py +1 -1
- osa/provenance/config/definition.yaml +7 -0
- osa/provenance/utils.py +22 -7
- osa/scripts/autocloser.py +0 -10
- osa/scripts/calibration_pipeline.py +9 -2
- osa/scripts/closer.py +136 -55
- osa/scripts/copy_datacheck.py +5 -3
- osa/scripts/datasequence.py +45 -71
- osa/scripts/gain_selection.py +14 -15
- osa/scripts/provprocess.py +16 -7
- osa/scripts/sequencer.py +49 -34
- osa/scripts/sequencer_catB_tailcuts.py +239 -0
- osa/scripts/sequencer_webmaker.py +4 -0
- osa/scripts/show_run_summary.py +2 -2
- osa/scripts/simulate_processing.py +4 -7
- osa/scripts/tests/test_osa_scripts.py +67 -22
- osa/scripts/update_source_catalog.py +45 -22
- osa/tests/test_jobs.py +28 -11
- osa/tests/test_paths.py +6 -6
- osa/tests/test_raw.py +4 -4
- osa/utils/cliopts.py +37 -32
- osa/utils/register.py +18 -13
- osa/utils/tests/test_utils.py +14 -0
- osa/utils/utils.py +186 -56
- osa/veto.py +1 -1
- osa/workflow/dl3.py +1 -2
- osa/workflow/stages.py +16 -11
- osa/workflow/tests/test_dl3.py +2 -1
- osa/workflow/tests/test_stages.py +7 -5
- lstosa-0.10.18.dist-info/RECORD +0 -83
- {lstosa-0.10.18.dist-info → lstosa-0.11.0.dist-info}/LICENSE +0 -0
- {lstosa-0.10.18.dist-info → lstosa-0.11.0.dist-info}/top_level.txt +0 -0
osa/scripts/provprocess.py CHANGED

@@ -17,6 +17,8 @@ from osa.provenance.io import provdoc2graph, provdoc2json, provlist2provdoc, read_prov
 from osa.provenance.utils import get_log_config
 from osa.utils.cliopts import provprocessparsing
 from osa.utils.logging import myLogger
+from osa.utils.utils import date_to_dir
+from osa.paths import get_dl1_prod_id_and_config, get_dl2_prod_id
 
 __all__ = ["copy_used_file", "parse_lines_log", "parse_lines_run", "produce_provenance"]
 
@@ -110,7 +112,8 @@ def parse_lines_log(filter_cut, calib_runs, run_number):
             keep = True
         # make session starts with calibration
         if session_id and filter_cut == "all" and not filtered:
-
+            nightdir = date_to_dir(options.date)
+            prov_dict["session_id"] = f"{nightdir}{run_number}"
             prov_dict["name"] = run_number
             prov_dict["observation_run"] = run_number
         line = f"{ll[0]}{PROV_PREFIX}{ll[1]}{PROV_PREFIX}{prov_dict}\n"
@@ -336,7 +339,8 @@ def define_paths(grain, start_path, end_path, base_filename):
     paths = {}
 
     # check destination folder exists
-
+    nightdir = date_to_dir(options.date)
+    step_path = Path(start_path) / nightdir / options.prod_id / end_path
     if not step_path.exists():
         log.error(f"Path {step_path} does not exist")
 
@@ -381,8 +385,9 @@ def produce_provenance(session_log_filename, base_filename):
     """
 
     if options.filter == "calibration" or not options.filter:
+        dl1_prod_id = get_dl1_prod_id_and_config(int(options.run))[0]
         paths_calibration = define_paths(
-            "calibration_to_dl1", PATH_DL1,
+            "calibration_to_dl1", PATH_DL1, dl1_prod_id, base_filename
         )
         plines_drs4 = parse_lines_run(
             "drs4_pedestal",
@@ -402,7 +407,8 @@ def produce_provenance(session_log_filename, base_filename):
         pass
 
     if options.filter == "r0_to_dl1" or not options.filter:
-
+        dl1_prod_id = get_dl1_prod_id_and_config(int(options.run))[0]
+        paths_r0_dl1 = define_paths("r0_to_dl1", PATH_DL1, dl1_prod_id, base_filename)
         plines_r0 = parse_lines_run(
             "r0_to_dl1",
             read_prov(filename=session_log_filename),
@@ -425,8 +431,9 @@ def produce_provenance(session_log_filename, base_filename):
     produce_provenance_files(plines_r0 + plines_ab[1:] + plines_check[1:], paths_r0_dl1)
 
     if options.filter == "dl1_to_dl2" or not options.filter:
+        dl2_prod_id = get_dl2_prod_id(int(options.run))
         if not options.no_dl2:
-            paths_dl1_dl2 = define_paths("dl1_to_dl2", PATH_DL2,
+            paths_dl1_dl2 = define_paths("dl1_to_dl2", PATH_DL2, dl2_prod_id, base_filename)
             plines_dl2 = parse_lines_run(
                 "dl1_to_dl2",
                 read_prov(filename=session_log_filename),
@@ -441,16 +448,18 @@ def produce_provenance(session_log_filename, base_filename):
 
     # create calibration_to_dl1 and calibration_to_dl2 prov files
     if not options.filter:
+        dl1_prod_id = get_dl1_prod_id_and_config(int(options.run))[0]
        calibration_to_dl1 = define_paths(
-            "calibration_to_dl1", PATH_DL1,
+            "calibration_to_dl1", PATH_DL1, dl1_prod_id, base_filename
         )
         calibration_to_dl1_lines = calibration_lines + dl1_lines[1:]
         lines_dl1 = copy.deepcopy(calibration_to_dl1_lines)
         produce_provenance_files(lines_dl1, calibration_to_dl1)
 
         if not options.no_dl2:
+            dl2_prod_id = get_dl2_prod_id(int(options.run))
             calibration_to_dl2 = define_paths(
-                "calibration_to_dl2", PATH_DL2,
+                "calibration_to_dl2", PATH_DL2, dl2_prod_id, base_filename
             )
             calibration_to_dl2_lines = calibration_to_dl1_lines + dl1_dl2_lines[1:]
             lines_dl2 = copy.deepcopy(calibration_to_dl2_lines)
osa/scripts/sequencer.py CHANGED

@@ -5,6 +5,7 @@ Orchestrator script that creates and execute the calibration sequence and
 prepares a SLURM job array which launches the data sequences for every subrun.
 """
 
+import warnings
 import logging
 import os
 import sys
@@ -14,7 +15,15 @@ import datetime
 from osa import osadb
 from osa.configs import options
 from osa.configs.config import cfg
-from osa.
+from osa.veto import get_closed_list, get_veto_list
+from osa.utils.logging import myLogger
+
+warnings.filterwarnings(
+    "ignore",
+    message="pkg_resources is deprecated as an API.*",
+    category=UserWarning
+)
+from osa.job import (  # noqa: E402
     set_queue_values,
     prepare_jobs,
     submit_jobs,
@@ -22,16 +31,19 @@ from osa.job import (
     get_squeue_output,
     run_sacct,
     run_squeue,
+    are_all_jobs_correctly_finished,
 )
-from osa.nightsummary.extract import
-
-
-
-
-from osa.
-from osa.
-from osa.
-from osa.
+from osa.nightsummary.extract import (  # noqa: E402
+    build_sequences,
+    extract_runs,
+    extract_sequences
+)
+from osa.nightsummary.nightsummary import run_summary_table  # noqa: E402
+from osa.paths import analysis_path, destination_dir  # noqa: E402
+from osa.report import start  # noqa: E402
+from osa.utils.cliopts import sequencer_cli_parsing  # noqa: E402
+from osa.utils.utils import is_day_closed, gettag, date_to_iso  # noqa: E402
+from osa.scripts.gain_selection import GainSel_finished  # noqa: E402
 
 __all__ = [
     "single_process",
@@ -115,7 +127,7 @@ def single_process(telescope):
         log.info(f"Sequencer is still running for date {date_to_iso(options.date)}. Try again later.")
         sys.exit(0)
 
-
+    if is_sequencer_completed(options.date) and not options.force_submit:
         log.info(f"Sequencer already finished for date {date_to_iso(options.date)}. Exiting")
         sys.exit(0)
 
@@ -192,7 +204,7 @@ def update_sequence_status(seq_list):
             Decimal(get_status_for_sequence(seq, "DATACHECK") * 100) / seq.subruns
         )
         seq.muonstatus = int(Decimal(get_status_for_sequence(seq, "MUON") * 100) / seq.subruns)
-        seq.dl2status = int(Decimal(get_status_for_sequence(seq, "DL2") * 100)
+        seq.dl2status = int(Decimal(get_status_for_sequence(seq, "DL2") * 100))
 
 
 def get_status_for_sequence(sequence, data_level) -> int:
@@ -210,17 +222,26 @@ def get_status_for_sequence(sequence, data_level) -> int:
     number_of_files : int
     """
     if data_level == "DL1AB":
-
-
-
+        try:
+            directory = options.directory / sequence.dl1_prod_id
+            files = list(directory.glob(f"dl1_LST-1*{sequence.run}*.h5"))
+        except AttributeError:
+            return 0
+
     elif data_level == "DL2":
-
-
-
+        try:
+            directory = destination_dir(concept="DL2", create_dir=False, dl2_prod_id=sequence.dl2_prod_id)
+            files = list(directory.glob(f"dl2_LST-1*{sequence.run}*.h5"))
+        except AttributeError:
+            return 0
+
     elif data_level == "DATACHECK":
-
-
-
+        try:
+            directory = options.directory / sequence.dl1_prod_id
+            files = list(directory.glob(f"datacheck_dl1_LST-1*{sequence.run}*.h5"))
+        except AttributeError:
+            return 0
+
     else:
         prefix = cfg.get("PATTERN", f"{data_level}PREFIX")
         suffix = cfg.get("PATTERN", f"{data_level}SUFFIX")
@@ -340,20 +361,14 @@ def is_sequencer_completed(date: datetime.datetime) -> bool:
     """Check if the jobs launched by sequencer are already completed."""
     summary_table = run_summary_table(date)
     data_runs = summary_table[summary_table["run_type"] == "DATA"]
-
-
-
-    for run in data_runs["run_id"]:
-        jobs_run = sacct_info[sacct_info["JobName"]==f"LST1_{run:05d}"]
-        if len(jobs_run["JobID"].unique())>1:
-            last_job_id = sorted(jobs_run["JobID"].unique())[-1]
-            jobs_run = sacct_info[sacct_info["JobID"]==last_job_id]
-        incomplete_jobs = jobs_run[(jobs_run["State"] != "COMPLETED")]
-        if len(jobs_run) == 0 or len(incomplete_jobs) != 0:
-            return False
-
-    return True
+    run_list = extract_runs(data_runs)
+    sequence_list = extract_sequences(options.date, run_list)
 
+    if are_all_jobs_correctly_finished(sequence_list):
+        return True
+    else:
+        log.info("Jobs did not correctly/yet finish")
+        return False
 
 def timeout_in_sequencer(date: datetime.datetime) -> bool:
     """Check if any of the jobs launched by sequencer finished in timeout."""
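The new branches in get_status_for_sequence all share one guard: if a sequence was never assigned a DL1/DL2 production ID, touching that attribute raises AttributeError and the status falls back to 0 instead of crashing. A self-contained illustration of the same guard, using a hypothetical stand-in Sequence class (not the one from lstosa):

from pathlib import Path

class Sequence:
    # Hypothetical stand-in: some sequences (e.g. calibration) carry no dl1_prod_id.
    def __init__(self, run, dl1_prod_id=None):
        self.run = run
        if dl1_prod_id is not None:
            self.dl1_prod_id = dl1_prod_id

def dl1_file_count(directory: Path, sequence) -> int:
    try:
        prod_dir = directory / sequence.dl1_prod_id  # AttributeError if unset
        return len(list(prod_dir.glob(f"dl1_LST-1*{sequence.run}*.h5")))
    except AttributeError:
        return 0

print(dl1_file_count(Path("/tmp"), Sequence("01234")))           # 0: no prod ID
print(dl1_file_count(Path("/tmp"), Sequence("01234", "v0.10")))  # counts matching files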
osa/scripts/sequencer_catB_tailcuts.py ADDED

@@ -0,0 +1,239 @@
+import glob
+import re
+import argparse
+import logging
+from pathlib import Path
+from astropy.table import Table
+import subprocess as sp
+
+from osa.configs import options
+from osa.configs.config import cfg
+from osa.nightsummary.extract import get_last_pedcalib
+from osa.utils.cliopts import valid_date, set_default_date_if_needed
+from osa.utils.logging import myLogger
+from osa.job import run_sacct, get_sacct_output
+from osa.utils.utils import date_to_dir, get_calib_filters, get_lstchain_version
+from osa.paths import (
+    catB_closed_file_exists,
+    catB_calibration_file_exists,
+    analysis_path,
+    get_major_version
+)
+
+log = myLogger(logging.getLogger())
+
+parser = argparse.ArgumentParser()
+parser.add_argument(
+    "-c",
+    "--config",
+    action="store",
+    type=Path,
+    help="Configuration file",
+)
+parser.add_argument(
+    "-d",
+    "--date",
+    type=valid_date,
+    default=None,
+)
+parser.add_argument(
+    "-v",
+    "--verbose",
+    action="store_true",
+    default=False,
+    help="Activate debugging mode.",
+)
+parser.add_argument(
+    "-s",
+    "--simulate",
+    action="store_true",
+    default=False,
+    help="Simulate launching of the sequencer_catB_tailcuts script.",
+)
+parser.add_argument(
+    "tel_id",
+    choices=["ST", "LST1", "LST2", "all"],
+    help="telescope identifier LST1, LST2, ST or all.",
+)
+
+def are_all_history_files_created(run_id: int) -> bool:
+    """Check if all the history files (one per subrun) were created for a given run."""
+    run_summary_dir = Path(cfg.get(options.tel_id, "RUN_SUMMARY_DIR"))
+    run_summary_file = run_summary_dir / f"RunSummary_{date_to_dir(options.date)}.ecsv"
+    run_summary = Table.read(run_summary_file)
+    n_subruns = run_summary[run_summary["run_id"] == run_id]["n_subruns"]
+    analysis_dir = Path(options.directory)
+    history_files = glob.glob(f"{str(analysis_dir)}/sequence_LST1_{run_id:05d}.????.history")
+    if len(history_files) == n_subruns:
+        return True
+    else:
+        return False
+
+
+def r0_to_dl1_step_finished_for_run(run_id: int) -> bool:
+    """
+    Check if the step r0_to_dl1 finished successfully
+    for a given run by looking the history files.
+    """
+    if not are_all_history_files_created(run_id):
+        log.debug(f"All history files for run {run_id:05d} were not created yet.")
+        return False
+    analysis_dir = Path(options.directory)
+    history_files = glob.glob(f"{str(analysis_dir)}/sequence_LST1_{run_id:05d}.????.history")
+    for file in history_files:
+        rc = Path(file).read_text().splitlines()[-1][-1]
+        if rc != "0":
+            print(f"r0_to_dl1 step did not finish successfully (check file {file})")
+            return False
+    return True
+
+
+def get_catB_last_job_id(run_id: int) -> int:
+    """Get job id of the last Cat-B calibration job that was launched for a given run."""
+    log_dir = Path(options.directory) / "log"
+    filenames = glob.glob(f"{log_dir}/catB_calibration_{run_id:05d}_*.err")
+    if filenames:
+        match = re.search(f"catB_calibration_{run_id:05d}_(\d+).err", sorted(filenames)[-1])
+        job_id = match.group(1)
+        return job_id
+
+
+def launch_catB_calibration(run_id: int):
+    """
+    Launch the Cat-B calibration script for a given run if the Cat-B calibration
+    file has not been created yet. If the Cat-B calibration script was launched
+    before and it finished successfully, it creates a catB_{run}.closed file.
+    """
+    job_id = get_catB_last_job_id(run_id)
+    if job_id:
+        job_status = get_sacct_output(run_sacct(job_id=job_id))["State"]
+        if job_status.item() in ["RUNNING", "PENDING"]:
+            log.debug(f"Job {job_id} (corresponding to run {run_id:05d}) is still running.")
+
+        elif job_status.item() == "COMPLETED":
+            catB_closed_file = Path(options.directory) / f"catB_{run_id:05d}.closed"
+            catB_closed_file.touch()
+            log.debug(
+                f"Cat-B job {job_id} (corresponding to run {run_id:05d}) finished "
+                f"successfully. Creating file {catB_closed_file}"
+            )
+
+        else:
+            log.warning(f"Cat-B job {job_id} (corresponding to run {run_id:05d}) failed.")
+
+    else:
+        if catB_calibration_file_exists(run_id):
+            log.info(f"Cat-B calibration file already produced for run {run_id:05d}.")
+            return
+
+        command = cfg.get("lstchain", "catB_calibration")
+        if cfg.getboolean("lstchain", "use_lstcam_env_for_CatB_calib"):
+            env_command = f"conda run -n lstcam-env {command}"
+        else:
+            env_command = command
+        options.filters = get_calib_filters(run_id)
+        base_dir = Path(cfg.get(options.tel_id, "BASE")).resolve()
+        r0_dir = Path(cfg.get(options.tel_id, "R0_DIR")).resolve()
+        log_dir = Path(options.directory) / "log"
+        catA_calib_run = get_last_pedcalib(options.date)
+        slurm_account = cfg.get("SLURM", "ACCOUNT")
+        lstchain_version = get_major_version(get_lstchain_version())
+        analysis_dir = cfg.get("LST1", "ANALYSIS_DIR")
+        cmd = ["sbatch", f"--account={slurm_account}", "--parsable",
+               "-o", f"{log_dir}/catB_calibration_{run_id:05d}_%j.out",
+               "-e", f"{log_dir}/catB_calibration_{run_id:05d}_%j.err",
+               env_command,
+               f"-r {run_id:05d}",
+               f"--catA_calibration_run={catA_calib_run}",
+               "-b", base_dir,
+               f"--r0-dir={r0_dir}",
+               f"--filters={options.filters}",
+               ]
+
+        if command=="onsite_create_cat_B_calibration_file":
+            cmd.append(f"--interleaved-dir={analysis_dir}")
+        elif command=="lstcam_calib_onsite_create_cat_B_calibration_file":
+            cmd.append(f"--dl1-dir={analysis_dir}")
+            cmd.append(f"--lstchain-version={lstchain_version[1:]}")
+
+        if not options.simulate:
+            job = sp.run(cmd, encoding="utf-8", capture_output=True, text=True, check=True)
+            job_id = job.stdout.strip()
+            log.debug(f"Launched Cat-B calibration job {job_id} for run {run_id}!")
+
+        else:
+            log.info(f"Simulate launching of the {command} script.")
+
+
+def launch_tailcuts_finder(run_id: int):
+    """
+    Launch the lstchain script to calculate the correct
+    tailcuts to use for a given run.
+    """
+    command = cfg.get("lstchain", "tailcuts_finder")
+    slurm_account = cfg.get("SLURM", "ACCOUNT")
+    input_dir = Path(options.directory)
+    output_dir = Path(cfg.get(options.tel_id, "TAILCUTS_FINDER_DIR"))
+    log_dir = Path(options.directory) / "log"
+    log_file = log_dir / f"tailcuts_finder_{run_id:05d}_%j.log"
+    cmd = [
+        "sbatch", "--parsable",
+        f"--account={slurm_account}",
+        "-o", log_file,
+        command,
+        f"--input-dir={input_dir}",
+        f"--run={run_id}",
+        f"--output-dir={output_dir}",
+    ]
+    if not options.simulate:
+        job = sp.run(cmd, encoding="utf-8", capture_output=True, text=True, check=True)
+        job_id = job.stdout.strip()
+        log.debug(f"Launched lstchain_find_tailcuts job {job_id} for run {run_id}!")
+
+    else:
+        log.info(f"Simulate launching of the {command} script.")
+
+
+
+def tailcuts_config_file_exists(run_id: int) -> bool:
+    """Check if the config file created by the tailcuts finder script already exists."""
+    tailcuts_config_file = Path(cfg.get(options.tel_id, "TAILCUTS_FINDER_DIR")) / f"dl1ab_Run{run_id:05d}.json"
+    return tailcuts_config_file.exists()
+
+
+def main():
+    """
+    Main script to be called as cron job. It launches the Cat-B calibration script
+    and the tailcuts finder script for each run of the corresponding date, and creates
+    the catB_{run}.closed files if Cat-B calibration has finished successfully.
+    """
+    opts = parser.parse_args()
+    options.tel_id = opts.tel_id
+    options.simulate = opts.simulate
+    options.date = opts.date
+    options.date = set_default_date_if_needed()
+    options.configfile = opts.config.resolve()
+    options.directory = analysis_path(options.tel_id)
+
+    if opts.verbose:
+        log.setLevel(logging.DEBUG)
+    else:
+        log.setLevel(logging.INFO)
+
+    run_summary_dir = Path(cfg.get(options.tel_id, "RUN_SUMMARY_DIR"))
+    run_summary = Table.read(run_summary_dir / f"RunSummary_{date_to_dir(options.date)}.ecsv")
+    data_runs = run_summary[run_summary["run_type"]=="DATA"]
+    for run_id in data_runs["run_id"]:
+        # first check if the dl1a files are produced
+        if not r0_to_dl1_step_finished_for_run(run_id):
+            log.info(f"The r0_to_dl1 step did not finish yet for run {run_id:05d}. Please try again later.")
+        else:
+            # launch catB calibration and tailcut finder in parallel
+            if cfg.getboolean("lstchain", "apply_catB_calibration") and not catB_closed_file_exists(run_id):
+                launch_catB_calibration(run_id)
+            if not cfg.getboolean("lstchain", "apply_standard_dl1b_config") and not tailcuts_config_file_exists(run_id):
+                launch_tailcuts_finder(run_id)
+
+
+if __name__ == "__main__":
+    main()
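The new script is built to be re-run safely from cron: progress is tracked through empty marker files and SLURM accounting rather than in-memory state, so each invocation simply re-examines the filesystem. A minimal sketch of that marker-file handshake (only the touch() side appears verbatim in the diff; catB_closed_file_exists from osa.paths is assumed to perform the matching existence check):

from pathlib import Path

def mark_catB_closed(analysis_dir: Path, run_id: int) -> Path:
    # Same handshake as in launch_catB_calibration: an empty
    # catB_{run}.closed file records a completed Cat-B calibration,
    # so later cron invocations skip the run entirely.
    marker = analysis_dir / f"catB_{run_id:05d}.closed"
    marker.touch()
    return marker

def catB_closed(analysis_dir: Path, run_id: int) -> bool:
    return (analysis_dir / f"catB_{run_id:05d}.closed").exists()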
osa/scripts/sequencer_webmaker.py CHANGED

@@ -16,6 +16,7 @@ from osa.configs.config import cfg
 from osa.utils.cliopts import sequencer_webmaker_argparser
 from osa.utils.logging import myLogger
 from osa.utils.utils import is_day_closed, date_to_iso, date_to_dir
+from osa.paths import all_dl1ab_config_files_exist
 
 log = myLogger(logging.getLogger())
 
@@ -88,6 +89,9 @@ def get_sequencer_output(date: str, config: str, test=False, no_gainsel=False) -
     if test:
         commandargs.insert(-1, "-t")
 
+    if not all_dl1ab_config_files_exist(date):
+        commandargs.insert(-1, "--no-dl1ab")
+
     try:
         output = sp.run(commandargs, stdout=sp.PIPE, stderr=sp.STDOUT, encoding="utf-8", check=True)
     except sp.CalledProcessError as error:
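commandargs.insert(-1, ...) places each extra flag just before the final element, which keeps the telescope positional argument at the end of the command line. A quick demonstration (the argument vector here is hypothetical):

commandargs = ["sequencer", "-c", "sequencer.cfg", "-d", "2024-01-15", "LST1"]
commandargs.insert(-1, "--no-dl1ab")
print(commandargs)
# ['sequencer', '-c', 'sequencer.cfg', '-d', '2024-01-15', '--no-dl1ab', 'LST1']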
osa/scripts/show_run_summary.py CHANGED

@@ -26,8 +26,8 @@ parser.add_argument(
 parser.add_argument(
     "--r0-path",
     type=Path,
-    help="Path to the R0 files. Default is /fefs/
-    default=Path("/fefs/
+    help="Path to the R0 files. Default is /fefs/onsite/data/R0/LSTN-01/lst-arraydaq/events",
+    default=Path("/fefs/onsite/data/R0/LSTN-01/lst-arraydaq/events"),
 )
 
 parser.add_argument(
osa/scripts/simulate_processing.py CHANGED

@@ -8,7 +8,6 @@ python osa/scripts/simulate_processing.py"""
 import logging
 import multiprocessing as mp
 import subprocess
-from datetime import datetime
 from pathlib import Path
 
 import yaml
@@ -20,7 +19,8 @@ from osa.nightsummary.extract import build_sequences
 from osa.provenance.utils import get_log_config
 from osa.utils.cliopts import simprocparsing
 from osa.utils.logging import myLogger
-from osa.utils.utils import
+from osa.utils.utils import date_to_iso
+from osa.paths import analysis_path
 
 __all__ = [
     "parse_template",
@@ -174,7 +174,7 @@ def simulate_processing():
         drs4_pedestal_run_id,
         pedcal_run_id,
         sequence.run_str,
-        options.
+        date_to_iso(options.date),
         options.prod_id,
     ]
     log.info(f"Processing provenance for run {sequence.run_str}")
@@ -187,10 +187,7 @@ def main():
 
     simprocparsing()
 
-
-    options.date = datetime.fromisoformat("2020-01-17")
-    options.tel_id = "LST1"
-    options.directory = date_to_dir(options.date)
+    options.directory = analysis_path(options.tel_id)
 
     log.info("Running simulate processing")
 
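main() no longer pins the simulation to 2020-01-17 and LST1; after simprocparsing() the date and telescope come from the command line, and only the working directory is still set explicitly via analysis_path. A rough, assumption-laden sketch of what analysis_path plausibly resolves to (the real function reads every piece from the OSA config and the parsed options; none of the paths below are authoritative):

from pathlib import Path

def analysis_path(tel_id: str) -> Path:
    # Hypothetical reconstruction: tel_id is assumed to select the config
    # section providing the base directory; the night directory and prod ID
    # come from the parsed options in the real implementation.
    base = Path("/fefs/aswg/data/real")  # placeholder for cfg.get(tel_id, "BASE")
    nightdir = "20200117"                # placeholder for date_to_dir(options.date)
    prod_id = "v0.11.0"                  # placeholder for options.prod_id
    return base / "running_analysis" / nightdir / prod_id

print(analysis_path("LST1"))  # /fefs/aswg/data/real/running_analysis/20200117/v0.11.0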