lstosa 0.10.18__py3-none-any.whl → 0.11.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {lstosa-0.10.18.dist-info → lstosa-0.11.0.dist-info}/METADATA +4 -5
- lstosa-0.11.0.dist-info/RECORD +84 -0
- {lstosa-0.10.18.dist-info → lstosa-0.11.0.dist-info}/WHEEL +1 -1
- {lstosa-0.10.18.dist-info → lstosa-0.11.0.dist-info}/entry_points.txt +1 -0
- osa/_version.py +9 -4
- osa/configs/options.py +2 -0
- osa/configs/sequencer.cfg +21 -7
- osa/conftest.py +146 -6
- osa/high_level/significance.py +5 -3
- osa/high_level/tests/test_significance.py +3 -0
- osa/job.py +52 -26
- osa/nightsummary/extract.py +12 -3
- osa/nightsummary/tests/test_extract.py +5 -0
- osa/paths.py +111 -28
- osa/provenance/capture.py +1 -1
- osa/provenance/config/definition.yaml +7 -0
- osa/provenance/utils.py +22 -7
- osa/scripts/autocloser.py +0 -10
- osa/scripts/calibration_pipeline.py +9 -2
- osa/scripts/closer.py +136 -55
- osa/scripts/copy_datacheck.py +5 -3
- osa/scripts/datasequence.py +45 -71
- osa/scripts/gain_selection.py +14 -15
- osa/scripts/provprocess.py +16 -7
- osa/scripts/sequencer.py +49 -34
- osa/scripts/sequencer_catB_tailcuts.py +239 -0
- osa/scripts/sequencer_webmaker.py +4 -0
- osa/scripts/show_run_summary.py +2 -2
- osa/scripts/simulate_processing.py +4 -7
- osa/scripts/tests/test_osa_scripts.py +67 -22
- osa/scripts/update_source_catalog.py +45 -22
- osa/tests/test_jobs.py +28 -11
- osa/tests/test_paths.py +6 -6
- osa/tests/test_raw.py +4 -4
- osa/utils/cliopts.py +37 -32
- osa/utils/register.py +18 -13
- osa/utils/tests/test_utils.py +14 -0
- osa/utils/utils.py +186 -56
- osa/veto.py +1 -1
- osa/workflow/dl3.py +1 -2
- osa/workflow/stages.py +16 -11
- osa/workflow/tests/test_dl3.py +2 -1
- osa/workflow/tests/test_stages.py +7 -5
- lstosa-0.10.18.dist-info/RECORD +0 -83
- {lstosa-0.10.18.dist-info → lstosa-0.11.0.dist-info}/LICENSE +0 -0
- {lstosa-0.10.18.dist-info → lstosa-0.11.0.dist-info}/top_level.txt +0 -0
osa/nightsummary/extract.py
CHANGED

@@ -23,9 +23,9 @@ from osa.configs.datamodel import Sequence
 from osa.job import sequence_filenames
 from osa.nightsummary import database
 from osa.nightsummary.nightsummary import run_summary_table
-from osa.paths import sequence_calibration_files, get_run_date
+from osa.paths import sequence_calibration_files, get_run_date, get_dl1_prod_id_and_config, get_dl2_prod_id
 from osa.utils.logging import myLogger
-from osa.utils.utils import date_to_iso, date_to_dir
+from osa.utils.utils import date_to_iso, date_to_dir, get_RF_model
 
 log = myLogger(logging.getLogger(__name__))
 

@@ -263,6 +263,15 @@ def extract_sequences(date: datetime, run_obj_list: List[RunObj]) -> List[Sequen
            f"Data sequence {sequence.seq} from run {run.run} whose parent is "
            f"{sequence.parent} (DRS4 {required_drs4_run} & Ped-Cal {required_pedcal_run})"
        )
+        if not options.no_dl1ab and sequence.type=="DATA":
+            dl1_prod_id, dl1b_config = get_dl1_prod_id_and_config(sequence.run)
+            sequence.dl1_prod_id = dl1_prod_id
+            sequence.dl1b_config = dl1b_config
+
+        if not options.no_dl2 and not options.no_dl1ab and sequence.type=="DATA":
+            sequence.dl2_prod_id = get_dl2_prod_id(sequence.run)
+            sequence.rf_model = get_RF_model(sequence.run)
+
        sequence_list.append(sequence)
 
    # Add the calibration file names

@@ -300,7 +309,7 @@ def get_source_list(date: datetime) -> dict:
 
    # Create a dictionary of sources and their corresponding sequences
    source_dict = {
-        sequence.run: sequence.source_name
+        int(sequence.run): sequence.source_name
        for sequence in sequence_list
        if sequence.source_name is not None
    }
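For orientation, here is a minimal standalone sketch (not the lstosa API; the class, option flags, run number and production IDs are invented for illustration) of the bookkeeping that `extract_sequences` now performs: only DATA sequences are annotated, DL1b info is attached unless `no_dl1ab` is set, and DL2 info additionally requires `no_dl2` to be unset.

```python
# Hypothetical stand-ins; in lstosa these come from osa.configs.options
# and osa.configs.datamodel.Sequence.
class Seq:
    type = "DATA"
    run = 2006

no_dl1ab, no_dl2 = False, False
sequence = Seq()

if not no_dl1ab and sequence.type == "DATA":
    # in lstosa: get_dl1_prod_id_and_config(sequence.run)
    sequence.dl1_prod_id, sequence.dl1b_config = "tailcut84", "dl1ab_Run02006.json"

if not no_dl2 and not no_dl1ab and sequence.type == "DATA":
    # in lstosa: get_dl2_prod_id(sequence.run) and get_RF_model(sequence.run)
    sequence.dl2_prod_id = "tailcut84/nsb_tuning_0.14"
    sequence.rf_model = "/path/to/rf_models/nsb_tuning_0.14"
```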
osa/nightsummary/tests/test_extract.py
CHANGED

@@ -1,5 +1,7 @@
 from datetime import datetime
+from osa.configs import options
 
+options.test = True
 obs_date = datetime.fromisoformat("2020-01-17")
 
 

@@ -11,6 +13,9 @@ def test_source_list(
    systematic_correction_files,
    pedestal_ids_file,
    merged_run_summary,
+    dl1b_config_files,
+    tailcuts_log_files,
+    rf_models,
 ):
    """Test that the list of name of sources is correct."""
    from osa.nightsummary.extract import get_source_list
osa/paths.py
CHANGED

@@ -2,17 +2,16 @@
 
 import logging
 import re
+import sys
 from datetime import datetime
 from pathlib import Path
 from typing import List
 import subprocess
 import time
-
-import lstchain
+import json
 from astropy.table import Table
 from lstchain.onsite import (find_systematics_correction_file,
-                             find_time_calibration_file,
-                             find_filter_wheels)
+                             find_time_calibration_file)
 
 from osa.configs import options
 from osa.configs.config import DEFAULT_CFG, cfg

@@ -45,8 +44,8 @@ __all__ = [
 
 
 DATACHECK_WEB_BASEDIR = Path(cfg.get("WEBSERVER", "DATACHECK"))
-CALIB_BASEDIR = Path(cfg.get("LST1", "
-DRS4_PEDESTAL_BASEDIR = Path(cfg.get("LST1", "
+CALIB_BASEDIR = Path(cfg.get("LST1", "CAT_A_CALIB_DIR"))
+DRS4_PEDESTAL_BASEDIR = Path(cfg.get("LST1", "CAT_A_PEDESTAL_DIR"))
 
 
 def analysis_path(tel) -> Path:

@@ -103,10 +102,12 @@ def get_drs4_pedestal_filename(run_id: int, prod_id: str) -> Path:
        return files[-1]  # Get the latest production among the major lstchain version
 
    date = utils.date_to_dir(get_run_date(run_id))
+    lstcam_env = Path(cfg.get("LST1", "CALIB_ENV"))
+    lstcam_calib_version = utils.get_lstcam_calib_version(lstcam_env)
    return (
        DRS4_PEDESTAL_BASEDIR
        / date
-        / f"v{
+        / f"v{lstcam_calib_version}/drs4_pedestal.Run{run_id:05d}.0000.h5"
    ).resolve()
 
 

@@ -136,26 +137,26 @@ def get_calibration_filename(run_id: int, prod_id: str) -> Path:
        return files[-1]  # Get the latest production among the major lstchain version
 
    date = utils.date_to_dir(get_run_date(run_id))
+    options.filters = utils.get_calib_filters(run_id)
 
-
-
-
-    else:
-        mongodb = cfg.get("database", "caco_db")
-        try:
-            # Cast run_id to int to avoid problems with numpy int64 encoding in MongoDB
-            options.filters = find_filter_wheels(int(run_id), mongodb)
-        except IOError:
-            log.warning("No filter information found in database. Assuming positions 52.")
-            options.filters = 52
-
+    lstcam_env = Path(cfg.get("LST1", "CALIB_ENV"))
+    lstcam_calib_version = utils.get_lstcam_calib_version(lstcam_env)
    return (
        CALIB_BASEDIR
        / date
-        / f"v{
+        / f"v{lstcam_calib_version}/calibration_filters_{options.filters}.Run{run_id:05d}.0000.h5"
    ).resolve()
 
 
+def get_catB_calibration_filename(run_id: int) -> Path:
+    """Return the Category-B calibration filename of a given run."""
+    date = utils.date_to_dir(options.date)
+    calib_prod_id = utils.get_lstchain_version()
+    catB_calib_dir = Path(cfg.get("LST1", "CAT_B_CALIB_BASE")) / "calibration" / date / calib_prod_id
+    filters = utils.get_calib_filters(run_id)
+    return catB_calib_dir / f"cat_B_calibration_filters_{filters}.Run{run_id:05d}.h5"
+
+
 def pedestal_ids_file_exists(run_id: int) -> bool:
    """Look for the files with pedestal interleaved event identification."""
    pedestal_ids_dir = Path(cfg.get("LST1", "PEDESTAL_FINDER_DIR"))

@@ -256,15 +257,18 @@ def sequence_calibration_files(sequence_list: List[Sequence]) -> None:
 
 def get_datacheck_files(pattern: str, directory: Path) -> list:
    """Return a list of files matching the pattern."""
-
+    if pattern=="datacheck_dl1*.pdf":
+        return sorted(directory.glob("tailcut*/datacheck/"+pattern))
+    else:
+        return sorted(directory.glob(pattern))
 
 
 def datacheck_directory(data_type: str, date: str) -> Path:
    """Returns the path to the datacheck directory given the data type."""
    if data_type in {"PEDESTAL", "CALIB"}:
-        directory = Path(cfg.get("LST1", f"{data_type}_DIR")) / date / "pro/log"
+        directory = Path(cfg.get("LST1", f"CAT_A_{data_type}_DIR")) / date / "pro/log"
    elif data_type == "DL1AB":
-        directory =
+        directory = Path(cfg.get("LST1", f"{data_type}_DIR")) / date / options.prod_id
    elif data_type == "LONGTERM":
        directory = Path(cfg.get("LST1", f"{data_type}_DIR")) / options.prod_id / date
    else:

@@ -272,7 +276,7 @@ def datacheck_directory(data_type: str, date: str) -> Path:
    return directory
 
 
-def destination_dir(concept: str, create_dir: bool = True) -> Path:
+def destination_dir(concept: str, create_dir: bool = True, dl1_prod_id: str = None, dl2_prod_id: str = None) -> Path:
    """
    Create final destination directory for each data level.
    See Also osa.utils.register_run_concept_files

@@ -303,7 +307,7 @@ def destination_dir(concept: str, create_dir: bool = True) -> Path:
            Path(cfg.get(options.tel_id, "DL1_DIR"))
            / nightdir
            / options.prod_id
-            /
+            / dl1_prod_id
            / "datacheck"
        )
    elif concept == "DL1AB":

@@ -311,13 +315,14 @@ def destination_dir(concept: str, create_dir: bool = True) -> Path:
            Path(cfg.get(options.tel_id, "DL1_DIR"))
            / nightdir
            / options.prod_id
-            /
+            / dl1_prod_id
        )
    elif concept in {"DL2", "DL3"}:
        directory = (
            (Path(cfg.get(options.tel_id, f"{concept}_DIR")) / nightdir)
            / options.prod_id
-
+            / dl2_prod_id
+        )
    elif concept in {"PEDESTAL", "CALIB", "TIMECALIB"}:
        directory = (
            Path(cfg.get(options.tel_id, f"{concept}_DIR"))

@@ -356,7 +361,8 @@ def get_latest_version_file(longterm_files: List[str]) -> Path:
    return max(
        longterm_files,
        key=lambda path: int(path.parents[1].name.split(".")[1])
-        if path.parents[1].name.startswith("v")
+        if path.parents[1].name.startswith("v") and
+        re.match(r'^\d+\.\d+(\.\d+)?$', path.parents[1].name[1:])
        else "",
    )
 

@@ -397,6 +403,7 @@ def create_longterm_symlink(cherenkov_job_id: str = None):
    else:
        log.warning(f"Job {cherenkov_job_id} (lstchain_cherenkov_transparency) did not finish successfully.")
 
+
 def dl1_datacheck_longterm_file_exits() -> bool:
    """Return true if the longterm DL1 datacheck file was already produced."""
    nightdir = utils.date_to_dir(options.date)

@@ -404,3 +411,79 @@ def dl1_datacheck_longterm_file_exits() -> bool:
    longterm_file = longterm_dir / options.prod_id / nightdir / f"DL1_datacheck_{nightdir}.h5"
    return longterm_file.exists()
 
+
+def catB_closed_file_exists(run_id: int) -> bool:
+    catB_closed_file = Path(options.directory) / f"catB_{run_id:05d}.closed"
+    return catB_closed_file.exists()
+
+
+def catB_calibration_file_exists(run_id: int) -> bool:
+    catB_calib_base_dir = Path(cfg.get("LST1","CAT_B_CALIB_BASE"))
+    prod_id = utils.get_lstchain_version()
+    night_dir = utils.date_to_dir(options.date)
+    filters = utils.get_calib_filters(run_id)
+    catB_calib_dir = catB_calib_base_dir / "calibration" / night_dir / prod_id
+    catB_calib_file = catB_calib_dir / f"cat_B_calibration_filters_{filters}.Run{run_id:05d}.h5"
+    return catB_calib_file.exists()
+
+
+def get_dl1_prod_id(config_filename):
+    with open(config_filename) as json_file:
+        data = json.load(json_file)
+
+    picture_thresh = data["tailcuts_clean_with_pedestal_threshold"]["picture_thresh"]
+    boundary_thresh = data["tailcuts_clean_with_pedestal_threshold"]["boundary_thresh"]
+
+    if boundary_thresh == 4:
+        return f"tailcut{picture_thresh}{boundary_thresh}"
+    else:
+        return f"tailcut{picture_thresh}{boundary_thresh:02d}"
+
+
+def get_dl2_nsb_prod_id(rf_model: Path) -> str:
+    match = re.search(r'nsb_tuning_\d+\.\d+', str(rf_model))
+    if not match:
+        log.warning(f"No 'nsb_tuning_X.XX' pattern found in the path:\n{rf_model}")
+        sys.exit(1)
+    else:
+        return match.group(0)
+
+
+def get_dl1_prod_id_and_config(run_id: int) -> str:
+    if not cfg.getboolean("lstchain", "apply_standard_dl1b_config"):
+        tailcuts_finder_dir = Path(cfg.get(options.tel_id, "TAILCUTS_FINDER_DIR"))
+        dl1b_config_file = tailcuts_finder_dir / f"dl1ab_Run{run_id:05d}.json"
+        if not dl1b_config_file.exists() and not options.simulate:
+            log.error(
+                f"The dl1b config file was not created yet for run {run_id:05d}. "
+                "Please try again later."
+            )
+            sys.exit(1)
+        else:
+            dl1_prod_id = get_dl1_prod_id(dl1b_config_file)
+            return dl1_prod_id, dl1b_config_file.resolve()
+    else:
+        dl1b_config_file = Path(cfg.get("lstchain", "dl1b_config"))
+        dl1_prod_id = cfg.get("LST1", "DL1_PROD_ID")
+        return dl1_prod_id, dl1b_config_file.resolve()
+
+
+def get_dl2_prod_id(run_id: int) -> str:
+    dl1_prod_id = get_dl1_prod_id_and_config(run_id)[0]
+    rf_model = utils.get_RF_model(run_id)
+    nsb_prod_id = get_dl2_nsb_prod_id(rf_model)
+    return f"{dl1_prod_id}/{nsb_prod_id}"
+
+
+def all_dl1ab_config_files_exist(date: str) -> bool:
+    nightdir = date.replace("-","")
+    run_summary_dir = Path(cfg.get(options.tel_id, "RUN_SUMMARY_DIR"))
+    run_summary_file = run_summary_dir / f"RunSummary_{nightdir}.ecsv"
+    summary_table = Table.read(run_summary_file)
+    data_runs = summary_table[summary_table["run_type"] == "DATA"]
+    for run_id in data_runs["run_id"]:
+        tailcuts_finder_dir = Path(cfg.get(options.tel_id, "TAILCUTS_FINDER_DIR"))
+        dl1b_config_file = tailcuts_finder_dir / f"dl1ab_Run{run_id:05d}.json"
+        if not dl1b_config_file.exists():
+            return False
+    return True
osa/provenance/capture.py
CHANGED

@@ -53,7 +53,7 @@ LOG_FILENAME = provconfig["handlers"]["provHandler"]["filename"]
 PROV_PREFIX = provconfig["PREFIX"]
 SUPPORTED_HASH_METHOD = ["md5"]
 SUPPORTED_HASH_BUFFER = ["content", "path"]
-REDUCTION_TASKS = ["r0_to_dl1", "dl1ab", "dl1_datacheck", "dl1_to_dl2"]
+REDUCTION_TASKS = ["r0_to_dl1", "catB_calibration", "dl1ab", "dl1_datacheck", "dl1_to_dl2"]
 
 # global variables
 traced_entities = {}
osa/provenance/config/definition.yaml
CHANGED

@@ -200,6 +200,13 @@ activities:
 # filepath: /fefs/aswg/data/real/DL1/20200218/v0.4.3_v00/
 # size: 128
 
+  catB_calibration:
+    description:
+      "Create Cat-B calibration file for an observation run"
+    parameters:
+    usage:
+    generation:
+
   dl1ab:
     description:
       "Create DL1AB files for an observation run"
osa/provenance/utils.py
CHANGED

@@ -10,7 +10,7 @@ from osa.utils.utils import date_to_dir, get_lstchain_version
 
 __all__ = ["parse_variables", "get_log_config"]
 
-REDUCTION_TASKS = ["r0_to_dl1", "dl1ab", "dl1_datacheck", "dl1_to_dl2"]
+REDUCTION_TASKS = ["r0_to_dl1", "catB_calibration", "dl1ab", "dl1_datacheck", "dl1_to_dl2"]
 
 
 def parse_variables(class_instance):

@@ -40,20 +40,18 @@ def parse_variables(class_instance):
    configfile_dl1b = cfg.get("lstchain", "dl1b_config")
    configfile_dl2 = cfg.get("lstchain", "dl2_config")
    raw_dir = Path(cfg.get("LST1", "R0_DIR"))
-    rf_models_directory = Path(cfg.get("
+    rf_models_directory = Path(cfg.get("LST1", "RF_MODELS"))
    dl1_dir = Path(cfg.get("LST1", "DL1_DIR"))
    dl2_dir = Path(cfg.get("LST1", "DL2_DIR"))
-    calib_dir = Path(cfg.get("LST1", "
-    pedestal_dir = Path(cfg.get("LST1", "
+    calib_dir = Path(cfg.get("LST1", "CAT_A_CALIB_DIR"))
+    pedestal_dir = Path(cfg.get("LST1", "CAT_A_PEDESTAL_DIR"))
 
    class_instance.SoftwareVersion = get_lstchain_version()
    class_instance.ProcessingConfigFile = str(options.configfile)
    class_instance.ObservationDate = flat_date
    if class_instance.__name__ in REDUCTION_TASKS:
        muon_dir = dl1_dir / flat_date / options.prod_id / "muons"
-
-        outdir_dl2 = dl2_dir / flat_date / options.prod_id / options.dl2_prod_id
-
+
 
    if class_instance.__name__ in ["drs4_pedestal", "calibrate_charge"]:
        # drs4_pedestal_run_id [0] 1804
        # pedcal_run_id [1] 1805

@@ -111,6 +109,7 @@ def parse_variables(class_instance):
        run = run_subrun.split(".")[0]
        class_instance.ObservationRun = run
 
+        outdir_dl1 = dl1_dir / flat_date / options.prod_id
        calibration_file = Path(class_instance.args[0]).resolve()
        pedestal_file = Path(class_instance.args[1]).resolve()
        timecalibration_file = Path(class_instance.args[2]).resolve()

@@ -133,10 +132,16 @@ def parse_variables(class_instance):
        class_instance.InterleavedPedestalEventsFile = None
        if class_instance.args[6] is not None:
            class_instance.InterleavedPedestalEventsFile = str(Path(class_instance.args[6]))
+
+    if class_instance.__name__ == "catB_calibration":
+        class_instance.ObservationRun = class_instance.args[0].split(".")[0]
 
    if class_instance.__name__ == "dl1ab":
        # run_str [0] 02006.0000
+        # dl1b_config [1]
+        # dl1_prod_id [2]
 
+        outdir_dl1 = dl1_dir / flat_date / options.prod_id / class_instance.args[2]
        class_instance.Analysisconfigfile_dl1 = str(Path(configfile_dl1b))
        class_instance.ObservationRun = class_instance.args[0].split(".")[0]
        class_instance.StoreImage = cfg.getboolean("lstchain", "store_image_dl1ab")

@@ -146,9 +151,12 @@ def parse_variables(class_instance):
 
    if class_instance.__name__ == "dl1_datacheck":
        # run_str [0] 02006.0000
+        # dl1b_prod_id [1]
+
        run_subrun = class_instance.args[0]
        run = run_subrun.split(".")[0]
 
+        outdir_dl1 = dl1_dir / flat_date / options.prod_id / class_instance.args[1]
        class_instance.ObservationRun = run
        class_instance.DL1SubrunDataset = str(
            (outdir_dl1 / f"dl1_LST-1.Run{run_subrun}.h5").resolve()

@@ -168,9 +176,16 @@ def parse_variables(class_instance):
 
    if class_instance.__name__ == "dl1_to_dl2":
        # run_str [0] 02006.0000
+        # rf_model_path [1]
+        # dl1_prod_id [2]
+        # dl2_prod_id [3]
+
        run_subrun = class_instance.args[0]
        run = run_subrun.split(".")[0]
 
+        outdir_dl1 = dl1_dir / flat_date / options.prod_id / class_instance.args[2]
+        outdir_dl2 = dl2_dir / flat_date / options.prod_id / class_instance.args[3]
+
        class_instance.Analysisconfigfile_dl2 = configfile_dl2
        class_instance.ObservationRun = run
        class_instance.RFModelEnergyFile = str((rf_models_directory / "reg_energy.sav").resolve())
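As a rough illustration of what this means for the provenance capture (the paths, date and production IDs below are placeholders, not values from a real config), the DL1/DL2 output directories are now built from the per-task arguments rather than from a single global `dl2_prod_id` option:

```python
from pathlib import Path

# Placeholder values; in lstosa these come from cfg and from the traced task's args
dl1_dir = Path("/fefs/aswg/data/real/DL1")
dl2_dir = Path("/fefs/aswg/data/real/DL2")
flat_date, prod_id = "20240101", "v0.10"
dl1_prod_id = "tailcut84"                  # class_instance.args[2] for dl1ab / dl1_to_dl2
dl2_prod_id = "tailcut84/nsb_tuning_0.14"  # class_instance.args[3] for dl1_to_dl2

outdir_dl1 = dl1_dir / flat_date / prod_id / dl1_prod_id
outdir_dl2 = dl2_dir / flat_date / prod_id / dl2_prod_id
print(outdir_dl1 / "dl1_LST-1.Run02006.0000.h5")  # DL1 subrun dataset traced for dl1_datacheck
```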
osa/scripts/autocloser.py
CHANGED

@@ -266,21 +266,11 @@ class Sequence:
 
    def is_100(self, no_dl2: bool):
        """Check that all analysis products are 100% complete."""
-        if (
-            no_dl2
-            and self.dict_sequence["Tel"] != "ST"
-            and self.dict_sequence["DL1%"] == "100"
-            and self.dict_sequence["DL1AB%"] == "100"
-            and self.dict_sequence["MUONS%"] == "100"
-        ):
-            return True
-
        if (
            self.dict_sequence["Tel"] != "ST"
            and self.dict_sequence["DL1%"] == "100"
            and self.dict_sequence["DL1AB%"] == "100"
            and self.dict_sequence["MUONS%"] == "100"
-            and self.dict_sequence["DL2%"] == "100"
        ):
            return True
 
osa/scripts/calibration_pipeline.py
CHANGED

@@ -19,6 +19,7 @@ from osa.paths import drs4_pedestal_exists, calibration_file_exists
 from osa.provenance.capture import trace
 from osa.utils.cliopts import calibration_pipeline_cliparsing
 from osa.utils.logging import myLogger
+from osa.utils import utils
 from osa.workflow.stages import DRS4PedestalStage, ChargeCalibrationStage
 
 __all__ = [

@@ -37,30 +38,36 @@ def is_calibration_produced(drs4_pedestal_run_id: int, pedcal_run_id: int) -> bo
    Check if both daily calibration (DRS4 baseline and
    charge calibration) files are already produced.
    """
-
-
+    lstcam_env = Path(cfg.get("LST1", "CALIB_ENV"))
+    lstcam_calib_version = utils.get_lstcam_calib_version(lstcam_env)
+    return drs4_pedestal_exists(drs4_pedestal_run_id, f"v{lstcam_calib_version}") \
+        and calibration_file_exists(pedcal_run_id, f"v{lstcam_calib_version}")
 
 
 def drs4_pedestal_command(drs4_pedestal_run_id: int) -> list:
    """Build the create_drs4_pedestal command."""
    base_dir = Path(cfg.get("LST1", "BASE")).resolve()
+    r0_dir = Path(cfg.get("LST1", "R0_DIR")).resolve()
    command = cfg.get("lstchain", "drs4_baseline")
    return [
        command,
        "-r", str(drs4_pedestal_run_id),
        "-b", base_dir,
+        f"--r0-dir={r0_dir}",
        "--no-progress",
    ]
 
 def calibration_file_command(drs4_pedestal_run_id: int, pedcal_run_id: int) -> list:
    """Build the create_calibration_file command."""
    base_dir = Path(cfg.get("LST1", "BASE")).resolve()
+    r0_dir = Path(cfg.get("LST1", "R0_DIR")).resolve()
    command = cfg.get("lstchain", "charge_calibration")
    cmd = [
        command,
        "-p", str(drs4_pedestal_run_id),
        "-r", str(pedcal_run_id),
        "-b", base_dir,
+        f"--r0-dir={r0_dir}",
    ]
    # In case of problems with trigger tagging:
    if cfg.getboolean("lstchain", "use_ff_heuristic_id"):