lstosa 0.10.10__py3-none-any.whl → 0.10.12__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: lstosa
-Version: 0.10.10
+Version: 0.10.12
 Summary: Onsite analysis pipeline for the CTA LST-1
 Author: María Láinez, José Enrique Ruiz, Lab Saha, Andrés Baquero, José Luis Contreras, Maximilian Linhoff
 Author-email: Daniel Morcuende <dmorcuen@ucm.es>
@@ -1,9 +1,9 @@
 osa/__init__.py,sha256=crotf1NMTfNdZuCua_5T_jk3kvZrAAwVw4FPrfxv994,193
-osa/_version.py,sha256=6hfVr0QQ3mWElH0wg5A8_M5KIbDPnD4zKdk55gVEMik,415
-osa/conftest.py,sha256=_NERtB9t-Oi7ykW4QDYy9ZbgsrYxXMmQ3QkDScH5JyE,19601
+osa/_version.py,sha256=iptzZkijfZOuLJEgzcWGVRKqYSdbRz1HyJHUd9sgNhw,415
+osa/conftest.py,sha256=NBeGqTUBRqCPirDSDPny4bf1e_OJXbiePazHwaoQPY4,20072
 osa/job.py,sha256=OnjF88kTVdURcrIR9iPenATNx2HteDFlAKtOX4fD144,26603
 osa/osadb.py,sha256=pkCuYbEG-moHG0uQHxwB7giQAv2XTld4HJ5gdn1F1hA,2422
-osa/paths.py,sha256=bLV-eIVbpvFeBy2xvsmkfE_2CxZOVNIYUuRNrVC0G0k,12954
+osa/paths.py,sha256=R-LwBlyoIJ-PuTJ8vcSYWMwzQY7YrgXq7gan0zhVzPY,14399
 osa/raw.py,sha256=ZNIsuqfx5ljoz_hwhSuafdKf-wr8-cxRJmel-A2endg,1337
 osa/report.py,sha256=sL2V7n8Y_UUaSDbWJY2o4UxDb4FU5AaFIRR8R25DB8o,4634
 osa/version.py,sha256=9T2TtuGBQeOy5PJDxMCeGlqx5baxLaq47VmFTDc09z8,796
@@ -12,7 +12,7 @@ osa/configs/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 osa/configs/config.py,sha256=cX0Vr4sorBVQ2KRqPIRSEGENKH_uC5lrgVHx1hp6YTk,1148
 osa/configs/datamodel.py,sha256=L_WRM91PBlMrtuE30akh7YR-56P0g9D994qzKSfhNJc,1950
 osa/configs/options.py,sha256=CyL7WnHiC_pvB3mnjRF7Wg43uPzQgmwlbvIqkRzlDLA,524
-osa/configs/sequencer.cfg,sha256=cysPy6q8vShh0V0LiEBfip-rUmwyNlNVDKhGPMiKH5g,4784
+osa/configs/sequencer.cfg,sha256=vG-6VS2osCccEsY58JgaqqADUDz-k8-9tD3NDitscR4,4871
 osa/high_level/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 osa/high_level/selection_cuts.toml,sha256=ReSmcKtOPZY5JsZ9ExnxYdz7OrJEB8gghCbzHmeOyFg,128
 osa/high_level/significance.py,sha256=Y1jokkHCo-D_qSqxKiQzc6KJSmivznaJRS2xY-txNIo,9039
@@ -34,23 +34,22 @@ osa/provenance/config/definition.yaml,sha256=DSwqNln1jEXV8aUh7ca2r7ArMkpaMJi1xbH
 osa/provenance/config/environment.yaml,sha256=kPM6ucPyLZLDFzkwFWoY0C6vmAArG98U-P1UAl89bgE,246
 osa/provenance/config/logger.yaml,sha256=hy_lH3DfbRFh2VM_iawI-c-3wE0cjTRHy465C2eFfnQ,510
 osa/scripts/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-osa/scripts/autocloser.py,sha256=j7nRvglIv_Ol_GJzbcfqPHquBIWwQU515GkDRfc6_ho,14540
+osa/scripts/autocloser.py,sha256=Z4x4yfDA2Xkr-6ld57L4VjWcVse-sLqkGuE6ZFgSXps,14716
 osa/scripts/calibration_pipeline.py,sha256=g9o1chqCRRSo7GNuQZRABjGnJYjZyfhkPgRLPmuV994,5703
-osa/scripts/closer.py,sha256=FPqUHaZcKyI20u9PhBCSqO-IXa1El3w_0DRXTKBTVTg,17778
+osa/scripts/closer.py,sha256=p11tq2YJLwsRK_CBWXU1465ArzLJ0qWlUCnubGqLPUo,18277
 osa/scripts/copy_datacheck.py,sha256=tfDs6oTdPbii4BOXp6bTHuED0xNJeqaPFrv6Ed7ZnWc,3104
 osa/scripts/datasequence.py,sha256=gXAp8arbLPEK-sca9VnME6-2XfUzBFIoEFchlUZYrXI,9260
-osa/scripts/gain_selection.py,sha256=yaz2fKicuCDgLbD3fhb3l9LBlXLEEO3yuZO-oDc2IPo,8084
+osa/scripts/gain_selection.py,sha256=0ZOFgX-ZaJFoe_B0UI5NYRr7CmeHL1_bxjlB0n8E5hs,13775
 osa/scripts/provprocess.py,sha256=mufkZe6_qwH3DGqTFxINIc01hciF5RMpw3n_Mp7vwXU,18629
 osa/scripts/reprocess_longterm.py,sha256=wMfc3UVwickkGFiviIhOlB9ebMIqQPWoUrgg8hQ78Lg,2138
 osa/scripts/reprocessing.py,sha256=D-J8Rl3GrkWpxYkk6ci79oJOMewgGdxLkQgaHCAZuqs,3417
-osa/scripts/sequencer.py,sha256=6Cg-eIExk0eN8-HXkO6DBeFEWOI-FznDueqJEpbEzos,8163
+osa/scripts/sequencer.py,sha256=GM6B17YQNJfBOosWx0gpXBy4fIwga3DC4T2nGapGQEU,8763
 osa/scripts/sequencer_webmaker.py,sha256=99P1rJiBlvNidDeppvtw4kOoAfsOxSMJ2sm4xlgKCE8,4640
-osa/scripts/show_run_summary.py,sha256=UpdTDRfncdUBKhPCy3reCKSk_saOsTLeMdyZHUV300Q,5197
-osa/scripts/show_run_summary_tcu.py,sha256=SoDLVKdQHOJkfenFguBOfXf10Gyv7heXSQAFnDVZqMs,2468
+osa/scripts/show_run_summary.py,sha256=SoDLVKdQHOJkfenFguBOfXf10Gyv7heXSQAFnDVZqMs,2468
 osa/scripts/simulate_processing.py,sha256=NiRVYiwZENt_mnKncytgJT23_-tJMb1B5PswM12nnX4,6941
 osa/scripts/update_source_catalog.py,sha256=GHwWFc-y6S4KkUJxUVM5drdAnVDD0-n3D-Tv3CCmh4E,7218
 osa/scripts/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-osa/scripts/tests/test_osa_scripts.py,sha256=1B2UFc3idLGSyqeuzf4IEvb6sEbtfaV8ogj1LoEoEyQ,12851
+osa/scripts/tests/test_osa_scripts.py,sha256=7xms2Jq2ZalU5kiHBBbtFOxBzXhrybsNlT16nEtFTxU,12926
 osa/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 osa/tests/test_jobs.py,sha256=F0jsHZ9BYB_cCHHxlXyO9v1E5_-mBJhuFtshtsAtnXo,15260
 osa/tests/test_osa.py,sha256=QCOsjUgPuNMHoef3Ym2sDXVjun2LaBrfKyroAIH-os8,415
@@ -60,7 +59,7 @@ osa/tests/test_raw.py,sha256=WkgwEc_vY0D6nREo-BSm6F-5xDpqidMC0DkS86pXlRU,1058
 osa/tests/test_report.py,sha256=OY-EsrXytoS6esfjUeLnIAmCMIw9EzoGD-elySafyhE,1365
 osa/tests/test_veto.py,sha256=UIsooji_5Z8TtAhc0UlD2VqheVd9DBufuMxinJ3e0w8,1066
 osa/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-osa/utils/cliopts.py,sha256=Xqh-9w2o1iSHfwO0jHEW0AXQ9VeEw6X6IGLNAPKoNyc,13699
+osa/utils/cliopts.py,sha256=XeVu6TjrIEoqQEqf4meIKSiRyobMqYTLuggj-iGU4SQ,14126
 osa/utils/iofile.py,sha256=kJ7KB1suynhS2cTf7EeHwhMXq3delC_ls2HFpCzvsZo,2021
 osa/utils/logging.py,sha256=1WcNPjjslo3y25jcEY_fe0yXOeJ6frZrGLAy1GJpu_k,1491
 osa/utils/mail.py,sha256=uQfqPQdiOVHTvEAXr9H15a7-g9DtYVNKjMEb9GnI0oY,554
@@ -75,9 +74,9 @@ osa/workflow/dl3.py,sha256=kz7L5jcKHFJ--UdQ8HQKLzWO6nxc2LLOTz42ExcqzTk,9921
 osa/workflow/stages.py,sha256=WYgUM2XDIaUjCc4_Zs_VSGW6gk73EaKcHk6ZMnPds74,6692
 osa/workflow/tests/test_dl3.py,sha256=aY5bb-8OcZGAXG3JPCZihChzkA_GsWjRIa31BHZn3Dg,299
 osa/workflow/tests/test_stages.py,sha256=TmC00XFACWZp740TQeFaokWi3C50ovj_XGiySWrrdZk,3944
-lstosa-0.10.10.dist-info/LICENSE,sha256=h6iWot11EtMvaDaS_AvCHKLTNByO5wEbMyNj1c90y1c,1519
-lstosa-0.10.10.dist-info/METADATA,sha256=OQ2iUZTU6KQsB6QWFtshoxx_U1Wqp7FOQehLhBSlaVU,7349
-lstosa-0.10.10.dist-info/WHEEL,sha256=oiQVh_5PnQM0E3gPdiz09WCNmwiHDMaGer_elqB3coM,92
-lstosa-0.10.10.dist-info/entry_points.txt,sha256=e5x7xddaqZhfdZPsErhHInqR4UGHsxXIlylEbTie0_8,928
-lstosa-0.10.10.dist-info/top_level.txt,sha256=_Tj8zVHdrOoWZuuWTHbDpNofxW0imUmKdlXhnxsXJek,4
-lstosa-0.10.10.dist-info/RECORD,,
+lstosa-0.10.12.dist-info/LICENSE,sha256=h6iWot11EtMvaDaS_AvCHKLTNByO5wEbMyNj1c90y1c,1519
+lstosa-0.10.12.dist-info/METADATA,sha256=HmGZoRfvvKndHKpKPPxBbsC85Rj7Eel1Q9lQhbZyJoM,7349
+lstosa-0.10.12.dist-info/WHEEL,sha256=GJ7t_kWBFywbagK5eo9IoUwLW6oyOeTKmQ-9iHFVNxQ,92
+lstosa-0.10.12.dist-info/entry_points.txt,sha256=e5x7xddaqZhfdZPsErhHInqR4UGHsxXIlylEbTie0_8,928
+lstosa-0.10.12.dist-info/top_level.txt,sha256=_Tj8zVHdrOoWZuuWTHbDpNofxW0imUmKdlXhnxsXJek,4
+lstosa-0.10.12.dist-info/RECORD,,
@@ -1,5 +1,5 @@
 Wheel-Version: 1.0
-Generator: bdist_wheel (0.42.0)
+Generator: bdist_wheel (0.43.0)
 Root-Is-Purelib: true
 Tag: py3-none-any
osa/_version.py CHANGED
@@ -12,5 +12,5 @@ __version__: str
 __version_tuple__: VERSION_TUPLE
 version_tuple: VERSION_TUPLE

-__version__ = version = '0.10.10'
-__version_tuple__ = version_tuple = (0, 10, 10)
+__version__ = version = '0.10.12'
+__version_tuple__ = version_tuple = (0, 10, 12)
osa/configs/sequencer.cfg CHANGED
@@ -28,6 +28,7 @@ HIGH_LEVEL_DIR: %(OSA_DIR)s/HighLevel
 LONGTERM_DIR: %(OSA_DIR)s/DL1DataCheck_LongTerm
 MERGED_SUMMARY: %(OSA_DIR)s/Catalog/merged_RunSummary.ecsv
 SEQUENCER_WEB_DIR: %(OSA_DIR)s/SequencerWeb
+GAIN_SELECTION_FLAG_DIR: %(OSA_DIR)s/GainSel

 # To be set by the user. Using PROD-ID will overcome the automatic
 # fetching of lstchain version. Otherwise leave it empty (and without the colon symbol).
@@ -138,6 +139,7 @@ WORKFLOWPREFIX: Workflow
 GRAPHSUFFIX: .dot
 SVGSUFFIX: .svg
 end_of_activity: NightFinished.txt
+gain_selection_check: GainSelFinished.txt

 [OUTPUT]
 # REPORTWIDTH is the width in characters of the heading frame for the output
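The two new keys work together: GAIN_SELECTION_FLAG_DIR points at the per-night flag-file tree and gain_selection_check names the flag file itself. A minimal sketch of how they resolve, assuming sequencer.cfg loads with the standard library configparser and that OSA_DIR is interpolated via the %(OSA_DIR)s syntax used above (the section names "LST1" and "LSTOSA" are the ones used by the cfg.get() calls added to gain_selection.py further down):

    # Minimal sketch, not lstosa's own config loader.
    from configparser import ConfigParser
    from pathlib import Path

    cfg = ConfigParser()
    cfg.read("sequencer.cfg")

    flag_dir = Path(cfg.get("LST1", "GAIN_SELECTION_FLAG_DIR"))  # .../OSA/GainSel
    flag_name = cfg.get("LSTOSA", "gain_selection_check")        # GainSelFinished.txt

    # Per-night flag file, e.g. for the night 20200117
    flag_file = flag_dir / "20200117" / flag_name
    print(flag_file, flag_file.exists())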
osa/conftest.py CHANGED
@@ -48,6 +48,13 @@ def monitoring_dir(base_test_dir):
     return monitoring_dir


+@pytest.fixture(scope="session")
+def osa_dir(base_test_dir):
+    osa_dir = base_test_dir / "OSA"
+    osa_dir.mkdir(parents=True, exist_ok=True)
+    return osa_dir
+
+
 @pytest.fixture(scope="session")
 def run_summary_dir(monitoring_dir):
     summary_dir = monitoring_dir / "RunSummary"
@@ -436,6 +443,8 @@ def sequence_file_list(
     drs4_time_calibration_files,
     systematic_correction_files,
     r0_data,
+    gain_selection_flag_file,
+    merged_run_summary,
 ):
     for r0_file in r0_data:
         assert r0_file.exists()
@@ -448,6 +457,8 @@ def sequence_file_list(

     assert run_summary_file.exists()
     assert run_catalog.exists()
+    assert gain_selection_flag_file.exists()
+    assert merged_run_summary.exists()

     run_program("sequencer", "-d", "2020-01-17", "--no-submit", "-t", "LST1")
     # First sequence in the list corresponds to the calibration run 1809
@@ -548,11 +559,9 @@ def run_catalog(run_catalog_dir):


 @pytest.fixture(scope="session")
-def database(base_test_dir):
+def database(osa_dir):
     import sqlite3

-    osa_dir = base_test_dir / "OSA"
-    osa_dir.mkdir(parents=True, exist_ok=True)
     db_file = osa_dir / "osa.db"
     with sqlite3.connect(db_file) as connection:
         cursor = connection.cursor()
@@ -562,3 +571,13 @@ def database(base_test_dir):
         )
         cursor.connection.commit()
         yield cursor
+
+
+@pytest.fixture(scope="session")
+def gain_selection_flag_file(osa_dir):
+
+    GainSel_dir = osa_dir / "GainSel" / "20200117"
+    GainSel_dir.mkdir(parents=True, exist_ok=True)
+    file = GainSel_dir / "GainSelFinished.txt"
+    file.touch()
+    return file
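The session-scoped osa_dir fixture factors out the directory setup that the database fixture previously did inline, and gain_selection_flag_file builds on it. A hypothetical test (not part of this diff) showing how pytest resolves the chain base_test_dir -> osa_dir -> gain_selection_flag_file:

    # Hypothetical usage of the new fixtures.
    def test_gain_selection_flag_file(gain_selection_flag_file, osa_dir):
        assert gain_selection_flag_file.name == "GainSelFinished.txt"
        assert gain_selection_flag_file.parent == osa_dir / "GainSel" / "20200117"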
osa/paths.py CHANGED
@@ -5,6 +5,8 @@ import re
 from datetime import datetime
 from pathlib import Path
 from typing import List
+import subprocess
+import time

 import lstchain
 from astropy.table import Table
@@ -359,15 +361,46 @@ def get_latest_version_file(longterm_files: List[str]) -> Path:
     )


-def create_longterm_symlink():
+def is_job_completed(job_id: str):
+    """
+    Check whether SLURM job `job_id` has finished.
+
+    It keeps checking every 10 minutes for one hour.
+    """
+    n_max = 10
+    n = 0
+    while n < n_max:
+        # Check if the status of the SLURM job is "COMPLETED"
+        status = subprocess.run(["sacct", "--format=state", "--jobs", job_id], capture_output=True, text=True)
+        if "COMPLETED" in status.stdout:
+            log.debug(f"Job {job_id} finished successfully!")
+            return True
+        n += 1
+        log.debug(f"Job {job_id} is not completed yet, checking again in 10 minutes...")
+        time.sleep(600)  # wait 10 minutes to check again
+    log.info(f"The maximum number of checks of job {job_id} was reached, job {job_id} did not finish successfully yet.")
+    return False
+
+
+def create_longterm_symlink(cherenkov_job_id: str = None):
     """If the created longterm DL1 datacheck file corresponds to the latest
     version available, make symlink to it in the "all" common directory."""
+    if not cherenkov_job_id or is_job_completed(cherenkov_job_id):
+        nightdir = utils.date_to_dir(options.date)
+        longterm_dir = Path(cfg.get("LST1", "LONGTERM_DIR"))
+        linked_longterm_file = longterm_dir / f"night_wise/all/DL1_datacheck_{nightdir}.h5"
+        all_longterm_files = longterm_dir.rglob(f"v*/{nightdir}/DL1_datacheck_{nightdir}.h5")
+        latest_version_file = get_latest_version_file(all_longterm_files)
+        log.info("Symlink the latest version longterm DL1 datacheck file in the common directory.")
+        linked_longterm_file.unlink(missing_ok=True)
+        linked_longterm_file.symlink_to(latest_version_file)
+    else:
+        log.warning(f"Job {cherenkov_job_id} (lstchain_cherenkov_transparency) did not finish successfully.")
+
+
+def dl1_datacheck_longterm_file_exits() -> bool:
+    """Return true if the longterm DL1 datacheck file was already produced."""
     nightdir = utils.date_to_dir(options.date)
     longterm_dir = Path(cfg.get("LST1", "LONGTERM_DIR"))
-    linked_longterm_file = longterm_dir / f"night_wise/all/DL1_datacheck_{nightdir}.h5"
-    all_longterm_files = longterm_dir.rglob(f"v*/{nightdir}/DL1_datacheck_{nightdir}.h5")
-    latest_version_file = get_latest_version_file(all_longterm_files)
+    longterm_file = longterm_dir / options.prod_id / nightdir / f"DL1_datacheck_{nightdir}.h5"
+    return longterm_file.exists()

-    log.info("Symlink the latest version longterm DL1 datacheck file in the common directory.")
-    linked_longterm_file.unlink(missing_ok=True)
-    linked_longterm_file.symlink_to(latest_version_file)
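is_job_completed lets create_longterm_symlink defer the symlink until the lstchain_cherenkov_transparency job has actually finished: up to 10 sacct checks spaced 10 minutes apart, i.e. roughly an hour of blocking in the worst case. The polling pattern, condensed (interval shortened for illustration; the real code logs progress and sleeps 600 s between checks):

    import subprocess
    import time

    def wait_for_slurm_job(job_id: str, n_max: int = 10, interval: int = 5) -> bool:
        for _ in range(n_max):
            # sacct prints the job state(s); "COMPLETED" appears on success
            status = subprocess.run(
                ["sacct", "--format=state", "--jobs", job_id],
                capture_output=True,
                text=True,
            )
            if "COMPLETED" in status.stdout:
                return True
            time.sleep(interval)
        return False  # gave up after n_max checks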
osa/scripts/autocloser.py CHANGED
@@ -41,6 +41,7 @@ class Telescope:
         config_file: Path,
         ignore_cronlock: bool = False,
         test: bool = False,
+        no_gainsel: bool = False,
     ):
         """
         Parameters
@@ -82,7 +83,7 @@ class Telescope:
         if not self.lock_automatic_sequencer() and not ignore_cronlock:
             log.warning(f"{self.telescope} already locked! Ignoring {self.telescope}")
             return
-        if not self.simulate_sequencer(date, config_file, test):
+        if not self.simulate_sequencer(date, config_file, test, no_gainsel):
             log.warning(
                 f"Simulation of the sequencer failed "
                 f"for {self.telescope}! Ignoring {self.telescope}"
@@ -121,7 +122,7 @@ class Telescope:
         self.locked = True
         return True

-    def simulate_sequencer(self, date: str, config_file: Path, test: bool):
+    def simulate_sequencer(self, date: str, config_file: Path, test: bool, no_gainsel: bool):
         """Launch the sequencer in simulation mode."""
         if test:
             self.read_file()
@@ -135,6 +136,9 @@ class Telescope:
             date,
             self.telescope,
         ]
+        if no_gainsel:
+            sequencer_cmd.insert(1, "--no-gainsel")
+
         log.debug(f"Executing {' '.join(sequencer_cmd)}")
         sequencer = subprocess.Popen(
             sequencer_cmd,
@@ -445,7 +449,7 @@ def main():
     # create telescope and sequence objects
     log.info("Simulating sequencer...")

-    telescope = Telescope(args.tel_id, date, args.config)
+    telescope = Telescope(args.tel_id, date, args.config, no_gainsel=args.no_gainsel)

     log.info(f"Processing {args.tel_id}...")
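Because sequencer_cmd is a plain list, insert(1, ...) splices the flag in right after the executable name and leaves the remaining arguments untouched. Illustrative only; the actual command list is assembled a few lines earlier and its full contents are elided in this diff:

    sequencer_cmd = ["sequencer", "-c", "sequencer.cfg", "2020-01-17", "LST1"]  # placeholder contents
    sequencer_cmd.insert(1, "--no-gainsel")
    # -> ["sequencer", "--no-gainsel", "-c", "sequencer.cfg", "2020-01-17", "LST1"]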
osa/scripts/closer.py CHANGED
@@ -24,7 +24,11 @@ from osa.job import (
 )
 from osa.nightsummary.extract import extract_runs, extract_sequences
 from osa.nightsummary.nightsummary import run_summary_table
-from osa.paths import destination_dir, create_longterm_symlink
+from osa.paths import (
+    destination_dir,
+    create_longterm_symlink,
+    dl1_datacheck_longterm_file_exits
+)
 from osa.raw import is_raw_data_available
 from osa.report import start
 from osa.utils.cliopts import closercliparsing
@@ -154,38 +158,42 @@ def ask_for_closing():
 def post_process(seq_tuple):
     """Set of last instructions."""
     seq_list = seq_tuple[1]
+
+    if dl1_datacheck_longterm_file_exits() and not options.test:
+        create_longterm_symlink()

-    # Close the sequences
-    post_process_files(seq_list)
+    else:
+        # Close the sequences
+        post_process_files(seq_list)

-    # Merge DL1 datacheck files and produce PDFs. It also produces
-    # the daily datacheck report using the longterm script, and updates
-    # the longterm DL1 datacheck file with the cherenkov_transparency script.
-    if cfg.getboolean("lstchain", "merge_dl1_datacheck"):
-        list_job_id = merge_dl1_datacheck(seq_list)
-        longterm_job_id = daily_datacheck(daily_longterm_cmd(list_job_id))
-        cherenkov_transparency(cherenkov_transparency_cmd(longterm_job_id))
-        create_longterm_symlink()
+        # Extract the provenance info
+        extract_provenance(seq_list)

-    # Extract the provenance info
-    extract_provenance(seq_list)
+        # Merge DL1b files run-wise
+        merge_files(seq_list, data_level="DL1AB")

-    # Merge DL1b files run-wise
-    merge_files(seq_list, data_level="DL1AB")
+        merge_muon_files(seq_list)

-    merge_muon_files(seq_list)
+        # Merge DL2 files run-wise
+        if not options.no_dl2:
+            merge_files(seq_list, data_level="DL2")

-    # Merge DL2 files run-wise
-    if not options.no_dl2:
-        merge_files(seq_list, data_level="DL2")
+        # Merge DL1 datacheck files and produce PDFs. It also produces
+        # the daily datacheck report using the longterm script, and updates
+        # the longterm DL1 datacheck file with the cherenkov_transparency script.
+        if cfg.getboolean("lstchain", "merge_dl1_datacheck"):
+            list_job_id = merge_dl1_datacheck(seq_list)
+            longterm_job_id = daily_datacheck(daily_longterm_cmd(list_job_id))
+            cherenkov_job_id = cherenkov_transparency(cherenkov_transparency_cmd(longterm_job_id))
+            create_longterm_symlink(cherenkov_job_id)

-    time.sleep(600)
+        time.sleep(600)

     # Check if all jobs launched by autocloser finished correctly
     # before creating the NightFinished.txt file
     n_max = 6
     n = 0
-    while not all_closer_jobs_finished_correctly() & n <= n_max:
+    while not all_closer_jobs_finished_correctly() and n <= n_max:
         log.info(
             "All jobs launched by autocloser did not finish correctly yet. "
             "Checking again in 10 minutes..."
@@ -194,7 +202,7 @@ def post_process(seq_tuple):
         n += 1

     if n > n_max:
-        send_warning_mail(date=options.date)
+        send_warning_mail(date=date_to_iso(options.date))
         return False

     if options.seqtoclose is None:
@@ -536,6 +544,7 @@ def cherenkov_transparency_cmd(longterm_job_id: str) -> List[str]:

     return [
         "sbatch",
+        "--parsable",
         "-D",
         options.directory,
         "-o",
@@ -553,7 +562,16 @@ def cherenkov_transparency(cmd: List[str]):
     log.debug(f"Executing {stringify(cmd)}")

     if not options.simulate and not options.test and shutil.which("sbatch") is not None:
-        subprocess.run(cmd, check=True)
+        job = subprocess.run(
+            cmd,
+            encoding="utf-8",
+            capture_output=True,
+            text=True,
+            check=True,
+        )
+        job_id = job.stdout.strip()
+        return job_id
+
     else:
         log.debug("Simulate launching scripts")
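Two details make the returned id usable downstream: sbatch --parsable prints only the job id (optionally followed by ";cluster") instead of the usual "Submitted batch job N" banner, and cherenkov_transparency now captures that stdout so post_process can hand it to create_longterm_symlink, which polls it with is_job_completed. The capture pattern in isolation (a sketch; the script name is a placeholder):

    import subprocess

    def submit(cmd):
        job = subprocess.run(cmd, capture_output=True, text=True, check=True)
        # --parsable output is "<jobid>" or "<jobid>;<cluster>"
        return job.stdout.strip().split(";")[0]

    # job_id = submit(["sbatch", "--parsable", "pilot.sh"])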
osa/scripts/gain_selection.py CHANGED
@@ -8,8 +8,8 @@ import subprocess as sp
 from pathlib import Path
 from textwrap import dedent
 from io import StringIO
+import argparse

-import click
 from astropy.table import Table
 from lstchain.paths import run_info_from_filename, parse_r0_filename
@@ -17,51 +17,131 @@ from osa.scripts.reprocessing import get_list_of_dates, check_job_status_and_wai
 from osa.utils.utils import wait_for_daytime
 from osa.utils.logging import myLogger
 from osa.job import get_sacct_output, FORMAT_SLURM
+from osa.configs.config import cfg
+from osa.paths import DEFAULT_CFG

 log = myLogger(logging.getLogger(__name__))

 PATH = "PATH=/fefs/aswg/software/offline_dvr/bin:$PATH"

+parser = argparse.ArgumentParser(add_help=False)
+parser.add_argument(
+    "--check",
+    action="store_true",
+    default=False,
+    help="Check if any job failed",
+)
+parser.add_argument(
+    "--no-queue-check",
+    action="store_true",
+    default=False,
+    help="Do not wait until the number of jobs in the slurm queue is < 1500",
+)
+parser.add_argument(
+    "-c",
+    "--config",
+    action="store",
+    type=Path,
+    default=DEFAULT_CFG,
+    help="Configuration file",
+)
+parser.add_argument(
+    "-d",
+    "--date",
+    default=None,
+    type=str,
+    help="Night to apply the gain selection",
+)
+parser.add_argument(
+    "-l",
+    "--dates-file",
+    default=None,
+    help="List of dates to apply the gain selection",
+)
+parser.add_argument(
+    "-o",
+    "--output-basedir",
+    type=Path,
+    default=Path("/fefs/aswg/data/real/R0G"),
+    help="Output directory of the gain selected files. Default is /fefs/aswg/data/real/R0G."
+)
+parser.add_argument(
+    "-s",
+    "--start-time",
+    type=int,
+    default=10,
+    help="Time to (re)start gain selection in HH format. Default is 10.",
+)
+parser.add_argument(
+    "-e",
+    "--end-time",
+    type=int,
+    default=18,
+    help="Time to stop gain selection in HH format. Default is 18.",
+)

 def get_sbatch_script(
-    run_id, subrun, input_file, output_dir, log_dir, ref_time, ref_counter, module, ref_source
+    run_id, subrun, input_file, output_dir, log_dir, log_file, ref_time, ref_counter, module, ref_source, script
 ):
     """Build the sbatch job pilot script for running the gain selection."""
-    return dedent(
-        f"""\
-        #!/bin/bash
-
-        #SBATCH -D {log_dir}
-        #SBATCH -o "gain_selection_{run_id:05d}_{subrun:04d}_%j.log"
-        #SBATCH --job-name "gain_selection_{run_id:05d}"
-        #SBATCH --export {PATH}
-
-        lst_dvr {input_file} {output_dir} {ref_time} {ref_counter} {module} {ref_source}
-        """
-    )
-
+    if script == "old":
+        return dedent(
+            f"""\
+            #!/bin/bash
+
+            #SBATCH -D {log_dir}
+            #SBATCH -o "gain_selection_{run_id:05d}_{subrun:04d}_%j.log"
+            #SBATCH --job-name "gain_selection_{run_id:05d}"
+            #SBATCH --export {PATH}
+            #SBATCH --partition=short,long
+
+            lst_dvr {input_file} {output_dir} {ref_time} {ref_counter} {module} {ref_source}
+            """
+        )
+    elif script == "new":
+        return dedent(
+            f"""\
+            #!/bin/bash
+
+            #SBATCH -D {log_dir}
+            #SBATCH -o "gain_selection_{run_id:05d}_{subrun:04d}_%j.log"
+            #SBATCH --job-name "gain_selection_{run_id:05d}"
+            #SBATCH --mem=40GB
+            #SBATCH --partition=short,long
+
+            lstchain_r0_to_r0g --R0-file={input_file} --output-dir={output_dir} --log={log_file} --no-flatfield-heuristic
+            """
+        )

-def apply_gain_selection(date: str, start: int, end: int, output_basedir: Path = None):
+def apply_gain_selection(date: str, start: int, end: int, output_basedir: Path = None, no_queue_check: bool = False):
     """
     Submit the jobs to apply the gain selection to the data for a given date
     on a subrun-by-subrun basis.
     """
+    if date < "20231205":
+        script = "old"
+    else:
+        script = "new"
+
     run_summary_dir = Path("/fefs/aswg/data/real/monitoring/RunSummary")
     run_summary_file = run_summary_dir / f"RunSummary_{date}.ecsv"
     summary_table = Table.read(run_summary_file)
     # Apply gain selection only to DATA runs
     data_runs = summary_table[summary_table["run_type"] == "DATA"]
+    log.info(f"Found {len(data_runs)} DATA runs to which to apply the gain selection")

     output_dir = output_basedir / date
     log_dir = output_basedir / "log" / date
     output_dir.mkdir(parents=True, exist_ok=True)
     log_dir.mkdir(parents=True, exist_ok=True)
+    log_file = log_dir / f"r0_to_r0g_{date}.log"
     r0_dir = Path(f"/fefs/aswg/data/real/R0/{date}")

     for run in data_runs:
-        # Check slurm queue status and sleep for a while to avoid overwhelming the queue
-        check_job_status_and_wait(max_jobs=1500)
+        if not no_queue_check:
+            # Check slurm queue status and sleep for a while to avoid overwhelming the queue
+            check_job_status_and_wait(max_jobs=1500)

         # Avoid running jobs while it is still night time
         wait_for_daytime(start, end)
@@ -76,14 +156,25 @@ def apply_gain_selection(date: str, start: int, end: int, output_basedir: Path =
         subrun_numbers = [int(file[-12:-8]) for file in files]
         input_files = []

-        if ref_source in ["UCTS", "TIB"]:
+        if date < "20231205" and ref_source not in ["UCTS", "TIB"]:
+            input_files = r0_dir.glob(f"LST-1.?.Run{run_id:05d}.????.fits.fz")
+            log.info(
+                f"Run {run_id} does not have UCTS or TIB info, so gain selection cannot "
+                f"be applied. Copying directly the R0 files to {output_dir}."
+            )
+            for file in input_files:
+                sp.run(["cp", file, output_dir])

+        else:
             n_subruns = max(subrun_numbers)

             for subrun in range(n_subruns + 1):
                 new_files = glob.glob(f"{r0_dir}/LST-1.?.Run{run_id:05d}.{subrun:04d}.fits.fz")

                 if len(new_files) != 4:
+                    log.info(f"Run {run_id}.{subrun:05d} does not have 4 streams of R0 files, so gain "
+                        f"selection cannot be applied. Copying directly the R0 files to {output_dir}."
+                    )
                     for file in new_files:
                         sp.run(["cp", file, output_dir])

@@ -91,6 +182,7 @@ def apply_gain_selection(date: str, start: int, end: int, output_basedir: Path =
                 new_files.sort()
                 input_files.append(new_files[0])

+        log.info("Creating and launching the sbatch scripts for the rest of the runs to apply gain selection")
         for file in input_files:
             run_info = run_info_from_filename(file)
             job_file = log_dir / f"gain_selection_{run_info.run:05d}.{run_info.subrun:04d}.sh"
@@ -102,24 +194,21 @@ def apply_gain_selection(date: str, start: int, end: int, output_basedir: Path =
                     file,
                     output_dir,
                     log_dir,
+                    log_file,
                     ref_time,
                     ref_counter,
                     module,
                     ref_source,
+                    script,
                 )
             )
             sp.run(["sbatch", job_file], check=True)

-        else:
-
-            input_files = r0_dir.glob(f"LST-1.?.Run{run_id:05d}.????.fits.fz")
-
-            for file in input_files:
-                sp.run(["cp", file, output_dir])
-
     calib_runs = summary_table[summary_table["run_type"] != "DATA"]
+    log.info(f"Found {len(calib_runs)} NO-DATA runs")

     for run in calib_runs:
+        log.info(f"Copying R0 files corresponding to run {run} directly to {output_dir}")
         # Avoid copying files while it is still night time
         wait_for_daytime(start, end)
@@ -150,6 +239,19 @@ def run_sacct_j(job) -> StringIO:
     return StringIO(sp.check_output(sacct_cmd).decode())


+def GainSel_flag_file(date: str) -> Path:
+    filename = cfg.get("LSTOSA", "gain_selection_check")
+    GainSel_dir = Path(cfg.get("LST1", "GAIN_SELECTION_FLAG_DIR"))
+    flagfile = GainSel_dir / date / filename
+    return flagfile.resolve()
+
+
+def GainSel_finished(date: str) -> bool:
+    """Check if gain selection finished successfully."""
+    flagfile = GainSel_flag_file(date)
+    return flagfile.exists()
+
+
 def check_failed_jobs(date: str, output_basedir: Path = None):
     """Search for failed jobs in the log directory."""
     failed_jobs = []
@@ -165,72 +267,82 @@ def check_failed_jobs(date: str, output_basedir: Path = None):
             log.warning(f"Job {job} did not finish successfully")
             failed_jobs.append(job)

-    if not failed_jobs:
-        log.info(f"{date}: all jobs finished successfully")
-    else:
+    if failed_jobs:
         log.warning(f"{date}: some jobs did not finish successfully")

-
-    run_summary_dir = Path("/fefs/aswg/data/real/monitoring/RunSummary")
-    run_summary_file = run_summary_dir / f"RunSummary_{date}.ecsv"
-    summary_table = Table.read(run_summary_file)
-    runs = summary_table["run_id"]
-    missing_runs = []
-
-    r0_files = glob.glob(f"/fefs/aswg/data/real/R0/{date}/LST-1.?.Run?????.????.fits.fz")
-    r0g_files = glob.glob(f"/fefs/aswg/data/real/R0G/{date}/LST-1.?.Run?????.????.fits.fz")
-    all_r0_runs = [parse_r0_filename(i).run for i in r0_files]
-    all_r0g_runs = [parse_r0_filename(i).run for i in r0g_files]
-
-    for run in all_r0_runs:
-        if run not in runs:
-            if run not in all_r0g_runs:
-                missing_runs.append(run)
-
-    missing_runs.sort()
-    if missing_runs:
-        log.info(
-            f"Some runs are missing. Copying R0 files of runs {pd.Series(missing_runs).unique()} "
-            f"directly to /fefs/aswg/data/real/R0G/{date}"
-        )
-
-        for run in missing_runs:
-            output_dir = Path(f"/fefs/aswg/data/real/R0G/{date}/")
-            files = glob.glob(f"/fefs/aswg/data/real/R0/{date}/LST-1.?.Run{run:05d}.????.fits.fz")
-            for file in files:
-                sp.run(["cp", file, output_dir])
-
+    else:
+        log.info(f"{date}: all jobs finished successfully")


-@click.command()
-@click.option("--check", is_flag=True, default=False, help="Check for failed jobs.")
-@click.argument("dates-file", type=click.Path(exists=True, path_type=Path))
-@click.argument("output-basedir", type=click.Path(path_type=Path))
-@click.option("-s", "--start-time", type=int, default=10, help="Time to (re)start gain selection in HH format.")
-@click.option("-e", "--end-time", type=int, default=18, help="Time to stop gain selection in HH format.")
-def main(
-    dates_file: Path = None,
-    output_basedir: Path = None,
-    check: bool = False,
-    start_time: int = 10,
-    end_time: int = 18
-):
+        run_summary_dir = Path("/fefs/aswg/data/real/monitoring/RunSummary")
+        run_summary_file = run_summary_dir / f"RunSummary_{date}.ecsv"
+        summary_table = Table.read(run_summary_file)
+        runs = summary_table["run_id"]
+        missing_runs = []
+
+        r0_files = glob.glob(f"/fefs/aswg/data/real/R0/{date}/LST-1.?.Run?????.????.fits.fz")
+        r0g_files = glob.glob(f"/fefs/aswg/data/real/R0G/{date}/LST-1.?.Run?????.????.fits.fz")
+        all_r0_runs = [parse_r0_filename(i).run for i in r0_files]
+        all_r0g_runs = [parse_r0_filename(i).run for i in r0g_files]
+
+        for run in all_r0_runs:
+            if run not in runs:
+                if run not in all_r0g_runs:
+                    missing_runs.append(run)
+
+        missing_runs.sort()
+        if missing_runs:
+            log.info(
+                f"Some runs are missing. Copying R0 files of runs {pd.Series(missing_runs).unique()} "
+                f"directly to /fefs/aswg/data/real/R0G/{date}"
+            )
+
+            for run in missing_runs:
+                output_dir = Path(f"/fefs/aswg/data/real/R0G/{date}/")
+                files = glob.glob(f"/fefs/aswg/data/real/R0/{date}/LST-1.?.Run{run:05d}.????.fits.fz")
+                for file in files:
+                    sp.run(["cp", file, output_dir])
+
+        GainSel_dir = Path(cfg.get("LST1", "GAIN_SELECTION_FLAG_DIR"))
+        flagfile_dir = GainSel_dir / date
+        flagfile_dir.mkdir(parents=True, exist_ok=True)
+
+        flagfile = GainSel_flag_file(date)
+        log.info(f"Gain selection finished successfully, creating flag file for date {date} ({flagfile})")
+        flagfile.touch()
+
+
+def main():
     """
     Loop over the dates listed in the input file and launch the gain selection
     script for each of them. The input file should list the dates in the format
     YYYYMMDD one date per line.
     """
     log.setLevel(logging.INFO)
+    args = parser.parse_args()

-    list_of_dates = get_list_of_dates(dates_file)
+    if args.date:
+        if args.check:
+            log.info(f"Checking gain selection status for date {args.date}")
+            check_failed_jobs(args.date, args.output_basedir)
+        else:
+            log.info(f"Applying gain selection to date {args.date}")
+            apply_gain_selection(args.date, args.start_time, args.end_time, args.output_basedir, no_queue_check=args.no_queue_check)

-    if check:
-        for date in list_of_dates:
-            check_failed_jobs(date, output_basedir)
-    else:
-        for date in list_of_dates:
-            apply_gain_selection(date, start_time, end_time, output_basedir)
-    log.info("Done! No more dates to process.")
+
+    elif args.dates_file:
+        list_of_dates = get_list_of_dates(args.dates_file)
+        log.info(f"Found {len(list_of_dates)} dates to apply or check gain selection")
+
+        if args.check:
+            for date in list_of_dates:
+                log.info(f"Checking gain selection status for date {date}")
+                check_failed_jobs(date, args.output_basedir)
+        else:
+            for date in list_of_dates:
+                log.info(f"Applying gain selection to date {date}")
+                apply_gain_selection(date, args.start_time, args.end_time, args.output_basedir, no_queue_check=args.no_queue_check)
+        log.info("Done! No more dates to process.")


 if __name__ == "__main__":
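The click interface (positional dates-file and output-basedir) gives way to an argparse one that also accepts a single -d/--date, and the old/new pilot-script choice hinges on a lexicographic date < "20231205" comparison, which is safe because the dates are zero-padded YYYYMMDD strings. Hypothetical invocations of the reworked script (module path taken from this diff; the installed entry-point name may differ):

    python -m osa.scripts.gain_selection -d 20240117
    python -m osa.scripts.gain_selection -d 20240117 --check
    python -m osa.scripts.gain_selection -l dates.txt -o /fefs/aswg/data/real/R0G --no-queue-check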
osa/scripts/sequencer.py CHANGED
@@ -7,6 +7,7 @@ prepares a SLURM job array which launches the data sequences for every subrun.

 import logging
 import os
+import sys
 from decimal import Decimal

 from osa import osadb
@@ -22,12 +23,14 @@ from osa.job import (
     run_squeue,
 )
 from osa.nightsummary.extract import build_sequences
+from osa.nightsummary.nightsummary import run_summary_table
 from osa.paths import analysis_path
 from osa.report import start
 from osa.utils.cliopts import sequencer_cli_parsing
 from osa.utils.logging import myLogger
-from osa.utils.utils import is_day_closed, gettag, date_to_iso
+from osa.utils.utils import is_day_closed, gettag, date_to_iso, date_to_dir
 from osa.veto import get_closed_list, get_veto_list
+from osa.scripts.gain_selection import GainSel_finished

 __all__ = [
     "single_process",
@@ -90,6 +93,18 @@ def single_process(telescope):
     if not options.simulate:
         os.makedirs(options.log_directory, exist_ok=True)

+    summary_table = run_summary_table(options.date)
+    if len(summary_table) == 0:
+        log.warning("No runs found for this date. Nothing to do. Exiting.")
+        sys.exit(0)
+
+    if not options.no_gainsel and not GainSel_finished(date_to_dir(options.date)):
+        log.info(
+            f"Gain selection did not finish successfully for date {options.date}. "
+            "Try again later, once gain selection has finished."
+        )
+        sys.exit()
+
     if is_day_closed():
         log.info(f"Date {date_to_iso(options.date)} is already closed for {options.tel_id}")
         return sequence_list
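The sequencer now refuses to build sequences until the night's gain-selection flag file exists, unless --no-gainsel is passed. Stripped of the logging and the exits, the gate reduces to a file-existence check; a sketch using names from this diff (options normally comes from osa.configs.options):

    from osa.scripts.gain_selection import GainSel_finished  # flag-file lookup
    from osa.utils.utils import date_to_dir                  # date -> "YYYYMMDD"

    def gain_selection_ready(date, no_gainsel: bool) -> bool:
        return no_gainsel or GainSel_finished(date_to_dir(date))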
osa/scripts/show_run_summary.py CHANGED
@@ -1,30 +1,18 @@
 """
-Display summary of the observations for a given date.
-
-Show the run summary for a given date containing the number of subruns,
-the start and end time of the run and type of the run: DATA, DRS4, PEDCALIB.
+Print out basic run summary for a given date with run numbers, type of runs,
+start and stop timestamps and elapsed times.
 """

 import argparse
-import logging
-import os
+import datetime
 from pathlib import Path

+import astropy.units as u
 import numpy as np
-from astropy import units as u
 from astropy.table import Table
-from astropy.time import Time
-from lstchain.scripts.lstchain_create_run_summary import (
-    get_list_of_files,
-    get_list_of_runs,
-    get_runs_and_subruns,
-    type_of_run,
-    read_counters,
-)
-
-from osa.utils.logging import myLogger
+from lstchain.scripts.lstchain_create_run_summary import get_list_of_files, get_list_of_runs

-log = myLogger(logging.getLogger(__name__))
+from osa.nightsummary.database import get_run_info_from_TCU

 parser = argparse.ArgumentParser(description="Create run summary file")

@@ -42,138 +30,60 @@ parser.add_argument(
     default=Path("/fefs/aswg/data/real/R0"),
 )

-dtypes = {
-    "time_start": str,
-    "time_end": str,
-    "elapsed": u.quantity.Quantity,
-}
-
-
-def start_end_of_run_files_stat(r0_path: Path, run_number: int, num_files: int):
-    """
-    Get first timestamps from the last subrun.
-    Write down the reference Dragon module used, reference event_id.
-
-    Notes
-    -----
-    Start and end times are currently taken from the creation and last modification
-    time of the first and last file in the run. They are approximate and may be off
-    by a few seconds.
-
-    Parameters
-    ----------
-    r0_path : pathlib.Path
-        Directory that contains the R0 files
-    run_number : int
-        Number of the run
-    num_files : int
-        Number of the sequential files (subruns) of a given run
-
-    Returns
-    -------
-    end_timestamp
-    """
-
-    last_subrun = num_files - 1  # first subrun is 0
-    pattern_first_subrun = r0_path / f"LST-1.1.Run{run_number:05d}.0000.fits.fz"
-    pattern_last_subrun = r0_path / f"LST-1.1.Run{run_number:05d}.{last_subrun:04d}.fits.fz"
-    try:
-        # Get start and end times from the creation and last modification timestamps
-        # from the first and last file in the run
-        run_start = Time(os.path.getctime(pattern_first_subrun), format="unix")
-        run_end = Time(os.path.getmtime(pattern_last_subrun), format="unix")
-        elapsed_time = run_end - run_start
-
-        return dict(
-            time_start=run_start.iso,
-            time_end=run_end.iso,
-            elapsed=np.round(elapsed_time.to_value("min"), decimals=1),
-        )
-
-    except Exception as err:
-        log.error(f"Files {pattern_first_subrun} or {pattern_last_subrun} have error: {err}")
-
-        return dict(
-            time_start=None,
-            time_end=None,
-            elapsed=0.0,
-        )
+parser.add_argument(
+    "--tcu-db",
+    type=str,
+    help="Server of the TCU monitoring database",
+    default="lst101-int",
+)


 def main():
     """
-    Build an astropy Table with run summary information and write it
-    as ECSV file with the following information (one row per run):
-    - run_id
-    - number of subruns
-    - type of run (DRS4, CALI, DATA, CONF)
-    - start of the run
-    - dragon reference UCTS timestamp if available (-1 otherwise)
-    - dragon reference time source ("ucts" or "run_date")
-    - dragon_reference_module_id
-    - dragon_reference_module_index
-    - dragon_reference_counter
+    Get run metadata information from TCU monitoring
+    database and print out the run summary
     """
-
-    log.setLevel(logging.INFO)
-
     args = parser.parse_args()

+    tcu_db = args.tcu_db
     date_path = args.r0_path / args.date
-
     file_list = get_list_of_files(date_path)
-    runs = get_list_of_runs(file_list)
-    run_numbers, n_subruns = get_runs_and_subruns(runs)
-
-    reference_counters = [read_counters(date_path, run) for run in run_numbers]
-
-    run_types = [
-        type_of_run(date_path, run, counters)
-        for run, counters in zip(run_numbers, reference_counters)
-    ]
-
-    start_end_timestamps = [
-        start_end_of_run_files_stat(date_path, run, n_files)
-        for run, n_files in zip(run_numbers, n_subruns)
-    ]
-
-    run_summary = Table(
-        {
-            col: np.array([d[col] for d in start_end_timestamps], dtype=dtype)
-            for col, dtype in dtypes.items()
-        }
-    )
-
-    run_summary.add_column(run_numbers, name="run_id", index=0)
-    run_summary.add_column(n_subruns, name="n_subruns", index=1)
-    run_summary.add_column(run_types, name="run_type", index=2)
-
-    run_summary["elapsed"].unit = u.min
-
-    header = " Run summary "
-    print(f"{header.center(50, '*')}")
-    run_summary.pprint_all()
-    print("\n")
-
-    # Sum elapsed times:
-    obs_by_type = run_summary.group_by("run_type")
-    obs_by_type["number_of_runs"] = 1
-    total_obs_time = obs_by_type[
-        "run_type", "number_of_runs", "n_subruns", "elapsed"
-    ].groups.aggregate(np.sum)
-    total_obs_time["elapsed"].format = "7.1f"
-
-    header = " Observation time per run type "
-    print(f"{header.center(50, '*')}")
-    total_obs_time.pprint_all()
-    print("\n")
-
-    run_summary["number_of_runs"] = 1
-    total_obs = run_summary["number_of_runs", "n_subruns", "elapsed"].groups.aggregate(np.sum)
-    total_obs["elapsed"].format = "7.1f"
-    header = " Total observation time "
-    print(f"{header.center(50, '*')}")
-    total_obs.pprint_all()
+    all_runs = get_list_of_runs(file_list)
+    run_numbers = [x.run for x in all_runs]
+    run_numbers_array = np.unique(run_numbers)
+    run_numbers_array = run_numbers_array[run_numbers_array != 0]
+
+    list_info = []
+
+    for run in run_numbers_array:
+        run_info = get_run_info_from_TCU(int(run), tcu_server=tcu_db)
+        list_info.append(run_info)
+
+    if list_info:
+        table = Table(
+            np.array(list_info).T.tolist(),
+            names=("run", "type", "tstart", "tstop", "elapsed"),
+            dtype=(int, str, datetime.datetime, datetime.datetime, float),
+        )
+        table["elapsed"].unit = u.min
+        table["elapsed"].info.format = "3.1f"
+        print("\n")
+        table.pprint_all()
+
+        # Sum elapsed times:
+        obs_by_type = table.group_by("type")
+        obs_by_type["number_of_runs"] = 1
+        total_obs_time = obs_by_type["type", "number_of_runs", "elapsed"].groups.aggregate(np.sum)
+        total_obs_time["elapsed"].info.format = "7.0f"
+
+        print("\n")
+        header = " Observation time per run type "
+        print(f"{header.center(50, '*')}")
+        total_obs_time.pprint_all()
+        print("\n")
+
+    else:
+        print(f"No data found in {date_path}")


 if __name__ == "__main__":
osa/scripts/tests/test_osa_scripts.py CHANGED
@@ -126,9 +126,11 @@ def test_simulated_sequencer(
     run_catalog,
     r0_data,
     merged_run_summary,
+    gain_selection_flag_file,
 ):
     assert run_summary_file.exists()
     assert run_catalog.exists()
+    assert gain_selection_flag_file.exists()

     for r0_file in r0_data:
         assert r0_file.exists()
osa/utils/cliopts.py CHANGED
@@ -274,6 +274,12 @@ def sequencer_argparser():
         default=False,
         help="Do not produce DL2 files (default False)",
     )
+    parser.add_argument(
+        "--no-gainsel",
+        action="store_true",
+        default=False,
+        help="Do not check if the gain selection finished correctly (default False)",
+    )
     parser.add_argument(
         "tel_id",
         choices=["ST", "LST1", "LST2", "all"],
@@ -292,6 +298,7 @@ def sequencer_cli_parsing():
     options.no_submit = opts.no_submit
     options.no_calib = opts.no_calib
     options.no_dl2 = opts.no_dl2
+    options.no_gainsel = opts.no_gainsel

     log.debug(f"the options are {opts}")

@@ -476,6 +483,12 @@ def autocloser_cli_parser():
         default=False,
         help="Disregard the production of DL2 files",
     )
+    parser.add_argument(
+        "--no-gainsel",
+        action="store_true",
+        default=False,
+        help="Do not check if the gain selection finished correctly (default False)",
+    )
     parser.add_argument("-r", "--runwise", action="store_true", help="Close the day run-wise.")
     parser.add_argument("-l", "--log", type=Path, default=None, help="Write log to a file.")
     parser.add_argument("tel_id", type=str, choices=["LST1"])
osa/scripts/show_run_summary_tcu.py DELETED
@@ -1,90 +0,0 @@
-"""
-Print out basic run summary for a given date with run numbers, type of runs,
-start and stop timestamps and elapsed times.
-"""
-
-import argparse
-import datetime
-from pathlib import Path
-
-import astropy.units as u
-import numpy as np
-from astropy.table import Table
-from lstchain.scripts.lstchain_create_run_summary import get_list_of_files, get_list_of_runs
-
-from osa.nightsummary.database import get_run_info_from_TCU
-
-parser = argparse.ArgumentParser(description="Create run summary file")
-
-parser.add_argument(
-    "-d",
-    "--date",
-    help="Date for the creation of the run summary in format YYYYMMDD",
-    required=True,
-)
-
-parser.add_argument(
-    "--r0-path",
-    type=Path,
-    help="Path to the R0 files. Default is /fefs/aswg/data/real/R0",
-    default=Path("/fefs/aswg/data/real/R0"),
-)
-
-parser.add_argument(
-    "--tcu-db",
-    type=str,
-    help="Server of the TCU monitoring database",
-    default="lst101-int",
-)
-
-
-def main():
-    """
-    Get run metadata information from TCU monitoring
-    database and print out the run summary
-    """
-    args = parser.parse_args()
-
-    tcu_db = args.tcu_db
-    date_path = args.r0_path / args.date
-    file_list = get_list_of_files(date_path)
-    all_runs = get_list_of_runs(file_list)
-    run_numbers = [x.run for x in all_runs]
-    run_numbers_array = np.unique(run_numbers)
-    run_numbers_array = run_numbers_array[run_numbers_array != 0]
-
-    list_info = []
-
-    for run in run_numbers_array:
-        run_info = get_run_info_from_TCU(int(run), tcu_server=tcu_db)
-        list_info.append(run_info)
-
-    if list_info:
-        table = Table(
-            np.array(list_info).T.tolist(),
-            names=("run", "type", "tstart", "tstop", "elapsed"),
-            dtype=(int, str, datetime.datetime, datetime.datetime, float),
-        )
-        table["elapsed"].unit = u.min
-        table["elapsed"].info.format = "3.1f"
-        print("\n")
-        table.pprint_all()
-
-        # Sum elapsed times:
-        obs_by_type = table.group_by("type")
-        obs_by_type["number_of_runs"] = 1
-        total_obs_time = obs_by_type["type", "number_of_runs", "elapsed"].groups.aggregate(np.sum)
-        total_obs_time["elapsed"].info.format = "7.0f"
-
-        print("\n")
-        header = " Observation time per run type "
-        print(f"{header.center(50, '*')}")
-        total_obs_time.pprint_all()
-        print("\n")
-
-    else:
-        print(f"No data found in {date_path}")
-
-
-if __name__ == "__main__":
-    main()