lstosa 0.10.18__py3-none-any.whl → 0.11.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (46)
  1. {lstosa-0.10.18.dist-info → lstosa-0.11.0.dist-info}/METADATA +4 -5
  2. lstosa-0.11.0.dist-info/RECORD +84 -0
  3. {lstosa-0.10.18.dist-info → lstosa-0.11.0.dist-info}/WHEEL +1 -1
  4. {lstosa-0.10.18.dist-info → lstosa-0.11.0.dist-info}/entry_points.txt +1 -0
  5. osa/_version.py +9 -4
  6. osa/configs/options.py +2 -0
  7. osa/configs/sequencer.cfg +21 -7
  8. osa/conftest.py +146 -6
  9. osa/high_level/significance.py +5 -3
  10. osa/high_level/tests/test_significance.py +3 -0
  11. osa/job.py +52 -26
  12. osa/nightsummary/extract.py +12 -3
  13. osa/nightsummary/tests/test_extract.py +5 -0
  14. osa/paths.py +111 -28
  15. osa/provenance/capture.py +1 -1
  16. osa/provenance/config/definition.yaml +7 -0
  17. osa/provenance/utils.py +22 -7
  18. osa/scripts/autocloser.py +0 -10
  19. osa/scripts/calibration_pipeline.py +9 -2
  20. osa/scripts/closer.py +136 -55
  21. osa/scripts/copy_datacheck.py +5 -3
  22. osa/scripts/datasequence.py +45 -71
  23. osa/scripts/gain_selection.py +14 -15
  24. osa/scripts/provprocess.py +16 -7
  25. osa/scripts/sequencer.py +49 -34
  26. osa/scripts/sequencer_catB_tailcuts.py +239 -0
  27. osa/scripts/sequencer_webmaker.py +4 -0
  28. osa/scripts/show_run_summary.py +2 -2
  29. osa/scripts/simulate_processing.py +4 -7
  30. osa/scripts/tests/test_osa_scripts.py +67 -22
  31. osa/scripts/update_source_catalog.py +45 -22
  32. osa/tests/test_jobs.py +28 -11
  33. osa/tests/test_paths.py +6 -6
  34. osa/tests/test_raw.py +4 -4
  35. osa/utils/cliopts.py +37 -32
  36. osa/utils/register.py +18 -13
  37. osa/utils/tests/test_utils.py +14 -0
  38. osa/utils/utils.py +186 -56
  39. osa/veto.py +1 -1
  40. osa/workflow/dl3.py +1 -2
  41. osa/workflow/stages.py +16 -11
  42. osa/workflow/tests/test_dl3.py +2 -1
  43. osa/workflow/tests/test_stages.py +7 -5
  44. lstosa-0.10.18.dist-info/RECORD +0 -83
  45. {lstosa-0.10.18.dist-info → lstosa-0.11.0.dist-info}/LICENSE +0 -0
  46. {lstosa-0.10.18.dist-info → lstosa-0.11.0.dist-info}/top_level.txt +0 -0
osa/scripts/closer.py CHANGED
@@ -160,7 +160,8 @@ def post_process(seq_tuple):
     seq_list = seq_tuple[1]
 
     if dl1_datacheck_longterm_file_exits() and not options.test:
-        create_longterm_symlink()
+        if cfg.getboolean("lstchain", "create_longterm_symlink"):
+            create_longterm_symlink()
 
     else:
         # Close the sequences
@@ -169,14 +170,14 @@ def post_process(seq_tuple):
         # Extract the provenance info
         extract_provenance(seq_list)
 
-        # Merge DL1b files run-wise
-        merge_files(seq_list, data_level="DL1AB")
-
         merge_muon_files(seq_list)
 
-        # Merge DL2 files run-wise
-        if not options.no_dl2:
-            merge_files(seq_list, data_level="DL2")
+        # Merge DL1b files run-wise
+        for sequence in seq_list:
+            dl1_merge_job_id = merge_files(sequence, data_level="DL1AB")
+            # Produce DL2 files run-wise
+            if not options.no_dl2 and sequence.type=="DATA":
+                dl1_to_dl2(sequence, dl1_merge_job_id)
 
         # Merge DL1 datacheck files and produce PDFs. It also produces
         # the daily datacheck report using the longterm script, and updates
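Note on the hunk above: merge_files is now invoked per sequence and (as the later hunks of this file show) submits the DL1b merge with sbatch --parsable, so the value it returns is the Slurm job id; dl1_to_dl2 then passes that id in a --dependency=afterok clause, so DL2 production starts only after the merge has succeeded. A minimal, self-contained sketch of this submission pattern (the batch script names are illustrative, not from lstosa):

    import shutil
    import subprocess

    def submit(cmd: list) -> str:
        """Submit a job with sbatch and return its Slurm job id."""
        job = subprocess.run(cmd, capture_output=True, text=True, check=True)
        # With --parsable, sbatch prints "<jobid>" (or "<jobid>;<cluster>").
        return job.stdout.strip().split(";")[0]

    if shutil.which("sbatch") is not None:
        merge_id = submit(["sbatch", "--parsable", "merge_dl1b.sh"])
        # The second job starts only if the merge job exits with code 0.
        submit(["sbatch", "--parsable", f"--dependency=afterok:{merge_id}", "dl1_to_dl2.sh"])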
@@ -185,7 +186,8 @@ def post_process(seq_tuple):
         list_job_id = merge_dl1_datacheck(seq_list)
         longterm_job_id = daily_datacheck(daily_longterm_cmd(list_job_id))
         cherenkov_job_id = cherenkov_transparency(cherenkov_transparency_cmd(longterm_job_id))
-        create_longterm_symlink(cherenkov_job_id)
+        if cfg.getboolean("lstchain", "create_longterm_symlink"):
+            create_longterm_symlink(cherenkov_job_id)
 
         time.sleep(600)
 
@@ -215,6 +217,66 @@ def post_process(seq_tuple):
     return False
 
 
+def dl1_to_dl2(sequence, dl1_merge_job_id) -> int:
+    """
+    It prepares and execute the dl1 to dl2 lstchain scripts that applies
+    the already trained RFs models to DL1 files. It identifies the
+    primary particle, reconstructs its energy and direction.
+
+    Parameters
+    ----------
+    run_str: str
+
+    Returns
+    -------
+    rc: int
+    """
+    nightdir = date_to_dir(options.date)
+    dl2_dir = Path(cfg.get("LST1", "DL2_DIR"))
+    dl2_subdirectory = dl2_dir / nightdir / options.prod_id / sequence.dl2_prod_id
+    dl2_file = dl2_subdirectory / f"dl2_LST-1.Run{sequence.run_str[:5]}.h5"
+    dl2_config = Path(cfg.get("lstchain", "dl2_config"))
+    dl1ab_subdirectory = Path(cfg.get("LST1", "DL1AB_DIR"))
+    dl1_file = dl1ab_subdirectory / nightdir / options.prod_id / sequence.dl1_prod_id / f"dl1_LST-1.Run{sequence.run_str[:5]}.h5"
+    log_dir = Path(options.directory) / "log"
+    slurm_account = cfg.get("SLURM", "ACCOUNT")
+
+    if dl2_file.exists():
+        log.debug(f"The dl2 file {dl2_file} already exists.")
+        return 0
+
+    command = cfg.get("lstchain", "dl1_to_dl2")
+    cmd = [
+        "sbatch",
+        "--parsable",
+        "--mem-per-cpu=60GB",
+        f"--account={slurm_account}",
+        "-o", f"{log_dir}/Run{sequence.run_str[:5]}_dl2_%j.out",
+        "-e", f"{log_dir}/Run{sequence.run_str[:5]}_dl2_%j.err",
+        f"--dependency=afterok:{dl1_merge_job_id}",
+        command,
+        f"--input-file={dl1_file}",
+        f"--output-dir={dl2_subdirectory}",
+        f"--path-models={sequence.rf_model}",
+        f"--config={dl2_config}",
+    ]
+    log.info(f"executing {cmd}")
+
+    if options.simulate:
+        return 0
+
+    if not options.test and shutil.which("sbatch") is not None:
+        job = subprocess.run(
+            cmd,
+            encoding="utf-8",
+            capture_output=True,
+            text=True,
+            check=True,
+        )
+        job_id = job.stdout.strip()
+        return job_id
+
+
 def post_process_files(seq_list: list):
     """
     Identify the different types of files, try to close the sequences
@@ -226,9 +288,7 @@ def post_process_files(seq_list: list):
         list of sequences
     """
 
-    output_files_set = set(Path(options.directory).rglob("*Run*"))
-
-    DL1AB_RE = re.compile(rf"{options.dl1_prod_id}/dl1.*.(?:h5|hdf5|hdf)")
+    DL1AB_RE = re.compile(r"tailcut.*/dl1.*.(?:h5|hdf5|hdf)")
     MUONS_RE = re.compile(r"muons.*.fits")
    DATACHECK_RE = re.compile(r"datacheck_dl1.*.(?:h5|hdf5|hdf)")
     INTERLEAVED_RE = re.compile(r"interleaved.*.(?:h5|hdf5|hdf)")
@@ -243,27 +303,36 @@ def post_process_files(seq_list: list):
     )
 
     if not options.no_dl2:
-        DL2_RE = re.compile(f"{options.dl2_prod_id}/dl2.*.(?:h5|hdf5|hdf)")
+        DL2_RE = re.compile("tailcut.*/nsb_tuning_.*/dl2.*.(?:h5|hdf5|hdf)")
         pattern_files["DL2"] = DL2_RE
 
     for concept, pattern_re in pattern_files.items():
-        log.info(f"Post processing {concept} files, {len(output_files_set)} files left")
+        for sequence in seq_list:
+            output_files_set = set(Path(options.directory).rglob(f"*Run{sequence.run:05d}*"))
+            log.info(f"Post processing {concept} files, {len(output_files_set)} files left")
+
+            if sequence.type=="DATA":
+                dst_path = destination_dir(
+                    concept,
+                    create_dir=True,
+                    dl1_prod_id=sequence.dl1_prod_id,
+                    dl2_prod_id=sequence.dl2_prod_id
+                )
 
-        dst_path = destination_dir(concept, create_dir=True)
+            log.debug(f"Checking if {concept} files need to be moved to {dst_path}")
 
-        log.debug(f"Checking if {concept} files need to be moved to {dst_path}")
+            for file_path in output_files_set.copy():
 
-        for file_path in output_files_set.copy():
+                file = str(file_path)
 
-            file = str(file_path)
-            # If seqtoclose is set, we only want to close that sequence
-            if options.seqtoclose is not None and options.seqtoclose not in file:
-                continue
+                # If seqtoclose is set, we only want to close that sequence
+                if options.seqtoclose is not None and options.seqtoclose not in file:
+                    continue
 
-            if pattern_found := pattern_re.search(file):
-                log.debug(f"Pattern {concept} found, {pattern_found} in {file}")
-                registered_file = register_found_pattern(file_path, seq_list, concept, dst_path)
-                output_files_set.remove(registered_file)
+                if pattern_found := pattern_re.search(file):
+                    log.debug(f"Pattern {concept} found, {pattern_found} in {file}")
+                    registered_file = register_found_pattern(file_path, seq_list, concept, dst_path)
+                    output_files_set.remove(registered_file)
 
 
 def set_closed_with_file():
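The fixed patterns above replace the regexes built from options.dl1_prod_id / options.dl2_prod_id: output files are now located by their tailcut (and, for DL2, nsb_tuning_) subdirectory names. A quick check of what the new expressions match, using made-up file paths:

    import re

    DL1AB_RE = re.compile(r"tailcut.*/dl1.*.(?:h5|hdf5|hdf)")
    DL2_RE = re.compile("tailcut.*/nsb_tuning_.*/dl2.*.(?:h5|hdf5|hdf)")

    assert DL1AB_RE.search("tailcut84/dl1_LST-1.Run12345.0000.h5")
    assert DL2_RE.search("tailcut84/nsb_tuning_0.14/dl2_LST-1.Run12345.h5")
    assert not DL2_RE.search("tailcut84/dl1_LST-1.Run12345.0000.h5")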
@@ -335,13 +404,13 @@ def merge_dl1_datacheck(seq_list) -> List[str]:
     log.debug("Merging dl1 datacheck files and producing PDFs")
 
     muons_dir = destination_dir("MUON", create_dir=False)
-    datacheck_dir = destination_dir("DATACHECK", create_dir=False)
     slurm_account = cfg.get("SLURM", "ACCOUNT")
 
     list_job_id = []
 
     for sequence in seq_list:
         if sequence.type == "DATA":
+            datacheck_dir = destination_dir("DATACHECK", create_dir=False, dl1_prod_id=sequence.dl1_prod_id)
             cmd = [
                 "sbatch",
                 "--parsable",
@@ -387,7 +456,7 @@ def extract_provenance(seq_list):
     """
     log.info("Extract provenance run wise")
 
-    nightdir = date_to_dir(options.date)
+    nightdir = date_to_iso(options.date)
     slurm_account = cfg.get("SLURM", "ACCOUNT")
 
     for sequence in seq_list:
@@ -431,40 +500,52 @@ def get_pattern(data_level) -> Tuple[str, str]:
     raise ValueError(f"Unknown data level {data_level}")
 
 
-def merge_files(sequence_list, data_level="DL2"):
+def merge_files(sequence, data_level="DL2"):
     """Merge DL1b or DL2 h5 files run-wise."""
     log.info(f"Looping over the sequences and merging the {data_level} files")
-
-    data_dir = destination_dir(data_level, create_dir=False)
     pattern, prefix = get_pattern(data_level)
     slurm_account = cfg.get("SLURM", "ACCOUNT")
+
+    if sequence.type == "DATA":
+        data_dir = destination_dir(
+            data_level,
+            create_dir=False,
+            dl1_prod_id=sequence.dl1_prod_id,
+            dl2_prod_id=sequence.dl2_prod_id
+        )
+        merged_file = Path(data_dir) / f"{prefix}_LST-1.Run{sequence.run:05d}.h5"
 
-    for sequence in sequence_list:
-        if sequence.type == "DATA":
-            merged_file = Path(data_dir) / f"{prefix}_LST-1.Run{sequence.run:05d}.h5"
-
-            cmd = [
-                "sbatch",
-                f"--account={slurm_account}",
-                "-D",
-                options.directory,
-                "-o",
-                f"log/merge_{prefix}_{sequence.run:05d}_%j.log",
-                "lstchain_merge_hdf5_files",
-                f"--input-dir={data_dir}",
-                f"--output-file={merged_file}",
-                "--no-image",
-                "--no-progress",
-                f"--run-number={sequence.run}",
-                f"--pattern={pattern}",
-            ]
+        cmd = [
+            "sbatch",
+            "--parsable",
+            f"--account={slurm_account}",
+            "-D",
+            options.directory,
+            "-o",
+            f"log/merge_{prefix}_{sequence.run:05d}_%j.log",
+            "lstchain_merge_hdf5_files",
+            f"--input-dir={data_dir}",
+            f"--output-file={merged_file}",
+            "--no-image",
+            "--no-progress",
+            f"--run-number={sequence.run}",
+            f"--pattern={pattern}",
+        ]
 
-            log.debug(f"Executing {stringify(cmd)}")
+        log.debug(f"Executing {stringify(cmd)}")
 
-            if not options.simulate and not options.test and shutil.which("sbatch") is not None:
-                subprocess.run(cmd, check=True)
-            else:
-                log.debug("Simulate launching scripts")
+        if not options.simulate and not options.test and shutil.which("sbatch") is not None:
+            job = subprocess.run(
+                cmd,
+                encoding="utf-8",
+                capture_output=True,
+                text=True,
+                check=True,
+            )
+            job_id = job.stdout.strip()
+            return job_id
+        else:
+            log.debug("Simulate launching scripts")
 
 
 def merge_muon_files(sequence_list):
@@ -503,7 +584,7 @@ def merge_muon_files(sequence_list):
 def daily_longterm_cmd(parent_job_ids: List[str]) -> List[str]:
     """Build the daily longterm command."""
     nightdir = date_to_dir(options.date)
-    datacheck_dir = destination_dir("DATACHECK", create_dir=False)
+    datacheck_dir = destination_dir("DATACHECK", create_dir=False, dl1_prod_id="tailcut84")
     muons_dir = destination_dir("MUON", create_dir=False)
     longterm_dir = Path(cfg.get("LST1", "LONGTERM_DIR")) / options.prod_id / nightdir
     longterm_output_file = longterm_dir / f"DL1_datacheck_{nightdir}.h5"
@@ -548,7 +629,7 @@ def daily_datacheck(cmd: List[str]):
 def cherenkov_transparency_cmd(longterm_job_id: str) -> List[str]:
     """Build the cherenkov transparency command."""
     nightdir = date_to_dir(options.date)
-    datacheck_dir = destination_dir("DATACHECK", create_dir=False)
+    datacheck_dir = destination_dir("DATACHECK", create_dir=False, dl1_prod_id="tailcut84")
     longterm_dir = Path(cfg.get("LST1", "LONGTERM_DIR")) / options.prod_id / nightdir
     longterm_datacheck_file = longterm_dir / f"DL1_datacheck_{nightdir}.h5"
     slurm_account = cfg.get("SLURM", "ACCOUNT")
osa/scripts/copy_datacheck.py CHANGED
@@ -5,12 +5,13 @@ directories whenever they are needed.
 """
 
 import logging
+from pathlib import Path
 
 from osa.configs import options
+from osa.configs.config import cfg
 from osa.paths import (
     datacheck_directory,
     get_datacheck_files,
-    destination_dir,
 )
 from osa.utils.cliopts import copy_datacheck_parsing
 from osa.utils.logging import myLogger
@@ -90,8 +91,9 @@ def get_number_of_runs():
     Get the run sequence processed list for the given date by globbing the
     run-wise DL1 files.
     """
-    dl1_directory = destination_dir("DL1AB", create_dir=False)
-    list_files = list(dl1_directory.glob("dl1_LST-1.Run?????.h5"))
+    nightdir = date_to_dir(options.date)
+    dl1_directory = Path(cfg.get("LST1", "DL1_DIR")) / nightdir / options.prod_id
+    list_files = list(dl1_directory.glob("tailcut*/dl1_LST-1.Run?????.h5"))
     return len(list_files)
 
 
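get_number_of_runs now resolves the night's DL1 directory from the configuration and counts run-wise files across all tailcut subdirectories in a single glob, since Path.glob accepts wildcards in intermediate path components. A small sketch with an illustrative directory layout:

    from pathlib import Path

    # Illustrative base path; the real one is built from cfg.get("LST1", "DL1_DIR"),
    # the night directory and options.prod_id.
    dl1_directory = Path("/data/DL1/20240101/v0.11.0")
    n_runs = len(list(dl1_directory.glob("tailcut*/dl1_LST-1.Run?????.h5")))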
osa/scripts/datasequence.py CHANGED
@@ -9,11 +9,13 @@ from osa.configs.config import cfg
 from osa.job import historylevel
 from osa.workflow.stages import AnalysisStage
 from osa.provenance.capture import trace
+from osa.paths import get_catB_calibration_filename
 from osa.utils.cliopts import data_sequence_cli_parsing
 from osa.utils.logging import myLogger
 from osa.utils.utils import date_to_dir
+from osa.paths import catB_closed_file_exists
 
-__all__ = ["data_sequence", "r0_to_dl1", "dl1_to_dl2", "dl1ab", "dl1_datacheck"]
+__all__ = ["data_sequence", "r0_to_dl1", "dl1ab", "dl1_datacheck"]
 
 log = myLogger(logging.getLogger())
 
@@ -27,6 +29,8 @@ def data_sequence(
     run_summary: Path,
     pedestal_ids_file: Path,
     run_str: str,
+    dl1b_config: Path,
+    dl1_prod_id: str,
 ):
     """
     Performs all the steps to process a whole run.
@@ -50,10 +54,10 @@ def data_sequence(
     history_file = Path(options.directory) / f"sequence_{options.tel_id}_{run_str}.history"
     # Set the starting level and corresponding return code from last analysis step
     # registered in the history file.
-    level, rc = (4, 0) if options.simulate else historylevel(history_file, "DATA")
+    level, rc = (3, 0) if options.simulate else historylevel(history_file, "DATA")
     log.info(f"Going to level {level}")
 
-    if level == 4:
+    if level == 3:
         rc = r0_to_dl1(
             calibration_file,
             pedestal_file,
@@ -67,32 +71,23 @@ def data_sequence(
         level -= 1
         log.info(f"Going to level {level}")
 
-    if level == 3:
-        rc = dl1ab(run_str)
-        if cfg.getboolean("lstchain", "store_image_dl1ab"):
-            level -= 1
-            log.info(f"Going to level {level}")
-        else:
-            level -= 2
-            log.info(f"No images stored in dl1ab. Producing DL2. Going to level {level}")
-
     if level == 2:
-        rc = dl1_datacheck(run_str)
-        if options.no_dl2:
+        if options.no_dl1ab:
             level = 0
-            log.info(f"No DL2 are going to be produced. Going to level {level}")
+            log.info(f"No DL1B are going to be produced. Going to level {level}")
         else:
-            level -= 1
-            log.info(f"Going to level {level}")
+            rc = dl1ab(run_str, dl1b_config, dl1_prod_id)
+            if cfg.getboolean("lstchain", "store_image_dl1ab"):
+                level -= 1
+                log.info(f"Going to level {level}")
+            else:
+                level -= 2
+                log.info(f"No images stored in dl1ab. Going to level {level}")
 
     if level == 1:
-        if options.no_dl2:
-            level = 0
-            log.info(f"No DL2 are going to be produced. Going to level {level}")
-        else:
-            rc = dl1_to_dl2(run_str)
-            level -= 1
-            log.info(f"Going to level {level}")
+        rc = dl1_datacheck(run_str, dl1_prod_id)
+        level -= 1
+        log.info(f"Going to level {level}")
 
     if level == 0:
         log.info(f"Job for sequence {run_str} finished without fatal errors")
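The reworked level ladder in data_sequence stops at DL1: level 3 runs r0_to_dl1, level 2 runs dl1ab (or ends the sequence immediately under the new --no-dl1ab option), level 1 runs dl1_datacheck, and level 0 closes the sequence; DL2 production has moved to closer.py as shown earlier. A condensed sketch of the control flow, with the stage functions stubbed out:

    def r0_to_dl1(): ...
    def dl1ab(): ...
    def dl1_datacheck(): ...

    def process(level: int, no_dl1ab: bool, store_image: bool) -> None:
        if level == 3:
            r0_to_dl1()
            level -= 1
        if level == 2:
            if no_dl1ab:
                level = 0                        # nothing more to do for this sequence
            else:
                dl1ab()
                # Without stored images the datacheck step is skipped.
                level -= 1 if store_image else 2
        if level == 1:
            dl1_datacheck()
            level -= 1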
@@ -166,7 +161,7 @@ def r0_to_dl1(
 
 
 @trace
-def dl1ab(run_str: str) -> int:
+def dl1ab(run_str: str, dl1b_config: Path, dl1_prod_id: str) -> int:
     """
     Prepare and launch the actual lstchain script that is performing
     the image cleaning considering the interleaved pedestal information
@@ -181,26 +176,39 @@ def dl1ab(run_str: str) -> int:
     rc: int
         Return code of the executed command.
     """
+
+    # Prepare and launch the actual lstchain script
+    command = cfg.get("lstchain", "dl1ab")
+
     # Create a new subdirectory for the dl1ab output
-    dl1ab_subdirectory = Path(options.directory) / options.dl1_prod_id
+    dl1ab_subdirectory = Path(options.directory) / dl1_prod_id
     dl1ab_subdirectory.mkdir(parents=True, exist_ok=True)
-    dl1b_config = Path(cfg.get("lstchain", "dl1b_config"))
     # DL1a input file from base running_analysis directory
     input_dl1_datafile = Path(options.directory) / f"dl1_LST-1.Run{run_str}.h5"
     # DL1b output file to be stored in the dl1ab subdirectory
     output_dl1_datafile = dl1ab_subdirectory / f"dl1_LST-1.Run{run_str}.h5"
 
-    # Prepare and launch the actual lstchain script
-    command = cfg.get("lstchain", "dl1ab")
     cmd = [
         command,
         f"--input-file={input_dl1_datafile}",
         f"--output-file={output_dl1_datafile}",
         f"--config={dl1b_config}",
     ]
+
     if not cfg.getboolean("lstchain", "store_image_dl1ab"):
         cmd.append("--no-image=True")
 
+    if cfg.getboolean("lstchain", "apply_catB_calibration"):
+        if catB_closed_file_exists(int(run_str[:5])):
+            catB_calibration_file = get_catB_calibration_filename(int(run_str[:5]))
+            cmd.append(f"--catB-calibration-file={catB_calibration_file}")
+        else:
+            log.info(
+                f"Cat-B calibration did not finish yet for run {run_str[:5]}. "
+                "Please try again later."
+            )
+            sys.exit(1)
+
     if options.simulate:
         return 0
 
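The new Cat-B block gates the dl1ab command on the calibration having closed; catB_closed_file_exists and get_catB_calibration_filename are the helpers imported from osa.paths in the first hunk of this file. The gating logic isolated as a sketch (the run string and base command are illustrative):

    import sys
    from osa.configs.config import cfg
    from osa.paths import catB_closed_file_exists, get_catB_calibration_filename

    run_str = "12345.0001"    # "RRRRR.SSSS": the first five digits are the run number
    run_id = int(run_str[:5])

    cmd = ["lstchain_dl1ab"]  # illustrative base command
    if cfg.getboolean("lstchain", "apply_catB_calibration"):
        if catB_closed_file_exists(run_id):
            cmd.append(f"--catB-calibration-file={get_catB_calibration_filename(run_id)}")
        else:
            # Calibration still running: exit non-zero so the sequence is retried later.
            sys.exit(1)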
@@ -210,7 +218,7 @@ def dl1ab(run_str: str) -> int:
 
 
 @trace
-def dl1_datacheck(run_str: str) -> int:
+def dl1_datacheck(run_str: str, dl1_prod_id: str) -> int:
     """
     Run datacheck script
 
@@ -223,9 +231,9 @@ def dl1_datacheck(run_str: str) -> int:
     rc: int
     """
     # Create a new subdirectory for the dl1ab output
-    dl1ab_subdirectory = Path(options.directory) / options.dl1_prod_id
+    dl1ab_subdirectory = Path(options.directory) / dl1_prod_id
     input_dl1_datafile = dl1ab_subdirectory / f"dl1_LST-1.Run{run_str}.h5"
-    output_directory = Path(options.directory) / options.dl1_prod_id
+    output_directory = Path(options.directory) / dl1_prod_id
     output_directory.mkdir(parents=True, exist_ok=True)
 
     # Prepare and launch the actual lstchain script
@@ -247,46 +255,8 @@ def dl1_datacheck(run_str: str) -> int:
     return analysis_step.rc
 
 
-@trace
-def dl1_to_dl2(run_str: str) -> int:
-    """
-    It prepares and execute the dl1 to dl2 lstchain scripts that applies
-    the already trained RFs models to DL1 files. It identifies the
-    primary particle, reconstructs its energy and direction.
-
-    Parameters
-    ----------
-    run_str: str
-
-    Returns
-    -------
-    rc: int
-    """
-    dl1ab_subdirectory = Path(options.directory) / options.dl1_prod_id
-    dl2_subdirectory = Path(options.directory) / options.dl2_prod_id
-    dl2_config = Path(cfg.get("lstchain", "dl2_config"))
-    rf_models_directory = Path(cfg.get("lstchain", "RF_MODELS"))
-    dl1_file = dl1ab_subdirectory / f"dl1_LST-1.Run{run_str}.h5"
-
-    command = cfg.get("lstchain", "dl1_to_dl2")
-    cmd = [
-        command,
-        f"--input-file={dl1_file}",
-        f"--output-dir={dl2_subdirectory}",
-        f"--path-models={rf_models_directory}",
-        f"--config={dl2_config}",
-    ]
-
-    if options.simulate:
-        return 0
-
-    analysis_step = AnalysisStage(run=run_str, command_args=cmd, config_file=dl2_config.name)
-    analysis_step.execute()
-    return analysis_step.rc
-
-
 def main():
-    """Performs the analysis steps to convert raw data into DL2 files."""
+    """Performs the analysis steps to convert raw data into DL1b files."""
     (
         calibration_file,
         drs4_ped_file,
@@ -296,6 +266,8 @@ def main():
         run_summary_file,
         pedestal_ids_file,
         run_number,
+        dl1b_config,
+        dl1_prod_id,
     ) = data_sequence_cli_parsing()
 
     if options.verbose:
@@ -313,6 +285,8 @@ def main():
         run_summary_file,
         pedestal_ids_file,
         run_number,
+        dl1b_config,
+        dl1_prod_id,
     )
     sys.exit(rc)
 
osa/scripts/gain_selection.py CHANGED
@@ -276,11 +276,10 @@ def apply_gain_selection(date: datetime, start: int, end: int, tool: str = None,
     data_runs = summary_table[summary_table["run_type"] == "DATA"]
     log.info(f"Found {len(data_runs)} DATA runs to which apply the gain selection")
 
-    base_dir = Path(cfg.get("LST1", "BASE"))
     date_str = date_to_dir(date)
-    r0_dir = base_dir / "R0" / date_str
-    output_dir = base_dir / f"R0G/{date_str}"
-    log_dir = base_dir / f"R0G/log/{date_str}"
+    r0_dir = Path(cfg.get("LST1", "RAW_R0_DIR")) / date_str
+    output_dir = Path(cfg.get("LST1", "R0_DIR")) / date_str
+    log_dir = Path(cfg.get("LST1", "R0_DIR")) / f"log/{date_str}"
     if not simulate:
         output_dir.mkdir(parents=True, exist_ok=True)
         log_dir.mkdir(parents=True, exist_ok=True)
@@ -356,9 +355,10 @@ def update_history_file(run_id: str, subrun: str, log_dir: Path, history_file: P
 
 def is_run_already_copied(date: datetime, run_id: int) -> bool:
     """Check if the R0 files of a given run have already been copied to the R0G directory."""
-    base_dir = Path(cfg.get("LST1", "BASE"))
-    r0_files = glob.glob(f"{base_dir}/R0/{date_to_dir(date)}/LST-1.?.Run{run_id:05d}.????.fits.fz")
-    r0g_files = glob.glob(f"{base_dir}/R0G/{date_to_dir(date)}/LST-1.?.Run{run_id:05d}.????.fits.fz")
+    r0_dir = Path(cfg.get("LST1", "RAW_R0_DIR"))
+    r0g_dir = Path(cfg.get("LST1", "R0_DIR"))
+    r0_files = glob.glob(f"{r0_dir}/{date_to_dir(date)}/LST-1.?.Run{run_id:05d}.????.fits.fz")
+    r0g_files = glob.glob(f"{r0g_dir}/{date_to_dir(date)}/LST-1.?.Run{run_id:05d}.????.fits.fz")
     return len(r0_files)==len(r0g_files)
 
 
@@ -465,9 +465,10 @@ def check_failed_jobs(date: datetime):
     missing_runs = []
 
     date_str = date_to_dir(date)
-    base_dir = Path(cfg.get("LST1", "BASE"))
-    r0_files = glob.glob(f"{base_dir}/R0/{date_str}/LST-1.?.Run?????.????.fits.fz")
-    r0g_files = glob.glob(f"{base_dir}/R0G/{date_str}/LST-1.?.Run?????.????.fits.fz")
+    r0_dir = Path(cfg.get("LST1", "RAW_R0_DIR")) / date_str
+    r0g_dir = Path(cfg.get("LST1", "R0_DIR")) / date_str
+    r0_files = glob.glob(f"{r0_dir}/LST-1.?.Run?????.????.fits.fz")
+    r0g_files = glob.glob(f"{r0g_dir}/LST-1.?.Run?????.????.fits.fz")
     all_r0_runs = [parse_r0_filename(i).run for i in r0_files]
     all_r0g_runs = [parse_r0_filename(i).run for i in r0g_files]
 
@@ -478,17 +479,15 @@ def check_failed_jobs(date: datetime):
 
     missing_runs.sort()
     if missing_runs:
-        output_dir = base_dir / f"R0G/{date_str}/"
         log.info(
             f"Some runs are missing. Copying R0 files of runs {pd.Series(missing_runs).unique()} "
-            f"directly to {output_dir}"
+            f"directly to {r0g_dir}"
         )
 
         for run in missing_runs:
-
-            files = base_dir.glob(f"R0/{date_str}/LST-1.?.Run{run:05d}.????.fits.fz")
+            files = r0_dir.glob(f"LST-1.?.Run{run:05d}.????.fits.fz")
             for file in files:
-                sp.run(["cp", file, output_dir])
+                sp.run(["cp", file, r0g_dir])
 
     GainSel_dir = Path(cfg.get("LST1", "GAIN_SELECTION_FLAG_DIR"))
     flagfile_dir = GainSel_dir / date_str
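Throughout gain_selection.py the directories are no longer derived from a single BASE path with hard-coded R0/R0G subdirectories; the script now reads RAW_R0_DIR (raw input) and R0_DIR (gain-selected output) from the [LST1] section of the configuration. A self-contained illustration with made-up values (the real ones live in sequencer.cfg):

    from configparser import ConfigParser
    from pathlib import Path

    cfg = ConfigParser()
    cfg.read_string("[LST1]\nRAW_R0_DIR = /data/raw/R0\nR0_DIR = /data/R0G\n")

    date_str = "20240101"
    r0_dir = Path(cfg.get("LST1", "RAW_R0_DIR")) / date_str       # input, as recorded
    output_dir = Path(cfg.get("LST1", "R0_DIR")) / date_str       # gain-selected output
    log_dir = Path(cfg.get("LST1", "R0_DIR")) / f"log/{date_str}"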