lstosa-0.10.6-py3-none-any.whl → lstosa-0.10.7-py3-none-any.whl

This diff compares the contents of two publicly released versions of the package as published to one of the supported registries. It is provided for informational purposes only and reflects the changes between the versions as they appear in the public registry.
lstosa-0.10.6.dist-info/METADATA → lstosa-0.10.7.dist-info/METADATA
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: lstosa
- Version: 0.10.6
+ Version: 0.10.7
  Summary: Onsite analysis pipeline for the CTA LST-1
  Author: María Láinez, José Enrique Ruiz, Lab Saha, Andrés Baquero, José Luis Contreras, Maximilian Linhoff
  Author-email: Daniel Morcuende <dmorcuen@ucm.es>
@@ -20,8 +20,8 @@ Requires-Python: >=3.9
  Description-Content-Type: text/markdown
  License-File: LICENSE
  Requires-Dist: astropy ~=5.0
- Requires-Dist: lstchain ~=0.10.0
- Requires-Dist: matplotlib ~=3.7
+ Requires-Dist: lstchain >=0.10.5
+ Requires-Dist: matplotlib
  Requires-Dist: numpy
  Requires-Dist: pandas
  Requires-Dist: pyyaml
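
The only dependency changes are the lstchain pin, relaxed from ~=0.10.0 to >=0.10.5, and matplotlib, which is no longer pinned to the 3.7 series. A minimal sketch (not part of the package) for checking that an existing environment already satisfies the new lstchain requirement, assuming the third-party packaging module is available:

    from importlib.metadata import version
    from packaging.specifiers import SpecifierSet

    # Requirement string taken from the new METADATA; the check itself is illustrative.
    required = SpecifierSet(">=0.10.5")
    installed = version("lstchain")
    print(f"lstchain {installed} satisfies '>=0.10.5': {installed in required}")
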
lstosa-0.10.6.dist-info/RECORD → lstosa-0.10.7.dist-info/RECORD
@@ -1,5 +1,5 @@
  osa/__init__.py,sha256=crotf1NMTfNdZuCua_5T_jk3kvZrAAwVw4FPrfxv994,193
- osa/_version.py,sha256=0ek8_w_jF04UB6FpW1K7d1RgH0zQMd77H5TbKzdthjI,413
+ osa/_version.py,sha256=Tqz9jfXJ9oRFN5_F1Y4Bx09mLTPY4KYQNRrHQQNprBE,413
  osa/conftest.py,sha256=b_26FciV8NZAdHj2S10E8cE4bettinLJY83Roe1a5L4,19358
  osa/job.py,sha256=acdiaRlHsc2GLyz2Oz_RBiawJo9QKK-tsajYdM7X0_Q,25393
  osa/osadb.py,sha256=pkCuYbEG-moHG0uQHxwB7giQAv2XTld4HJ5gdn1F1hA,2422
@@ -39,8 +39,8 @@ osa/scripts/calibration_pipeline.py,sha256=g9o1chqCRRSo7GNuQZRABjGnJYjZyfhkPgRLP
  osa/scripts/closer.py,sha256=eZLUlqot4EXwL9IlU21tKRZ2GvZ4i8ill0c2QdEkoDY,15219
  osa/scripts/copy_datacheck.py,sha256=tfDs6oTdPbii4BOXp6bTHuED0xNJeqaPFrv6Ed7ZnWc,3104
  osa/scripts/datasequence.py,sha256=gXAp8arbLPEK-sca9VnME6-2XfUzBFIoEFchlUZYrXI,9260
- osa/scripts/gain_selection.py,sha256=BmNjtfeCFw5OGymaLG1PFPDcn8-yDOG6fWaifOCOHLM,8095
- osa/scripts/provprocess.py,sha256=pKR3wf3krAGmeHen_au2RmqJJtNBcX29J3XPQYKrHRE,18536
+ osa/scripts/gain_selection.py,sha256=yaz2fKicuCDgLbD3fhb3l9LBlXLEEO3yuZO-oDc2IPo,8084
+ osa/scripts/provprocess.py,sha256=mufkZe6_qwH3DGqTFxINIc01hciF5RMpw3n_Mp7vwXU,18629
  osa/scripts/reprocess_longterm.py,sha256=wMfc3UVwickkGFiviIhOlB9ebMIqQPWoUrgg8hQ78Lg,2138
  osa/scripts/reprocessing.py,sha256=D-J8Rl3GrkWpxYkk6ci79oJOMewgGdxLkQgaHCAZuqs,3417
  osa/scripts/sequencer.py,sha256=6Cg-eIExk0eN8-HXkO6DBeFEWOI-FznDueqJEpbEzos,8163
@@ -48,9 +48,9 @@ osa/scripts/sequencer_webmaker.py,sha256=99P1rJiBlvNidDeppvtw4kOoAfsOxSMJ2sm4xlg
  osa/scripts/show_run_summary.py,sha256=UpdTDRfncdUBKhPCy3reCKSk_saOsTLeMdyZHUV300Q,5197
  osa/scripts/show_run_summary_tcu.py,sha256=SoDLVKdQHOJkfenFguBOfXf10Gyv7heXSQAFnDVZqMs,2468
  osa/scripts/simulate_processing.py,sha256=NiRVYiwZENt_mnKncytgJT23_-tJMb1B5PswM12nnX4,6941
- osa/scripts/update_source_catalog.py,sha256=lJXYWI92YB0SrafsND4do0kjPOV_mP2MzuyMfBJbY-0,6265
+ osa/scripts/update_source_catalog.py,sha256=GHwWFc-y6S4KkUJxUVM5drdAnVDD0-n3D-Tv3CCmh4E,7218
  osa/scripts/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- osa/scripts/tests/test_osa_scripts.py,sha256=UiUTXw5uK2iMZt7thRS1UfqoAY5b9s8k8TB143BnXcw,12587
+ osa/scripts/tests/test_osa_scripts.py,sha256=xgBic0sW-eao2MSgYgnP-MB6QbmDK6IIY69m0SGdzaA,12588
  osa/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  osa/tests/test_jobs.py,sha256=F0jsHZ9BYB_cCHHxlXyO9v1E5_-mBJhuFtshtsAtnXo,15260
  osa/tests/test_osa.py,sha256=QCOsjUgPuNMHoef3Ym2sDXVjun2LaBrfKyroAIH-os8,415
@@ -75,9 +75,9 @@ osa/workflow/dl3.py,sha256=kz7L5jcKHFJ--UdQ8HQKLzWO6nxc2LLOTz42ExcqzTk,9921
  osa/workflow/stages.py,sha256=WYgUM2XDIaUjCc4_Zs_VSGW6gk73EaKcHk6ZMnPds74,6692
  osa/workflow/tests/test_dl3.py,sha256=aY5bb-8OcZGAXG3JPCZihChzkA_GsWjRIa31BHZn3Dg,299
  osa/workflow/tests/test_stages.py,sha256=TmC00XFACWZp740TQeFaokWi3C50ovj_XGiySWrrdZk,3944
- lstosa-0.10.6.dist-info/LICENSE,sha256=h6iWot11EtMvaDaS_AvCHKLTNByO5wEbMyNj1c90y1c,1519
- lstosa-0.10.6.dist-info/METADATA,sha256=Z1GxQGKizr5McPZEq-mhPLuYRmNjzLV8teV87W62kdA,7354
- lstosa-0.10.6.dist-info/WHEEL,sha256=oiQVh_5PnQM0E3gPdiz09WCNmwiHDMaGer_elqB3coM,92
- lstosa-0.10.6.dist-info/entry_points.txt,sha256=e5x7xddaqZhfdZPsErhHInqR4UGHsxXIlylEbTie0_8,928
- lstosa-0.10.6.dist-info/top_level.txt,sha256=_Tj8zVHdrOoWZuuWTHbDpNofxW0imUmKdlXhnxsXJek,4
- lstosa-0.10.6.dist-info/RECORD,,
+ lstosa-0.10.7.dist-info/LICENSE,sha256=h6iWot11EtMvaDaS_AvCHKLTNByO5wEbMyNj1c90y1c,1519
+ lstosa-0.10.7.dist-info/METADATA,sha256=v28ofFiZTcCliCrNe6aGtQ-C9GxvHlJm__FfAdKTKfQ,7348
+ lstosa-0.10.7.dist-info/WHEEL,sha256=oiQVh_5PnQM0E3gPdiz09WCNmwiHDMaGer_elqB3coM,92
+ lstosa-0.10.7.dist-info/entry_points.txt,sha256=e5x7xddaqZhfdZPsErhHInqR4UGHsxXIlylEbTie0_8,928
+ lstosa-0.10.7.dist-info/top_level.txt,sha256=_Tj8zVHdrOoWZuuWTHbDpNofxW0imUmKdlXhnxsXJek,4
+ lstosa-0.10.7.dist-info/RECORD,,
osa/_version.py CHANGED
@@ -12,5 +12,5 @@ __version__: str
  __version_tuple__: VERSION_TUPLE
  version_tuple: VERSION_TUPLE

- __version__ = version = '0.10.6'
- __version_tuple__ = version_tuple = (0, 10, 6)
+ __version__ = version = '0.10.7'
+ __version_tuple__ = version_tuple = (0, 10, 7)
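
Only the release identifiers change here. For illustration, downstream code could pin against the new release like this (the attribute names come from the file above; the guard itself is hypothetical):

    from osa._version import __version__, version_tuple

    # Hypothetical guard for behaviour introduced in 0.10.7.
    assert version_tuple >= (0, 10, 7), f"need lstosa >= 0.10.7, found {__version__}"
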
osa/scripts/gain_selection.py CHANGED
@@ -20,7 +20,7 @@ from osa.job import get_sacct_output, FORMAT_SLURM

  log = myLogger(logging.getLogger(__name__))

- PATH = "PATH=/fefs/aswg/software/gain_selection/bin:$PATH"
+ PATH = "PATH=/fefs/aswg/software/offline_dvr/bin:$PATH"


  def get_sbatch_script(
@@ -36,7 +36,7 @@ def get_sbatch_script(
  #SBATCH --job-name "gain_selection_{run_id:05d}"
  #SBATCH --export {PATH}

- lst_select_gain {input_file} {output_dir} {ref_time} {ref_counter} {module} {ref_source}
+ lst_dvr {input_file} {output_dir} {ref_time} {ref_counter} {module} {ref_source}
  """
  )

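
The gain-selection job template now points at the offline_dvr installation and calls lst_dvr instead of lst_select_gain, with the same arguments. As a rough sketch of how such a template is typically rendered and handed to Slurm (build_sbatch_script, the paths, and the submission call are illustrative, not lstosa's actual code; the PATH value and the lst_dvr command come from the diff):

    import subprocess as sp
    from textwrap import dedent

    PATH = "PATH=/fefs/aswg/software/offline_dvr/bin:$PATH"

    def build_sbatch_script(run_id: int, input_file: str, output_dir: str) -> str:
        # Loosely modelled on the template in the diff; the real get_sbatch_script
        # takes more arguments (ref_time, ref_counter, module, ref_source).
        return dedent(
            f"""\
            #!/bin/bash
            #SBATCH --job-name "gain_selection_{run_id:05d}"
            #SBATCH --export {PATH}

            lst_dvr {input_file} {output_dir}
            """
        )

    script = build_sbatch_script(2973, "/path/to/run.fits.fz", "/path/to/output")
    # One common way to submit a generated script: pipe it to sbatch on stdin.
    sp.run(["sbatch"], input=script, text=True, check=True)
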
osa/scripts/provprocess.py CHANGED
@@ -81,8 +81,8 @@ def parse_lines_log(filter_cut, calib_runs, run_number):
  filter_cut = "all"
  cuts = {
  "calibration": ["drs4_pedestal", "calibrate_charge"],
- "r0_to_dl1": ["r0_to_dl1", "dl1ab"],
- "dl1_to_dl2": ["dl1_datacheck", "dl1_to_dl2"],
+ "r0_to_dl1": ["r0_to_dl1", "dl1ab", "dl1_datacheck"],
+ "dl1_to_dl2": ["dl1_to_dl2"],
  }
  cuts["all"] = cuts["calibration"] + cuts["r0_to_dl1"] + cuts["dl1_to_dl2"]

@@ -413,29 +413,31 @@ def produce_provenance(session_log_filename, base_filename):
  read_prov(filename=session_log_filename),
  str(paths_r0_dl1["out_path"]),
  )
- dl1_lines = plines_r0 + plines_ab[1:]
+ plines_check = parse_lines_run(
+ "dl1_datacheck",
+ read_prov(filename=session_log_filename),
+ str(paths_r0_dl1["out_path"]),
+ )
+ dl1_lines = plines_r0 + plines_ab[1:] + plines_check[1:]

  # create r0_to_dl1 prov files only if filtering
  if options.filter == "r0_to_dl1":
- produce_provenance_files(plines_r0 + plines_ab[1:], paths_r0_dl1)
+ produce_provenance_files(plines_r0 + plines_ab[1:] + plines_check[1:], paths_r0_dl1)

  if options.filter == "dl1_to_dl2" or not options.filter:
- paths_dl1_dl2 = define_paths("dl1_to_dl2", PATH_DL2, options.dl2_prod_id, base_filename)
- plines_check = parse_lines_run(
- "dl1_datacheck",
- read_prov(filename=session_log_filename),
- str(paths_dl1_dl2["out_path"]),
- )
- plines_dl2 = parse_lines_run(
- "dl1_to_dl2",
- read_prov(filename=session_log_filename),
- str(paths_dl1_dl2["out_path"]),
- )
- dl1_dl2_lines = plines_check + plines_dl2[1:]
+ if not options.no_dl2:
+ paths_dl1_dl2 = define_paths("dl1_to_dl2", PATH_DL2, options.dl2_prod_id, base_filename)
+ plines_dl2 = parse_lines_run(
+ "dl1_to_dl2",
+ read_prov(filename=session_log_filename),
+ str(paths_dl1_dl2["out_path"]),
+ )
+ dl1_dl2_lines = plines_dl2

  # create dl1_to_dl2 prov files only if filtering
  if options.filter == "dl1_to_dl2":
- produce_provenance_files(plines_check + plines_dl2[1:], paths_dl1_dl2)
+ if not options.no_dl2:
+ produce_provenance_files(plines_dl2, paths_dl1_dl2)

  # create calibration_to_dl1 and calibration_to_dl2 prov files
  if not options.filter:
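
Both provprocess.py hunks implement the same regrouping: dl1_datacheck moves from the dl1_to_dl2 stage group into r0_to_dl1, so its provenance lines are bundled with the DL1 products, and DL2 provenance is skipped entirely when the no_dl2 option is set. A small self-contained sketch of how the new stage mapping selects log lines (keep_line is an illustrative helper, not lstosa's API; the mapping itself is taken from the diff):

    # Stage groups as they stand after this change.
    cuts = {
        "calibration": ["drs4_pedestal", "calibrate_charge"],
        "r0_to_dl1": ["r0_to_dl1", "dl1ab", "dl1_datacheck"],
        "dl1_to_dl2": ["dl1_to_dl2"],
    }
    cuts["all"] = cuts["calibration"] + cuts["r0_to_dl1"] + cuts["dl1_to_dl2"]

    def keep_line(activity: str, filter_cut: str = "all") -> bool:
        """Return True if a provenance log line for `activity` belongs to the chosen group."""
        return activity in cuts[filter_cut]

    assert keep_line("dl1_datacheck", "r0_to_dl1")       # now part of the DL1 group
    assert not keep_line("dl1_datacheck", "dl1_to_dl2")  # no longer in the DL2 group
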
osa/scripts/tests/test_osa_scripts.py CHANGED
@@ -99,10 +99,10 @@ def test_simulate_processing(

  with open(json_file_dl1) as file:
  dl1 = yaml.safe_load(file)
- assert len(dl1["entity"]) == 16
- assert len(dl1["activity"]) == 4
- assert len(dl1["used"]) == 13
- assert len(dl1["wasGeneratedBy"]) == 7
+ assert len(dl1["entity"]) == 19
+ assert len(dl1["activity"]) == 5
+ assert len(dl1["used"]) == 15
+ assert len(dl1["wasGeneratedBy"]) == 10

  with open(json_file_dl2) as file:
  dl2 = yaml.safe_load(file)
osa/scripts/update_source_catalog.py CHANGED
@@ -5,30 +5,30 @@ from pathlib import Path
  from textwrap import dedent

  import click
- import re
  import pandas as pd
  from astropy import units as u
- from astropy.table import Table, join, vstack, unique
+ from astropy.table import Table, join, unique, vstack
  from astropy.time import Time
  from lstchain.io.io import dl1_params_lstcam_key
- from lstchain.reco.utils import get_effective_time, add_delta_t_key
+ from lstchain.reco.utils import add_delta_t_key, get_effective_time
+
+ from osa.paths import get_major_version
  from osa.utils.utils import get_lstchain_version

- pd.set_option('display.float_format', '{:.1f}'.format)
+ pd.set_option("display.float_format", "{:.1f}".format)

- logging.basicConfig(
- level=logging.INFO,
- format='%(asctime)s:%(levelname)s:%(message)s'
- )
+ logging.basicConfig(level=logging.INFO, format="%(asctime)s:%(levelname)s:%(message)s")
  log = logging.getLogger(__name__)


  BASE_DL1 = Path("/fefs/aswg/data/real/DL1")
  BASE_MONITORING = Path("/fefs/aswg/data/real/monitoring")
+ CATALOG_DIR = Path("/fefs/aswg/data/real/OSA/Catalog")


  def add_table_to_html(html_table):
- return dedent(f"""\
+ return dedent(
+ f"""\
  <html>
  <head>
  <link href="osa.css" rel="stylesheet" type="text/css">
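
The ad-hoc regex previously used in this script is replaced by get_major_version from osa.paths, whose implementation is not shown in this diff. Based on the regex it replaces and the directory layout used below (e.g. v0.10/tailcut84/...), a functionally equivalent helper could look like the following sketch (an assumption, not the real osa.paths code):

    import re

    def get_major_version(version: str) -> str:
        r"""Return the major production version, e.g. 'v0.10.5' -> 'v0.10'.

        Hypothetical stand-in for osa.paths.get_major_version, mirroring the
        regex it replaces: re.search(r'\D\d+\.\d+', version)[0].
        """
        match = re.search(r"\D\d+\.\d+", version)
        if match is None:
            raise ValueError(f"Could not parse a major version from {version!r}")
        return match[0]

    assert get_major_version("v0.10.5") == "v0.10"
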
@@ -37,11 +37,13 @@ def add_table_to_html(html_table):
  {html_table}
  </body>
  </html>
- """)
+ """
+ )


  def add_query_table_to_html(html_table):
- return dedent(f"""\
+ return dedent(
+ f"""\
  <html>
  <head>
  <meta http-equiv="Content-type" content="text/html; charset=utf-8">
@@ -96,87 +98,111 @@ def add_query_table_to_html(html_table):
  {html_table}
  </body>
  </html>
- """)
+ """
+ )


- def add_run_start_iso(table):
- start_times = []
- for timestamp in table["run_start"]:
- start_time = Time(timestamp * u.ns, format="unix_tai")
- start_times.append(start_time.utc.iso)
- table.replace_column("run_start", start_times)
+ def add_start_and_elapsed(table: Table, datedir: str, version: str) -> None:
+ """Add columns with the timestamp of first events and elapsed time of the runs.

+ This information is taken from the merged DL1 files. Two new columns are added
+ to the input table.
+
+ Parameters
+ ----------
+ table : astropy.table.Table
+ Astropy Table to which the two new columns are to be added.
+ datedir : str
+ Date directory in YYYYMMDD format.
+ version : str
+ Production version of the processing in the format 'vW.X.Y.Z'.
+ """
+ if "run_id" not in table.columns:
+ raise KeyError("Run ID not present in given table. Please check its content.")

- def add_elapsed(table, datedir, version):
+ start_times = []
  elapsed_times = []
+
  for run in table["run_id"]:
- major_version = re.search(r'\D\d+\.\d+', version)[0]
+ major_version = get_major_version(version)
  file = BASE_DL1 / datedir / major_version / f"tailcut84/dl1_LST-1.Run{run:05d}.h5"
  df = pd.read_hdf(file, key=dl1_params_lstcam_key)
+
+ # Timestamp of the first event
+ first_time = Time(df["dragon_time"][0], format="unix", scale="utc")
+ start_times.append(first_time.utc.iso)
+
+ # Elapsed time of the run
  df_delta = add_delta_t_key(df)
  _, elapsed_t = get_effective_time(df_delta)
-
  elapsed_times.append(elapsed_t.to(u.min))

+ # Modify the input table by adding two new columns
  table.add_column(elapsed_times, name="Elapsed [min]")
+ table.add_column(start_times, name="Run start [UTC]")


  def copy_to_webserver(html_file, csv_file):
- sp.run(["scp", str(html_file), "datacheck:/home/www/html/datacheck/lstosa/."])
- sp.run(["scp", str(csv_file), "datacheck:/home/www/html/datacheck/lstosa/."])
+ sp.run(["scp", str(html_file), "datacheck:/home/www/html/datacheck/lstosa/."], check=True)
+ sp.run(["scp", str(csv_file), "datacheck:/home/www/html/datacheck/lstosa/."], check=True)


  @click.command()
- @click.argument(
- 'date',
- type=click.DateTime(formats=["%Y-%m-%d"])
- )
+ @click.argument("date", type=click.DateTime(formats=["%Y-%m-%d"]))
  @click.option("-v", "--version", type=str, default=get_lstchain_version())
  def main(date: datetime = None, version: str = get_lstchain_version()):
+ """Update source catalog with new run entries from a given date in format YYYY-MM-DD.
+
+ Notes
+ -----
+ It needs to be run as lstanalyzer user.
  """
- Update source catalog with new run entries from a given date in
- format YYYY-MM-DD. It needs to be run as lstanalyzer user.
- """
- csv_file = Path("/fefs/aswg/data/real/OSA/Catalog/LST_source_catalog.ecsv")
- table = Table.read(csv_file)
+ catalog_path = CATALOG_DIR / "LST_source_catalog.ecsv"
+ catalog_table = Table.read(catalog_path)

- # Open today's table and append its content to general table
+ # Open table for given date and append its content to the table with entire catalog
  datedir = date.strftime("%Y%m%d")
  today_catalog = Table.read(BASE_MONITORING / f"RunCatalog/RunCatalog_{datedir}.ecsv")
  today_runsummary = Table.read(BASE_MONITORING / f"RunSummary/RunSummary_{datedir}.ecsv")
+ # Keep only astronomical data runs
  today_runsummary = today_runsummary[today_runsummary["run_type"] == "DATA"]
- todays_join = join(today_runsummary, today_catalog)
- todays_join.add_column(date.strftime("%Y-%m-%d"), name="date_dir")
- todays_join.keep_columns(["run_id", "run_start", "source_name", "date_dir"])
+ todays_info = join(today_runsummary, today_catalog)
+ todays_info.add_column(date.strftime("%Y-%m-%d"), name="date_dir")
+ todays_info.keep_columns(["run_id", "source_name", "date_dir"])
+
  # Add start of run in iso format and elapsed time for each run
  log.info("Getting run start and elapsed time")
- add_run_start_iso(todays_join)
- add_elapsed(todays_join, datedir, version)
- # Change col names
- todays_join.rename_column('run_id', 'Run ID')
- todays_join.rename_column('run_start', 'Run start [UTC]')
- todays_join.rename_column('source_name', 'Source name')
- todays_join.rename_column('date_dir', 'Date directory')
-
- # Add new rows
+ add_start_and_elapsed(todays_info, datedir, version)
+
+ # Change column names
+ todays_info.rename_column("run_id", "Run ID")
+ todays_info.rename_column("source_name", "Source name")
+ todays_info.rename_column("date_dir", "Date directory")
+
+ # Add new rows from given date to the whole catalog table
  log.info("Adding new rows to table")
- new_table = vstack([table, todays_join])
- table_unique = unique(new_table, keys="Run ID", keep='last')
+ new_table = vstack([catalog_table, todays_info])
+ table_unique = unique(new_table, keys="Run ID", keep="last")

- # To pandas and HTML
+ # To pandas
  log.info("Converting to pandas and HTML")
  df = table_unique.to_pandas()
  df = df.sort_values(by="Run ID", ascending=False)
+
+ # To HTML
  html_table = df.to_html(index=False, justify="left")
  html_table = html_table.replace(
  '<table border="1" class="dataframe">',
- '<table class="display compact" id="table139855676982704">')
+ '<table class="display compact" id="table139855676982704">',
+ )
  html_content = add_query_table_to_html(html_table)
- html_file = Path("LST_source_catalog.html")
+
+ # Save the HTML and ECSV files and copy them to the LST-1 webserver
+ html_file = CATALOG_DIR / "LST_source_catalog.html"
  html_file.write_text(html_content)
- table_unique.write(csv_file, delimiter=",", overwrite=True)
+ table_unique.write(catalog_path, delimiter=",", overwrite=True)

- copy_to_webserver(html_file, csv_file)
+ copy_to_webserver(html_file, catalog_path)


  if __name__ == "__main__":
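
The rewritten add_start_and_elapsed helper takes the run start from the first event's dragon_time in the merged DL1 file instead of converting the RunSummary run_start column. A small self-contained illustration of the two astropy.time conversions involved (the numeric timestamps are made up):

    from astropy import units as u
    from astropy.time import Time

    # Old approach: RunSummary "run_start" is TAI nanoseconds since the unix epoch.
    run_start_ns = 1_700_000_000_000_000_000
    old_iso = Time(run_start_ns * u.ns, format="unix_tai").utc.iso

    # New approach: "dragon_time" is a UTC unix timestamp in seconds read from the DL1 events.
    dragon_time = 1_700_000_000.0
    new_iso = Time(dragon_time, format="unix", scale="utc").utc.iso

    print(old_iso, new_iso)
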