lstosa 0.11.5__py3-none-any.whl → 0.11.7__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as published to a supported public registry. The information is provided for informational purposes only and reflects the package contents as they appear in that registry.
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: lstosa
- Version: 0.11.5
+ Version: 0.11.7
  Summary: Onsite analysis pipeline for the CTA LST-1
  Author: María Láinez, José Enrique Ruiz, Lab Saha, Andrés Baquero, José Luis Contreras, Maximilian Linhoff
  Author-email: Daniel Morcuende <dmorcuen@ucm.es>
@@ -1,9 +1,9 @@
  osa/__init__.py,sha256=crotf1NMTfNdZuCua_5T_jk3kvZrAAwVw4FPrfxv994,193
- osa/_version.py,sha256=ZyFxcHP5D9gYt6tCf5RRnkEsC8isbt9wg2nFTag5BOE,706
- osa/conftest.py,sha256=AdSHdWqjeMTgIeZEF8fdw6eSliySouivMWCPIqUV2os,24667
+ osa/_version.py,sha256=KU0yNPBwd_J_8Kv0fRgFC7mC3xqwbIvT7rTRw-5uF-A,706
+ osa/conftest.py,sha256=ziqbE03yLFiBw4bEQv-1jEYz0rNpTLwfsvxBYEri0TE,24608
  osa/job.py,sha256=CunwW7xA4mWEocS6KkDZ1K6h_LYh_ePZMyGHyCa6CKg,27863
  osa/osadb.py,sha256=pkCuYbEG-moHG0uQHxwB7giQAv2XTld4HJ5gdn1F1hA,2422
- osa/paths.py,sha256=B8-HA39YmThl2Dd9p3OFuYSYJf9MWg4Qdc0zmAhWjOE,18100
+ osa/paths.py,sha256=tgFXTMyDKVLMLuoXGYFcfeTmOcVcnZXLw1GnMOruvhc,19963
  osa/raw.py,sha256=ZNIsuqfx5ljoz_hwhSuafdKf-wr8-cxRJmel-A2endg,1337
  osa/report.py,sha256=sL2V7n8Y_UUaSDbWJY2o4UxDb4FU5AaFIRR8R25DB8o,4634
  osa/version.py,sha256=9T2TtuGBQeOy5PJDxMCeGlqx5baxLaq47VmFTDc09z8,796
@@ -12,17 +12,17 @@ osa/configs/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  osa/configs/config.py,sha256=cX0Vr4sorBVQ2KRqPIRSEGENKH_uC5lrgVHx1hp6YTk,1148
  osa/configs/datamodel.py,sha256=L_WRM91PBlMrtuE30akh7YR-56P0g9D994qzKSfhNJc,1950
  osa/configs/options.py,sha256=WbsyKhOs1Ud4Yt21O8KcISUvJYmhipa8vl_pxD_TZf4,558
- osa/configs/sequencer.cfg,sha256=49m-umbLo9w4gCv1oEdvFqukCh1py2nJDduOu28b_Qc,5652
+ osa/configs/sequencer.cfg,sha256=CEqegb_Irbx0fM6KcQgbvoMXIAI1TyYTKjR8AToSr8k,5690
  osa/high_level/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  osa/high_level/selection_cuts.toml,sha256=ReSmcKtOPZY5JsZ9ExnxYdz7OrJEB8gghCbzHmeOyFg,128
  osa/high_level/significance.py,sha256=mMeG_y2wDnt0O2lSosYkSjaGZQl0V4GnuFrqKwSKSbE,9066
  osa/high_level/tests/test_significance.py,sha256=nTHgwnL2zkQJduJuWXC4J4N8DlAghOidQki7njZmpSI,572
  osa/nightsummary/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- osa/nightsummary/database.py,sha256=6-1Y0Rw67QQtp0ekoynX9CVpImfECEUO64sA3fw24og,4276
- osa/nightsummary/extract.py,sha256=vI_A_GrP6eQH8f2mr8OreUfrs5LwLNcH9KQLx_WikhQ,11498
+ osa/nightsummary/database.py,sha256=XI2eozRK49-yDWnmH6irxfFWhi0qh6W0j1RuWXvk-uA,3980
+ osa/nightsummary/extract.py,sha256=VJnQISpT-NR_W0rRUrfI1xb8PkQYSwa1qcFTntS5NPk,11390
  osa/nightsummary/nightsummary.py,sha256=rPEN_J-rJSgsoCR_ONaW4PB9vjJzZvHgw0a7sYOA7wE,2666
  osa/nightsummary/set_source_coordinates.py,sha256=e2UT_I_Epm8vte22TasIp28A3KRdcl4dgV4NjlIxwak,1579
- osa/nightsummary/tests/test_database.py,sha256=w5Fts-H0eTi1KPV6Of0V2ZOD849Ie14KRe4wFhc9Hh0,242
+ osa/nightsummary/tests/test_database.py,sha256=jbWOY1ADJhp949qYpeXMDTMDXn7uZQuiLcMXIUT4avY,128
  osa/nightsummary/tests/test_extract.py,sha256=SJf0ld33UlK1d2jKweCQWNpGY9ZrIU1vFUdX4slnJps,1877
  osa/nightsummary/tests/test_nightsummary.py,sha256=0bgGJtlXzwclO2ma9I3X315X0X1bDkjqCaYcp3fay1s,987
  osa/nightsummary/tests/test_source_coordinates.py,sha256=doyF2e93RnFScPbG79aUmtHLCdGg-5oW8D5qqNDzivw,942
@@ -36,13 +36,14 @@ osa/provenance/config/logger.yaml,sha256=hy_lH3DfbRFh2VM_iawI-c-3wE0cjTRHy465C2e
  osa/scripts/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  osa/scripts/autocloser.py,sha256=yjROBHDMbKHu0HAb0UW_3pZ-K9R20gQJTlMzKCA9dvU,14401
  osa/scripts/calibration_pipeline.py,sha256=oWX2L9WIofVHp5qYsX3YMfwoslfiUgnXtWA2EI8KuG4,6056
- osa/scripts/closer.py,sha256=vhoe9wkvPvMc5O_kXqAiBuWeqm_KrBpYzgb9zs9ZORw,21718
+ osa/scripts/closer.py,sha256=lvaNH2l_fO8PvzThtJ0SDcf9i05vnc-ZfKp4xtj4s2A,21722
  osa/scripts/copy_datacheck.py,sha256=rAJu5-5oJxspynjYqVWLDKnXTF7Ib9xtzSf3_DiIHsk,3212
  osa/scripts/datasequence.py,sha256=0EZYlkX7ouaD66Ia2a5SWKbL0Fhlk8p4knKh0TG-2Gw,8592
- osa/scripts/gain_selection.py,sha256=1-wUCKuwI0W7yjja951sTXTKsGBl5YunLYcT1TwhCMQ,23240
- osa/scripts/gainsel_webmaker.py,sha256=40_DX7RUmImX-31iqRTFPEUvdVXXPSKp0xMEJWuAu80,5006
+ osa/scripts/gain_selection.py,sha256=hgA1o7l8wi6SiGNiG7gQlR2kJ50c3_56ZtaN6DvZeBw,23296
+ osa/scripts/gainsel_webmaker.py,sha256=nCu4x7O3xnsSIOBSQt5j5J-fxlSrBKUlb2A8hI2dWec,5253
+ osa/scripts/interleaved_date.py,sha256=J5aKbPlUdx-dRhYE48SPgYv7B0AxTibUwXVuwyWY8FY,8019
  osa/scripts/provprocess.py,sha256=Zv8sHTOgGNuEzu1QPUF142VQbHyfOsrxO073-xA6KG8,19107
- osa/scripts/reprocess_longterm.py,sha256=wMfc3UVwickkGFiviIhOlB9ebMIqQPWoUrgg8hQ78Lg,2138
+ osa/scripts/reprocess_longterm.py,sha256=a8gc7iiBB16T2sxwYlQrqp5Kgda-WZcKsqYq7dYC6Fc,2137
  osa/scripts/reprocessing.py,sha256=IL30e2McFA0EGbzvkTXdzmKJGBTbafYlCA-PsafyzyY,4571
  osa/scripts/sequencer.py,sha256=hqkTXuYKvHHD6-s_wbzBulb2qpMF-pTLif0UOcIXu70,13150
  osa/scripts/sequencer_catB_tailcuts.py,sha256=zVBuCBiiIoO3HKHbJ0zqaDtuwxkxcziR4T5i-2WyDX8,10476
@@ -51,7 +52,7 @@ osa/scripts/show_run_summary.py,sha256=ofznLdoFHKQB-TELD6g5zMqD_TE9M9BuJR12zHvNd
  osa/scripts/simulate_processing.py,sha256=D3b0kjyUS6Lm7pkQNJ-tJzQMveTwoPabfBBW80RMqN0,6828
  osa/scripts/update_source_catalog.py,sha256=Po4KSBOQCAT2Do1DUu5wnKV48Dq_pONfvtD47hh8fYI,8277
  osa/scripts/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- osa/scripts/tests/test_osa_scripts.py,sha256=HaKVW-E2Gs6vvJoozIeCAg8-sxyu9k5zwNLFDPpCVrs,15116
+ osa/scripts/tests/test_osa_scripts.py,sha256=tKcsjkV7y_YXPCqLir_gXuyAwrqLwoIJxeJ8IpobMZI,15098
  osa/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  osa/tests/test_jobs.py,sha256=ozrMBjHujDhEMzEvJE7WynChqjbBTcORHsHmFXozeWk,16112
  osa/tests/test_osa.py,sha256=QCOsjUgPuNMHoef3Ym2sDXVjun2LaBrfKyroAIH-os8,415
@@ -70,15 +71,15 @@ osa/utils/utils.py,sha256=a--RvA0P2JHVkuyG8uBy1BSFINLi33Ek-rNZ9jg3Fs8,13514
  osa/utils/tests/test_iofile.py,sha256=e35_EqJerp-dEOrOqwXEUZCc5P_9llf2QfveltagfIk,399
  osa/utils/tests/test_utils.py,sha256=ybngpeUyzHgr96Gcx8r2g5oHFTo3aDQIwkC5aE9Ztic,2357
  osa/webserver/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- osa/webserver/utils.py,sha256=ymB2wTzsFkPH4ebUcz_zK_zyolpnBzEbWGYwCvbaHf0,2155
+ osa/webserver/utils.py,sha256=NcxGQ3fDVuQvZ9d99E4LmIrY3KK49e2qJsTad6jjvGc,2135
  osa/workflow/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  osa/workflow/dl3.py,sha256=ZMXPrdJA0WOxDiHEW9sdM2vmYix8R3eSMTGc6o42yxg,9860
  osa/workflow/stages.py,sha256=ILg91VyNXcy--si7CpDa1UFRiugqIH6nKl10Ru2zZVc,7422
  osa/workflow/tests/test_dl3.py,sha256=81Vt4lNxNjdKi_ehzymqfFWFjncw7GuQcix9S0Yigaw,384
  osa/workflow/tests/test_stages.py,sha256=aslfOIjf-rvf3r9N7NtimaOKkVB6K1M3bidgHOzRkMs,3985
- lstosa-0.11.5.dist-info/LICENSE,sha256=h6iWot11EtMvaDaS_AvCHKLTNByO5wEbMyNj1c90y1c,1519
- lstosa-0.11.5.dist-info/METADATA,sha256=w662afh4R8FFG9Ln0iYtMWvvE8pCYXYcVj1rDezy_dA,7302
- lstosa-0.11.5.dist-info/WHEEL,sha256=iAkIy5fosb7FzIOwONchHf19Qu7_1wCWyFNR5gu9nU0,91
- lstosa-0.11.5.dist-info/entry_points.txt,sha256=CzDKpLjZZQm8jJBxOVpMR8Czpgg_Yh-k6IPETp30VZE,1048
- lstosa-0.11.5.dist-info/top_level.txt,sha256=_Tj8zVHdrOoWZuuWTHbDpNofxW0imUmKdlXhnxsXJek,4
- lstosa-0.11.5.dist-info/RECORD,,
+ lstosa-0.11.7.dist-info/LICENSE,sha256=h6iWot11EtMvaDaS_AvCHKLTNByO5wEbMyNj1c90y1c,1519
+ lstosa-0.11.7.dist-info/METADATA,sha256=NPpGT5dYeuo0baO6qjNuzqE0PCLzFdpdBa4OsqkREgk,7302
+ lstosa-0.11.7.dist-info/WHEEL,sha256=WnJ8fYhv8N4SYVK2lLYNI6N0kVATA7b0piVUNvqIIJE,91
+ lstosa-0.11.7.dist-info/entry_points.txt,sha256=CzDKpLjZZQm8jJBxOVpMR8Czpgg_Yh-k6IPETp30VZE,1048
+ lstosa-0.11.7.dist-info/top_level.txt,sha256=_Tj8zVHdrOoWZuuWTHbDpNofxW0imUmKdlXhnxsXJek,4
+ lstosa-0.11.7.dist-info/RECORD,,
@@ -1,5 +1,5 @@
  Wheel-Version: 1.0
- Generator: setuptools (75.3.2)
+ Generator: setuptools (75.3.3)
  Root-Is-Purelib: true
  Tag: py3-none-any
 
osa/_version.py CHANGED
@@ -28,7 +28,7 @@ version_tuple: VERSION_TUPLE
  commit_id: COMMIT_ID
  __commit_id__: COMMIT_ID
 
- __version__ = version = '0.11.5'
- __version_tuple__ = version_tuple = (0, 11, 5)
+ __version__ = version = '0.11.7'
+ __version_tuple__ = version_tuple = (0, 11, 7)
 
  __commit_id__ = commit_id = None
osa/configs/sequencer.cfg CHANGED
@@ -26,11 +26,12 @@ DL1_DIR: %(BASE)s/DL1
  DL1AB_DIR: %(BASE)s/DL1
  DL2_DIR: %(BASE)s/DL2
  DL3_DIR: %(BASE)s/DL3
+ DATACHECK_DIR: %(DL1_DIR)s/datacheck_files
  RF_MODELS: %(BASE)s/models/AllSky
  OSA_DIR: %(BASE)s/OSA
  CLOSER_DIR: %(OSA_DIR)s/Closer
  HIGH_LEVEL_DIR: %(OSA_DIR)s/HighLevel
- LONGTERM_DIR: %(OSA_DIR)s/DL1DataCheck_LongTerm
+ LONGTERM_DIR: %(DATACHECK_DIR)s/night_wise
  MERGED_SUMMARY: %(OSA_DIR)s/Catalog/merged_RunSummary.ecsv
  SOURCE_CATALOG: %(OSA_DIR)s/Catalog
  SEQUENCER_WEB_DIR: %(OSA_DIR)s/SequencerWeb
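Note (not part of the package diff): a minimal sketch of how the two new options chain together, assuming the file is read with Python's configparser (basic "%(option)s" interpolation) and that these options live in the [LST1] section, as the cfg.get("LST1", ...) calls elsewhere in this diff suggest. The BASE value below is a made-up placeholder.

import configparser

cfg = configparser.ConfigParser()
cfg.read_string("""
[LST1]
BASE: /fefs/aswg/data/real
DL1_DIR: %(BASE)s/DL1
DATACHECK_DIR: %(DL1_DIR)s/datacheck_files
LONGTERM_DIR: %(DATACHECK_DIR)s/night_wise
""")

# The night-wise (longterm) datacheck products now live under the DL1
# datacheck tree instead of the OSA directory:
print(cfg.get("LST1", "LONGTERM_DIR"))
# /fefs/aswg/data/real/DL1/datacheck_files/night_wise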
osa/conftest.py CHANGED
@@ -511,15 +511,15 @@ def datacheck_dl1_files(base_test_dir):
 
 
  @pytest.fixture(scope="session")
- def longterm_dir(base_test_dir):
-     directory = base_test_dir / "OSA" / "DL1DataCheck_LongTerm" / prod_id / date_to_dir(date)
+ def longterm_dir(datacheck_dir):
+     directory = datacheck_dir / prod_id / date_to_dir(date)
      directory.mkdir(parents=True, exist_ok=True)
      return directory
 
 
  @pytest.fixture(scope="session")
- def longterm_link_latest_dir(base_test_dir):
-     directory = base_test_dir / "OSA" / "DL1DataCheck_LongTerm" / "night_wise" / "all"
+ def datacheck_dir(base_test_dir):
+     directory = base_test_dir / "DL1" / "datacheck_files" / "night_wise"
      directory.mkdir(parents=True, exist_ok=True)
      return directory
 
osa/nightsummary/database.py CHANGED
@@ -14,27 +14,21 @@ __all__ = ["query", "db_available", "get_run_info_from_TCU"]
 
  log = myLogger(logging.getLogger(__name__))
 
-
- CACO_DB = cfg.get("database", "caco_db")
  TCU_DB = cfg.get("database", "tcu_db")
 
-
  def db_available():
      """Check the connection to the TCU database."""
-     caco_client = MongoClient(CACO_DB, serverSelectionTimeoutMS=3000)
      tcu_client = MongoClient(TCU_DB, serverSelectionTimeoutMS=3000)
      try:
-         caco_client.server_info()
          tcu_client.server_info()
      except ConnectionFailure:
-         log.warning("TCU or CaCo database not available. No source info will be added.")
+         log.warning("TCU database not available. No source info will be added.")
          return False
      else:
-         log.debug("TCU and CaCo database are available. Source info will be added.")
+         log.debug("TCU database is available. Source info will be added.")
          return True
 
-
- def query(obs_id: int, property_name: str):
+ def query(obs_id: int):
      """
      Query the source name and coordinates from TCU database.
 
@@ -42,13 +36,11 @@ def query(obs_id: int, property_name: str):
      ----------
      obs_id : int
          Run number
-     property_name : str
-         Properties from drive information e.g. `DriveControl_SourceName`,
-         `DriveControl_RA_Target`, `DriveControl_Dec_Target`
+
 
      Returns
      -------
-     query_result : str or None
+     query_result : Dict
          Query result from database. It can be either the source name or its coordinates.
 
      Raises
@@ -60,47 +52,47 @@
      if not isinstance(obs_id, int):
          obs_id = int(obs_id)
 
-     caco_client = MongoClient(CACO_DB)
-     tcu_client = MongoClient(TCU_DB)
-
-     with caco_client, tcu_client:
-         run_info = caco_client["CACO"]["RUN_INFORMATION"]
-         run = run_info.find_one({"run_number": obs_id})
+     try:
+         tcu_client = MongoClient(TCU_DB, serverSelectionTimeoutMS=3000)
+         db = tcu_client["lst1_obs_summary"]
+         camera_col = db["camera"]
 
-         try:
-             start = datetime.fromisoformat(run["start_time"].replace("Z", ""))
-             end = datetime.fromisoformat(run["stop_time"].replace("Z", ""))
-         except TypeError:
-             return None
+         run_info = camera_col.find_one({"run_number": obs_id})
 
-         bridges_monitoring = tcu_client["bridgesmonitoring"]
-         property_collection = bridges_monitoring["properties"]
-         chunk_collection = bridges_monitoring["chunks"]
-         descriptors = property_collection.find(
-             {"property_name": property_name},
-         )
+         if not run_info:
+             log.info(f"Run {obs_id} not found 'lst1_obs_summary.camera'")
+         else:
 
-         entries = {"name": property_name, "time": [], "value": []}
+             tstart = run_info.get("tstart")
+             tstop = run_info.get("tstop")
+             run_type = run_info.get("kind")
 
-         for descriptor in descriptors:
-             query_property = {"pid": descriptor["_id"]}
+             tstart_iso = datetime.fromtimestamp(tstart).isoformat(sep=" ", timespec="seconds")
 
-             if start is not None:
-                 query_property["begin"] = {"$gte": start}
+             log.info(f"Run {obs_id} ({run_type}) found.")
+             log.info(f"Time: {tstart_iso} (Timestamp: {tstart})")
 
-             if end is not None:
-                 query_property["end"] = {"$lte": end}
+             telescope_col = db["telescope"]
+             query = {
+                 "tstart": {"$lte": tstop},
+                 "tstop": {"$gte": tstart}
+             }
 
-             chunks = chunk_collection.find(query_property)
+             tel_doc = telescope_col.find_one(query, sort=[("tstart", -1)])
 
-             for chunk in chunks:
-                 for value in chunk["values"]:
-                     entries["time"].append(value["t"])
-                     entries["value"].append(value["val"])
+             if tel_doc:
 
-         source_name = entries["value"][0]
-         return source_name if source_name != "" else None
+                 config = tel_doc.get("data", {}).get("structure", [])[0]
+                 target = config.get("target", {})
+                 source_name = target.get("name", "Desconocido")
+                 ra = target.get("source_ra", "N/A")
+                 dec = target.get("source_dec", "N/A")
+                 return {"source_name": source_name, "ra": ra, "dec": dec}
+             else:
+                 log.info("\nNo information found for that time range in 'lst1_obs_summary.telescope'.")
 
+     except Exception as e:
+         log.info(f"ERROR: {e}")
 
  def get_run_info_from_TCU(run_id: int, tcu_server: str) -> Tuple:
      """
osa/nightsummary/extract.py CHANGED
@@ -173,15 +173,16 @@ def extract_runs(summary_table):
          # Make sure we are looking at actual data runs. Avoid test runs.
          if run.run > 0 and run.type == "DATA":
              log.debug(f"Looking info in TCU DB for run {run.run}")
-             run.source_name = database.query(
-                 obs_id=run.run, property_name="DriveControl_SourceName"
+
+             tcu_result = database.query(
+                 obs_id=run.run
              )
-             run.source_ra = database.query(
-                 obs_id=run.run, property_name="DriveControl_RA_Target"
-             )
-             run.source_dec = database.query(
-                 obs_id=run.run, property_name="DriveControl_Dec_Target"
-             )
+
+             if tcu_result is not None:
+                 run.source_name = tcu_result.get("source_name")
+                 run.source_ra = tcu_result.get("ra")
+                 run.source_dec = tcu_result.get("dec")
+
          # Store this source information (run_id, source_name, source_ra, source_dec)
          # into an astropy Table and save to disk in RunCatalog files. In this way, the
          # information can be dumped anytime later more easily than accessing the
osa/nightsummary/tests/test_database.py CHANGED
@@ -1,9 +1,8 @@
- import pytest
- from pymongo.errors import ConnectionFailure
-
+ from osa.nightsummary import database
 
  def test_query():
-     from osa.nightsummary import database
+     result = database.query(obs_id=20038)
+     assert result is None
+
+
 
-     with pytest.raises(ConnectionFailure):
-         database.query(obs_id=1616, property_name="DriveControl_SourceName")
osa/paths.py CHANGED
@@ -388,18 +388,66 @@ def is_job_completed(job_id: str):
      return False
 
 
- def create_longterm_symlink(cherenkov_job_id: str = None):
+ def create_longterm_symlink():
      """If the created longterm DL1 datacheck file corresponds to the latest
      version available, make symlink to it in the "all" common directory."""
-     if not cherenkov_job_id or is_job_completed(cherenkov_job_id):
-         nightdir = utils.date_to_dir(options.date)
-         longterm_dir = Path(cfg.get("LST1", "LONGTERM_DIR"))
-         linked_longterm_file = longterm_dir / f"night_wise/all/DL1_datacheck_{nightdir}.h5"
-         all_longterm_files = longterm_dir.rglob(f"v*/{nightdir}/DL1_datacheck_{nightdir}.h5")
+
+     nightdir = utils.date_to_dir(options.date)
+     longterm_dir = Path(cfg.get("LST1", "LONGTERM_DIR"))
+     output_dir = Path(cfg.get("LST1", "DATACHECK_DIR"))
+
+     extensions = ["h5", "log", "html"]
+
+     for ext in extensions:
+         linked_longterm_file = output_dir / f"night_wise/DL1_datacheck_{nightdir}.{ext}"
+         all_longterm_files = longterm_dir.rglob(f"v*/{nightdir}/DL1_datacheck_{nightdir}.{ext}")
          latest_version_file = get_latest_version_file(all_longterm_files)
-         log.info("Symlink the latest version longterm DL1 datacheck file in the common directory.")
          linked_longterm_file.unlink(missing_ok=True)
          linked_longterm_file.symlink_to(latest_version_file)
+
+
+ def create_runwise_datacheck_symlinks():
+     """Create symlinks of the run-wise datacheck files in the "datacheck" directory."""
+     nightdir = utils.date_to_dir(options.date)
+     dl1_dir = Path(cfg.get("LST1", "DL1_DIR")) / nightdir / options.prod_id
+     output_dir = Path(cfg.get("LST1", "DATACHECK_DIR")) / nightdir
+     output_dir.mkdir(parents=True, exist_ok=True)
+
+     patterns = [
+         "tailcut*/datacheck/datacheck_dl1_LST-1.Run?????.pdf",
+         "tailcut*/datacheck/datacheck_dl1_LST-1.Run?????.h5",
+     ]
+
+     for pattern in patterns:
+         for input_file in dl1_dir.rglob(pattern):
+             output_file = output_dir / input_file.name
+             if not output_file.is_symlink():
+                 output_file.symlink_to(input_file.resolve())
+
+
+ def create_muons_symlinks():
+     """Create symlinks of the muon files in the "datacheck" directory."""
+     nightdir = utils.date_to_dir(options.date)
+     muons_dir = destination_dir("MUON", create_dir=False)
+     muons_file_list = muons_dir.rglob("muons_LST-1*.fits")
+     output_dir = Path(cfg.get("LST1", "DATACHECK_DIR")) / nightdir / "muons"
+     output_dir.mkdir(parents=True, exist_ok=True)
+
+     for input_file in muons_file_list:
+         output_file = output_dir / input_file.name
+         if not output_file.is_symlink():
+             print(f"input file exists: {input_file.exists()}")
+             output_file.symlink_to(input_file.resolve())
+
+
+ def create_datacheck_symlinks(cherenkov_job_id: str=None):
+     """Once all steps of autocloser have finished successfully, create symlinks of the run-wise
+     and night-wise datacheck files, and of the muon files in the "datacheck" directory."""
+     if not cherenkov_job_id or is_job_completed(cherenkov_job_id):
+         log.info("Creating symlinks of the datacheck and muon files in the common directory.")
+         create_longterm_symlink()
+         create_runwise_datacheck_symlinks()
+         create_muons_symlinks()
      else:
          log.warning(f"Job {cherenkov_job_id} (lstchain_cherenkov_transparency) did not finish successfully.")
 
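Illustrative sketch (not part of the diff) of the new wrapper that closer.py now calls instead of create_longterm_symlink(); the Slurm job id is invented.

from osa.paths import create_datacheck_symlinks

# With a lstchain_cherenkov_transparency job id: symlinks are created only
# once is_job_completed() reports that the job finished successfully.
create_datacheck_symlinks("1234567")  # hypothetical Slurm job id

# Without a job id the completion check is skipped and the night-wise,
# run-wise and muon symlinks are created unconditionally.
create_datacheck_symlinks()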
osa/scripts/closer.py CHANGED
@@ -26,7 +26,7 @@ from osa.nightsummary.extract import extract_runs, extract_sequences
  from osa.nightsummary.nightsummary import run_summary_table
  from osa.paths import (
      destination_dir,
-     create_longterm_symlink,
+     create_datacheck_symlinks,
      dl1_datacheck_longterm_file_exits
  )
  from osa.raw import is_raw_data_available
@@ -161,7 +161,7 @@ def post_process(seq_tuple):
 
      if dl1_datacheck_longterm_file_exits() and not options.test:
          if cfg.getboolean("lstchain", "create_longterm_symlink"):
-             create_longterm_symlink()
+             create_datacheck_symlinks()
 
      else:
          # Close the sequences
@@ -187,7 +187,7 @@ def post_process(seq_tuple):
          longterm_job_id = daily_datacheck(daily_longterm_cmd(list_job_id))
          cherenkov_job_id = cherenkov_transparency(cherenkov_transparency_cmd(longterm_job_id))
          if cfg.getboolean("lstchain", "create_longterm_symlink"):
-             create_longterm_symlink(cherenkov_job_id)
+             create_datacheck_symlinks(cherenkov_job_id)
 
      time.sleep(600)
 
@@ -584,7 +584,7 @@ def merge_muon_files(sequence_list):
  def daily_longterm_cmd(parent_job_ids: List[str]) -> List[str]:
      """Build the daily longterm command."""
      nightdir = date_to_dir(options.date)
-     datacheck_dir = destination_dir("DATACHECK", create_dir=False, dl1_prod_id="tailcut84")
+     datacheck_dir = destination_dir("DATACHECK", create_dir=False, dl1_prod_id="tailcut*")
      muons_dir = destination_dir("MUON", create_dir=False)
      longterm_dir = Path(cfg.get("LST1", "LONGTERM_DIR")) / options.prod_id / nightdir
      longterm_output_file = longterm_dir / f"DL1_datacheck_{nightdir}.h5"
@@ -629,7 +629,7 @@ def daily_datacheck(cmd: List[str]):
  def cherenkov_transparency_cmd(longterm_job_id: str) -> List[str]:
      """Build the cherenkov transparency command."""
      nightdir = date_to_dir(options.date)
-     datacheck_dir = destination_dir("DATACHECK", create_dir=False, dl1_prod_id="tailcut84")
+     datacheck_dir = destination_dir("DATACHECK", create_dir=False, dl1_prod_id="tailcut*")
      longterm_dir = Path(cfg.get("LST1", "LONGTERM_DIR")) / options.prod_id / nightdir
      longterm_datacheck_file = longterm_dir / f"DL1_datacheck_{nightdir}.h5"
      slurm_account = cfg.get("SLURM", "ACCOUNT")
osa/scripts/gain_selection.py CHANGED
@@ -334,21 +334,23 @@ def update_history_file(run_id: str, subrun: str, log_dir: Path, history_file: P
          log.debug(f"Cannot find a job_id for the run {run_id:05d}.{subrun:04d}")
      else:
          job_status = get_sacct_output(run_sacct(job_id=job_id))["State"]
+         now = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
+
          if job_status.item() in ["RUNNING", "PENDING"]:
              log.info(f"Job {job_id} is still running.")
              return
-
+
          elif job_status.item() == "COMPLETED":
              log.debug(f"Job {job_id} finished successfully, updating history file.")
              string_to_write = (
-                 f"{run_id:05d}.{subrun:04d} gain_selection 0\n"
+                 f"{now} | {run_id:05d}.{subrun:04d} gain_selection 0\n"
              )
              append_to_file(history_file, string_to_write)
-
+
          else:
              log.info(f"Job {job_id} failed, updating history file.")
              string_to_write = (
-                 f"{run_id:05d}.{subrun:04d} gain_selection 1\n"
+                 f"{now} | {run_id:05d}.{subrun:04d} gain_selection 1\n"
              )
              append_to_file(history_file, string_to_write)
 
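For reference, a small sketch (values invented) of the history-file entry produced after this change, with the new timestamp prefix:

from datetime import datetime

now = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
run_id, subrun, exit_code = 12345, 3, 0  # made-up values
line = f"{now} | {run_id:05d}.{subrun:04d} gain_selection {exit_code}\n"
# e.g. "2025-01-01 03:14:15 | 12345.0003 gain_selection 0"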
osa/scripts/gainsel_webmaker.py CHANGED
@@ -144,6 +144,10 @@ def main():
          content = "<p>No data found</p>"
          log.warning(f"No data found for date {date}, creating an empty HTML file.")
 
+     elif len(Table.read(run_summary_file)[Table.read(run_summary_file)["run_type"] == "DATA"]) == 0:
+         content = "<p>Only calibration events were taken</p>"
+         log.warning(f"No DATA runs for date {date}, creating an empty HTML file.")
+
      else:
          # Get the table with the gain selection check report in HTML format:
          table_gain_selection_jobs = check_failed_jobs(options.date)
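Sketch (not part of the diff) of the same check written with a single read of the run summary, assuming an ECSV table with a "run_type" column as used above; run_summary_file is the variable already defined in main():

from astropy.table import Table

summary = Table.read(run_summary_file)
data_runs = summary[summary["run_type"] == "DATA"]  # boolean-mask selection

if len(data_runs) == 0:
    content = "<p>Only calibration events were taken</p>"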
osa/scripts/interleaved_date.py ADDED
@@ -0,0 +1,212 @@
+ """
+ Remove interleaved directories for data runs that have already been processed.
+
+ Given an input date (<YYYYMMDD>), the script searches for interleaved directories
+ within the last month. For each date in that period, it retrieves the runs taken
+ and identifies the corresponding observed sources.
+ It creates a shell file to remove all of the interleaved files identified
+ (those that do not correspond to Crab observations). Then, they have to be removed
+ using the SLURM command sbatch.
+
+ At the moment, these shell files are saved in my workspace:
+ fefs/aswg/workspace/maria.rivero/remove_sh
+ """
+
+ import os
+ import sys
+ import csv
+ from datetime import datetime
+ from dateutil.relativedelta import relativedelta
+ import glob
+
+ # Data directories to look for links of interleaved directories
+ base_dirs = [
+     "/fefs/onsite/data/lst-pipe/LSTN-01/running_analysis",
+     "/fefs/aswg/data/real/running_analysis"
+ ]
+
+ # Directory of RunSummary files
+ summary_dir = "/fefs/aswg/data/real/monitoring/RunSummary"
+ backup_summary_dir = "/fefs/onsite/data/lst-pipe/LSTN-01/monitoring/RunSummary"
+
+ # Directories to check run's sources in RunCatalogs
+ catalog_dir = "/fefs/aswg/data/real/monitoring/RunCatalog"
+ backup_catalog_dir = "/fefs/onsite/data/lst-pipe/LSTN-01/monitoring/RunCatalog"
+
+
+ def find_interleaved(target_date_str):
+     """Look into base directories to find interleaved directories to be removed.
+     Args:
+         target_date_str (str): Last date to search within a month.
+     Returns:
+         interleaved_paths (list): Paths to interleaved directory.
+         interleaved_dates (list): Observation dates of each interleaved.
+     """
+
+     # Change date format (YYYYMMDD to python date)
+     try:
+         target_date = datetime.strptime(target_date_str, "%Y%m%d").date()
+     except ValueError:
+         print("Invalid format. Use YYYYMMDD")
+         sys.exit(1)
+
+     interleaved_paths = []
+     interleaved_dates = []
+     # Let's look into both dirs and check if both exist
+     for base_dir in base_dirs:
+         if not os.path.isdir(base_dir):
+             print(f"Path not found: {base_dir}")
+             continue
+         # Look in each date directory
+         for date_dir in sorted(os.listdir(base_dir)):
+             date_path = os.path.join(base_dir, date_dir) # given date path
+             if not os.path.isdir(date_path):
+                 continue
+
+             if not (len(date_dir) == 8 and date_dir.isdigit()):
+                 continue
+
+             try:
+                 date_obj = datetime.strptime(date_dir, "%Y%m%d").date() #save it as date python object
+             except ValueError:
+                 continue
+             # search only in the last month to the input date
+             if date_obj > target_date or date_obj < (target_date - relativedelta(months=1)):
+                 continue
+
+             # look for interleaved directory (save path and date)
+             for root, dirs, _ in os.walk(date_path):
+                 for d in dirs:
+                     if d == "interleaved":
+                         interleaved_paths.append(os.path.join(root, d))
+                         interleaved_dates.append(date_dir)
+     return interleaved_paths, interleaved_dates
+
+ def info_dates(date, catalog_dir, runs_id):
+     """Given an observation date, it classifies runs in RunCatalog by their sources (Crab or not).
+     Args:
+         date (str): <YYYYMMDD> format.
+         catalog_dir (str): path to RunCatalog files.
+         runs_id (list): DATA runs taken from RunSummary
+     Returns:
+         entry (dict): stores runs by source (Crab or other) and saves other sources' names.
+     """
+
+     entry = {"crab": [], "other_source": [], "others_names": []}
+     filename = f"RunCatalog_{date}.ecsv"
+     catalog_file = os.path.join(catalog_dir,filename)
+     if not os.path.isfile(catalog_file):
+         print(f"File not found: {catalog_file}")
+         return entry
+
+     else:
+         with open(catalog_file, "r") as f:
+             reader = csv.reader(f)
+             for row in reader:
+                 if not row or row[0].startswith("#") or row[0].startswith("run"):
+                     continue # skip comment and header lines
+                 if int(row[0]) in runs_id:
+                     if "Crab" in row[1]:
+                         entry["crab"].append(int(row[0]))
+                     elif "crab" in row[1]:
+                         entry["crab"].append(int(row[0]))
+                     else:
+                         entry["other_source"].append(int(row[0]))
+                         entry["others_names"].append(row[1])
+                 else:
+                     continue
+     return(entry)
+
+
+ def summary_dates(date, summary_dir):
+     """Given an observation date, it stores run_ids and types.
+     Args:
+         date (str): <YYYYMMDD> format.
+         summary_dir (str): path to RunSummary files.
+     Returns:
+         entry (dict): stores run_id and run_type.
+     """
+
+     entry = {"run_id": [], "run_type": []}
+     filename = f"RunSummary_{date}.ecsv"
+     summary_file = os.path.join(summary_dir, filename)
+     if not os.path.isfile(summary_file):
+         print(f"File not found: {summary_file}")
+         return entry
+     else:
+         with open(summary_file, "r") as f:
+             reader = csv.reader(f)
+             for row in reader:
+                 if not row or row[0].startswith("#"):
+                     continue # skip comment lines
+                 try:
+                     entry["run_id"].append(int(row[0]))
+                     entry["run_type"].append(row[2])
+
+                 except ValueError:
+                     continue
+     return(entry)
+
+ if __name__ == "__main__":
+     if len(sys.argv) < 2:
+         print("Use: python interleaved_date.py <YYYYMMDD>")
+         sys.exit(1)
+
+     date_arg = sys.argv[1]
+     found_paths, found_dates = find_interleaved(date_arg)
+     month = date_arg[:6]
+     recordfile = f'/fefs/aswg/workspace/maria.rivero/remove_sh/entries_rm{month}.sh'
+     with open(recordfile, 'w') as file:
+         file.write('#!/bin/bash \n')
+
+     dl1_paths = [
+         p.replace("/running_analysis/", "/DL1/")
+         for p in found_paths
+     ]
+     print('Interleaved path: ' , dl1_paths)
+
+     for path,link_path, date in zip(dl1_paths, found_paths, found_dates):
+         summary = summary_dates(date, summary_dir)
+         if not summary['run_id']:
+             summary = summary_dates(date, backup_summary_dir)
+             if not summary['run_id']:
+                 continue
+
+         data_runs = [
+             run_id
+             for run_id, run_type in zip(summary["run_id"], summary["run_type"])
+             if run_type == "DATA"]
+
+         entry = info_dates(date, catalog_dir, data_runs)
+         if entry["crab"] == [] and entry["other_source"] == []:
+             entry = info_dates(date, backup_catalog_dir, data_runs)
+             if not entry["other_source"]:
+                 continue
+
+         print('\n Dates with interleaved: ' , date)
+
+         print('RunSummary info:')
+         print(summary)
+         print('Run info (Crab or not):')
+         print(entry)
+
+         found_dataruns = sorted(entry["crab"] + entry["other_source"])
+         if not entry["crab"] and len(found_dataruns) == len(data_runs):
+             print(f"rm -r {path}")
+             with open(recordfile, 'a') as file:
+                 file.write(f"rm -r {path} \n")
+                 file.write(f"rm -r {link_path} \n")
+         else:
+             for runid in entry["other_source"]:
+                 run_str = f"{runid:05d}" # run_ids must be always five digits
+                 filename = f"interleaved_LST-1.Run{run_str}.*.h5"
+                 filepath = os.path.join(path, filename)
+                 link_filepath = os.path.join(link_path, filename)
+                 matching_files = glob.glob(filepath) # check that exist files with run_id
+                 if matching_files:
+                     print(f"rm {filepath}")
+                     with open(recordfile, 'a') as file:
+                         file.write(f"rm {filepath} \n")
+                         file.write(f"rm {link_filepath} \n")
+                 else:
+                     continue
osa/scripts/reprocess_longterm.py CHANGED
@@ -30,7 +30,7 @@ def run_longterm(date: str, prod_id: str, new_prod_id: str, log_dir: Path):
      log_dir : Path
          Path to the directory where job logs will be stored.
      """
-     dl1_dir = ANALYSIS_PATH / "DL1" / date / prod_id / "tailcut84" / "datacheck"
+     dl1_dir = ANALYSIS_PATH / "DL1" / date / prod_id / "tailcut*" / "datacheck"
      muons_dir = ANALYSIS_PATH / "DL1" / date / prod_id / "muons"
      new_longterm_dir = LONGTERM_PATH / new_prod_id / date
      longterm_output_file = new_longterm_dir / f"DL1_datacheck_{date}.h5"
osa/scripts/tests/test_osa_scripts.py CHANGED
@@ -198,7 +198,7 @@ def test_closer(
      systematic_correction_files,
      merged_run_summary,
      longterm_dir,
-     longterm_link_latest_dir,
+     datacheck_dir,
      daily_datacheck_dl1_files,
      dl1b_config_files,
      tailcuts_log_files,
@@ -223,7 +223,7 @@
      assert obs_file.exists()
      assert merged_run_summary.exists()
      assert longterm_dir.exists()
-     assert longterm_link_latest_dir.exists()
+     assert datacheck_dir.exists()
      for check_file in daily_datacheck_dl1_files:
          assert check_file.exists()
      assert rf_models[2].exists()
@@ -389,8 +389,8 @@ def test_daily_longterm_cmd():
          "log/longterm_daily_%j.log",
          "--dependency=afterok:12345,54321",
          "lstchain_longterm_dl1_check",
-         "--input-dir=test_osa/test_files0/DL1/20200117/v0.1.0/tailcut84/datacheck",
-         "--output-file=test_osa/test_files0/OSA/DL1DataCheck_LongTerm/v0.1.0/20200117/DL1_datacheck_20200117.h5",
+         "--input-dir=test_osa/test_files0/DL1/20200117/v0.1.0/tailcut*/datacheck",
+         "--output-file=test_osa/test_files0/DL1/datacheck_files/night_wise/v0.1.0/20200117/DL1_datacheck_20200117.h5",
          "--muons-dir=test_osa/test_files0/DL1/20200117/v0.1.0/muons",
          "--batch",
      ]
osa/webserver/utils.py CHANGED
@@ -37,8 +37,8 @@ def directory_in_webserver(host: str, datacheck_type: str, date: str, prod_id: s
  DATACHECK_WEB_DIRS = {
      "PEDESTAL": f"drs4/{prod_id}/{date}",
      "CALIB": f"enf_calibration/{prod_id}/{date}",
-     "DL1AB": f"dl1/{prod_id}/{date}/pdf",
-     "LONGTERM": f"dl1/{prod_id}/{date}",
+     "DL1AB": f"dl1/{date}/pdf",
+     "LONGTERM": f"dl1/{date}",
      "HIGH_LEVEL": f"high_level/{prod_id}/{date}",
  }
 