roc-film 1.14.0__py3-none-any.whl → 1.14.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
roc/film/commands.py CHANGED
@@ -1142,24 +1142,6 @@ class CdfPostProCommand(Command):
1142
1142
  required=True,
1143
1143
  )
1144
1144
 
1145
- parser.add_argument(
1146
- "--rpw-obs-json",
1147
- help="""
1148
- List of RPW SoopKitchen export JSON files. Pattern can also be passed.
1149
- """,
1150
- type=str,
1151
- nargs="+",
1152
- )
1153
-
1154
- parser.add_argument(
1155
- "--rpw-ior-xml",
1156
- help="""
1157
- List of RPW IOR XML files. Pattern or ZIP archive can be also passed.
1158
- """,
1159
- type=str,
1160
- nargs="+",
1161
- )
1162
-
1163
1145
  parser.add_argument(
1164
1146
  "--options",
1165
1147
  help=f"""
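For reference, the two options removed above were plain argparse list arguments. Below is a minimal sketch of that CLI shape; only the argument names and help text come from the removed lines, while the surrounding parser wiring and the prog name are assumptions for illustration:

```python
import argparse

# Hedged sketch: reproduces the shape of the two removed arguments so the
# CLI change is easy to see; the parser setup itself is assumed, not taken
# from roc-film.
parser = argparse.ArgumentParser(prog="l1_post_pro")
parser.add_argument(
    "--rpw-obs-json",
    help="List of RPW SoopKitchen export JSON files. Pattern can also be passed.",
    type=str,
    nargs="+",
)
parser.add_argument(
    "--rpw-ior-xml",
    help="List of RPW IOR XML files. Pattern or ZIP archive can also be passed.",
    type=str,
    nargs="+",
)

# With nargs="+", each option collects one or more values into a list.
args = parser.parse_args(
    ["--rpw-obs-json", "obs1.json", "obs2.json", "--rpw-ior-xml", "IOR_*.xml"]
)
print(args.rpw_obs_json)  # ['obs1.json', 'obs2.json']
print(args.rpw_ior_xml)   # ['IOR_*.xml']
```

With these options gone, the post-processing command no longer takes SoopKitchen JSON or IOR XML inputs on the command line; as the cdf_postpro.py changes further below show, the equivalent OBS_ID information is now read from the database.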
roc/film/constants.py CHANGED
@@ -141,7 +141,6 @@ TIMEOUT = 14400
141
141
 
142
142
  # Allowed values for keyword --options in l1_post_pro command
143
143
  CDF_POST_PRO_OPTS_ARGS = [
144
- "soop_type",
145
144
  "obs_id",
146
145
  "resize_wf",
147
146
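Since "soop_type" is no longer in the allowed-options list, passing it to --options should now be rejected (or ignored) by whatever validation the command applies. A hedged sketch of such a check, using only the values visible in this hunk; the real list may contain more entries and check_options is illustrative, not the plugin's code:

```python
# Allowed values as visible in this hunk (possibly truncated).
CDF_POST_PRO_OPTS_ARGS = [
    "obs_id",
    "resize_wf",
    "update_cdf",
]

def check_options(options):
    """Return lower-cased options, rejecting values that are not allowed."""
    options = [opt.lower() for opt in options]
    unknown = set(options) - set(CDF_POST_PRO_OPTS_ARGS)
    if unknown:
        raise ValueError(f"Unknown --options value(s): {sorted(unknown)}")
    return options

print(check_options(["OBS_ID", "update_cdf"]))  # ['obs_id', 'update_cdf']
# check_options(["soop_type"]) would now raise ValueError
```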
  "update_cdf",
roc/film/descriptor.json CHANGED
@@ -6,10 +6,10 @@
6
6
  "description": "RPW FILe Maker (FILM): Plugin to make RPW L0, L1 and HK data files"
7
7
  },
8
8
  "release": {
9
- "version": "1.14.0",
10
- "date": "2024-12-06",
9
+ "version": "1.14.2",
10
+ "date": "2025-01-30",
11
11
  "author": "Xavier Bonnin <xavier.bonnin@obspm.fr>, ROC Team <roc.support@sympa.obspm.fr>",
12
- "modification": "Update python dev deps and README",
12
+ "modification": "Update cdf_postpro",
13
13
  "url": "https://gitlab.obspm.fr/ROC/Pipelines/Plugins/FILM"
14
14
  },
15
15
  "tasks": [
roc/film/tasks/cdf_postpro.py CHANGED
@@ -1,7 +1,7 @@
1
1
  #!/usr/bin/env python
2
2
  # -*- coding: utf-8 -*-
3
3
 
4
- """Module to create the RPW L1 SBM1/SBM2 CDF files."""
4
+ """Module to perform some post-processing on the RPW CDF files."""
5
5
 
6
6
  import json
7
7
  import os
@@ -22,13 +22,11 @@ from poppy.core.target import FileTarget
22
22
 
23
23
  from roc.rpl.time import Time
24
24
  from roc.dingo.models.data import EventLog
25
+ from roc.dingo.models.packet import TcLog
25
26
  from roc.dingo.tools import query_db
26
27
  from roc.dingo.constants import PIPELINE_DATABASE
27
28
 
28
29
  from roc.film import (
29
- TIME_JSON_STRFORMAT,
30
- TIME_DOY1_STRFORMAT,
31
- TIME_DOY2_STRFORMAT,
32
30
  INPUT_DATETIME_STRFTIME,
33
31
  )
34
32
  from roc.film.tools.file_helpers import is_output_dir, get_output_dir
@@ -80,14 +78,6 @@ class CdfPostPro(Task):
80
78
  self.pipeline.exit()
81
79
  return
82
80
 
83
- # Get list of RPW Soopkitchen Observations JSON files
84
- self.rpw_obs_json_list = glob_list(
85
- self.pipeline.get("rpw_obs_json", default=[])
86
- )
87
-
88
- # Get list of RPW IOR XML files
89
- self.rpw_ior_xml_list = glob_list(self.pipeline.get("rpw_ior_xml", default=[]))
90
-
91
81
  # Get post-processing options
92
82
  self.options = [
93
83
  opt.lower()
@@ -167,7 +157,6 @@ class CdfPostPro(Task):
167
157
  self.session = Connector.manager[PIPELINE_DATABASE].session
168
158
 
169
159
  # Initialize some class variables
170
- self.soop_type_list = []
171
160
  self.obs_id_list = []
172
161
  self.event_log = None
173
162
 
@@ -227,21 +216,16 @@ class CdfPostPro(Task):
227
216
  # Get time range of the input L1 CDF
228
217
  self.epoch = cdf["Epoch"][...]
229
218
  self.nrec = self.epoch.shape[0]
230
- self.time_min = self.epoch[0]
231
- self.time_max = self.epoch[-1]
219
+ self.time_min = min(self.epoch)
220
+ self.time_max = max(self.epoch)
232
221
  logger.info(
233
222
  f"{self.current_file} has {self.nrec} records "
234
223
  f"between {self.time_min} "
235
224
  f"and {self.time_max}\t[{self.job_id}]"
236
225
  )
237
226
 
238
- # Set SOOP_TYPE global attribute from RPW SOOPKitchen
239
- # export observation JSON files
240
- if "soop_type" in self.options:
241
- self._set_soop_type(cdf)
242
-
243
- # Set OBS_ID global attribute from IOR XML files (get
244
- # observationID)
227
+ # Set OBS_ID global attribute
228
+ # from unique_id entries in pipeline.tc_log database table
245
229
  if "obs_id" in self.options:
246
230
  self._set_obs_id(cdf)
247
231
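The switch from epoch[0]/epoch[-1] to min()/max() makes the computed time range correct even when the Epoch variable is not strictly time-ordered. A small self-contained illustration with synthetic datetimes (not real CDF data):

```python
from datetime import datetime, timedelta

# Synthetic, slightly out-of-order Epoch values (illustration only).
epoch = [
    datetime(2025, 1, 30, 0, 0, 10),
    datetime(2025, 1, 30, 0, 0, 0),   # earliest record is not the first one
    datetime(2025, 1, 30, 0, 0, 20),
]

# Old behaviour: assumes the array is sorted.
first, last = epoch[0], epoch[-1]            # 00:00:10 .. 00:00:20 (wrong span)

# New behaviour: order-independent.
time_min, time_max = min(epoch), max(epoch)  # 00:00:00 .. 00:00:20

assert time_max - time_min == timedelta(seconds=20)
```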
 
@@ -410,88 +394,6 @@ class CdfPostPro(Task):
410
394
 
411
395
  return is_succeeded
412
396
 
413
- def _set_soop_type(self, cdf_obj):
414
- """
415
- Set input CDF file with expected value for SOOP_TYPE g.attribute.
416
-
417
- :param cdf_obj: CDF to update (passed as a spacepy.pycdf.CDF class instance)
418
- :return: True if SOOP_TYPE has been set, False otherwise
419
- """
420
-
421
- logger.info(
422
- "Setting SOOP_TYPE global attribute "
423
- f"in {self.current_file} ...\t[{self.job_id}]"
424
- )
425
-
426
- # Get list of SOOP type from RPW soopkitchen observation json files
427
- if not self.soop_type_list:
428
- logger.info(
429
- "Extracting soopType elements from input "
430
- f"list of {len(self.rpw_obs_json_list)} RPW SoopKitchen JSON files...\t[{self.job_id}]"
431
- )
432
- self.soop_type_list = CdfPostPro.get_soop_type(self.rpw_obs_json_list)
433
-
434
- # Only keep list of soop type betwen time_min and time_max
435
- soop_type_list = [
436
- current_soop_type["soopType"]
437
- for current_soop_type in self.soop_type_list
438
- if (
439
- datetime.strptime(current_soop_type["startDate"], TIME_JSON_STRFORMAT)
440
- <= self.time_max
441
- and datetime.strptime(current_soop_type["endDate"], TIME_JSON_STRFORMAT)
442
- >= self.time_min
443
- )
444
- ]
445
-
446
- soop_type_len = len(soop_type_list)
447
- if soop_type_len == 0:
448
- logger.info(
449
- "No Soop Type value found "
450
- f"between {self.time_min} and {self.time_max}\t[{self.job_id}]"
451
- )
452
- cdf_obj.attrs["SOOP_TYPE"] = "none"
453
- return False
454
- else:
455
- cdf_obj.attrs["SOOP_TYPE"] = list(set(soop_type_list))
456
- logger.debug(f"SOOP_TYPE = {soop_type_list} in {self.current_file}")
457
- logger.info(
458
- f"{soop_type_len} entries set for "
459
- f"SOOP_TYPE in {self.current_file}\t[{self.job_id}]"
460
- )
461
-
462
- # make sure to save the change
463
- cdf_obj.save()
464
-
465
- return True
466
-
467
- @staticmethod
468
- def get_soop_type(rpw_obs_json_list):
469
- """
470
- Return list of SOOP_TYPE values for a given set of input RPW SoopKitchen observation JSON files
471
-
472
- :param rpw_obs_json_list: List of input RPW SK JSON files
473
- :return: list of SOOP_TYPE values found
474
- """
475
-
476
- # Define sub-method
477
- def extract_soop_type(json_file):
478
- """Extract soopType from input JSON"""
479
-
480
- # Open JSON file
481
- with open(json_file, "r") as json_buff:
482
- data = json.load(json_buff)
483
-
484
- # Retrieve all "soopType" field from file
485
- return data["soops"]
486
-
487
- # Initialize output list
488
- soop_type_list = []
489
-
490
- for current_json in rpw_obs_json_list:
491
- soop_type_list.extend(extract_soop_type(current_json))
492
-
493
- return soop_type_list
494
-
495
397
  def _set_obs_id(self, cdf_obj):
496
398
  """
497
399
  Set input CDF file with expected value for OBS_ID g.attribute.
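The removed _set_soop_type()/get_soop_type() pair selected soopType entries whose [startDate, endDate] interval overlapped the CDF time range. For readers who relied on that behaviour, here is a standalone sketch of the same overlap test; the TIME_JSON_STRFORMAT value below is an assumption (the real constant was imported from roc.film and is also removed in this release):

```python
from datetime import datetime

# Assumed ISO-like time format for SoopKitchen JSON dates (illustrative).
TIME_JSON_STRFORMAT = "%Y-%m-%dT%H:%M:%SZ"

soops = [
    {"soopType": "SOOP_A", "startDate": "2025-01-29T00:00:00Z", "endDate": "2025-01-29T12:00:00Z"},
    {"soopType": "SOOP_B", "startDate": "2025-01-30T06:00:00Z", "endDate": "2025-01-31T00:00:00Z"},
]

time_min = datetime(2025, 1, 30, 0, 0, 0)
time_max = datetime(2025, 1, 30, 23, 59, 59)

# Keep an entry when its interval overlaps [time_min, time_max]:
# startDate <= time_max and endDate >= time_min.
selected = [
    s["soopType"]
    for s in soops
    if datetime.strptime(s["startDate"], TIME_JSON_STRFORMAT) <= time_max
    and datetime.strptime(s["endDate"], TIME_JSON_STRFORMAT) >= time_min
]
print(sorted(set(selected)))  # ['SOOP_B']
```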
@@ -507,19 +409,19 @@ class CdfPostPro(Task):
507
409
 
508
410
  # Get list of RPW TC obs id values
509
411
  if not self.obs_id_list:
510
- logger.info(
511
- f"Extracting uniqueID elements from "
512
- f"input list of {len(self.rpw_ior_xml_list)} RPW IOR files...\t[{self.job_id}]"
412
+ logger.debug(
413
+ f"Requesting pipeline.tc_log table entries from database...\t[{self.job_id}]"
513
414
  )
514
- self.obs_id_list = CdfPostPro.get_ior_obs_id(self.rpw_ior_xml_list)
415
+ self.obs_id_list = self.get_tc_log_data()
515
416
 
516
417
  # Keep only obs_id between time_min and time_max
517
418
  obs_id_list = list(
518
419
  set(
519
420
  [
520
- current_tc[1]
421
+ current_tc["unique_id"]
521
422
  for current_tc in self.obs_id_list
522
- if self.time_max >= current_tc[0] >= self.time_min and current_tc[1]
423
+ if current_tc["unique_id"]
424
+ and self.time_max >= current_tc["utc_time"] >= self.time_min
523
425
  ]
524
426
  )
525
427
  )
@@ -546,87 +448,22 @@ class CdfPostPro(Task):
546
448
 
547
449
  return True
548
450
 
549
- @staticmethod
550
- def get_ior_obs_id(rpw_ior_xml_list):
451
+ def get_tc_log_data(self):
551
452
  """
552
- Return list of OBS_ID values from
553
- an input list of RPW IOR XML files
453
+ Return list of [unique_id, utc_time] entries from
454
+ pipeline.tc_log database table.
554
455
 
555
- :param rpw_ior_xml_list: List of input RPW TC XML files.
556
- (ZIP files containing IOR XML can be also passed).
557
- :return: list of OBS_ID values found
456
+ :return: tc_log data entries found
558
457
  """
559
- import zipfile
560
-
561
- # Define sub-method
562
- def extract_obs_id(xml):
563
- """
564
- Extract OBS_ID from input XML
565
-
566
- :param xml: input IOR XML stream
567
- :return: List of (time, observationID) values extracted from IOR
568
- """
569
- import xmltodict
570
-
571
- # Convert input IOR XML stream into dictionary
572
- logger.debug(f"Parsing {xml.name} ...")
573
- data = xmltodict.parse(xml.read())
574
458
 
575
- # Extract list of sequences
576
- sequence_list = data["planningData"]["commandRequests"]["occurrenceList"][
577
- "sequence"
578
- ]
579
-
580
- # Make sure that returned sequence_list is a list
581
- # (If only one sequence tag is found in the XML
582
- # the xml_to_dict method returns a collections.OrderedDict()
583
- # instance).
584
- if not isinstance(sequence_list, list):
585
- sequence_list = [sequence_list]
586
-
587
- # Retrieve all "observationID" field from input TC XML file
588
- # Return as a list of tuple (ExecutionTime, observationID)
589
- ior_seq_list = []
590
- for current_seq in sequence_list:
591
- # Make sure to retrieve executionTime with the
592
- # right time format (two are possible)
593
- for current_strtformat in [TIME_DOY1_STRFORMAT, TIME_DOY2_STRFORMAT]:
594
- current_time = cast_ior_seq_datetime(
595
- current_seq, current_strtformat
596
- )
597
- if current_time is not None:
598
- break
599
- current_obsid = current_seq["observationID"]
600
-
601
- ior_seq_list.append((current_time, current_obsid))
602
-
603
- return ior_seq_list
604
-
605
- # Initialize output list
606
- obs_id_list = []
607
-
608
- for current_file in rpw_ior_xml_list:
609
- if not os.path.basename(current_file).startswith("IOR"):
610
- logger.debug(f"{current_file} not a valid RPW IOR file, skip it")
611
- continue
612
-
613
- if zipfile.is_zipfile(current_file):
614
- with zipfile.ZipFile(current_file, "r") as zip_stream:
615
- for current_xml in zip_stream.namelist():
616
- with zip_stream.open(current_xml, "r") as ior_xml:
617
- if ior_xml.name.startswith("IOR") and ior_xml.name.endswith(
618
- ".SOL"
619
- ):
620
- obs_id_list.extend(extract_obs_id(ior_xml))
621
- else:
622
- logger.debug(
623
- f"{current_xml} is not a valid RPW IOR XML file, skip it"
624
- )
625
- else:
626
- with open(current_file, "r") as ior_xml:
627
- obs_id_list.extend(extract_obs_id(ior_xml))
459
+ tc_log_data = query_db(
460
+ self.session,
461
+ [TcLog.unique_id, TcLog.utc_time],
462
+ filters=(TcLog.tc_exe_state == "PASSED"),
463
+ to_dict="records",
464
+ )
628
465
 
629
- return obs_id_list
466
+ return tc_log_data
630
467
 
631
468
  def _update_cdf(self, cdf_obj):
632
469
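query_db() is a roc.dingo helper; the sketch below shows what the new get_tc_log_data() call is assumed to boil down to in plain SQLAlchemy terms, namely selecting unique_id and utc_time for TCs whose execution state is PASSED and returning them as one dict per row. This illustrates the expected result shape only, not roc.dingo's implementation:

```python
from sqlalchemy.orm import Session


def get_tc_log_data(session: Session, TcLog):
    """Hedged equivalent of query_db(..., to_dict='records') for tc_log."""
    rows = (
        session.query(TcLog.unique_id, TcLog.utc_time)
        .filter(TcLog.tc_exe_state == "PASSED")
        .all()
    )
    # "records" orientation: one dict per row.
    return [{"unique_id": uid, "utc_time": utc} for uid, utc in rows]
```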
  """
@@ -824,11 +661,11 @@ class CdfPostPro(Task):
824
661
  # Set 7th bit (X000000)
825
662
  bitmask[w] = bitmask[w] | 64
826
663
 
827
- logger.debug(
828
- f"Set {len(w)} QUALITY_BITMASK records for {row['label']} "
829
- f"between {row['start_time']} "
830
- f"and {row['end_time']}\t[{self.job_id}]"
831
- )
664
+ logger.debug(
665
+ f"Set {len(w)} QUALITY_BITMASK records for {row['label']} "
666
+ f"between {row['start_time']} "
667
+ f"and {row['end_time']}\t[{self.job_id}]"
668
+ )
832
669
 
833
670
  # Save quality_bitmask
834
671
  cdf_obj["QUALITY_BITMASK"] = bitmask[...]
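For context on the quality-bitmask block above (only re-indented in this release): records whose Epoch falls inside an event window get bit value 64 (the 7th bit) OR-ed into QUALITY_BITMASK, so previously set bits are preserved. A small numpy sketch with synthetic data:

```python
import numpy as np

# Synthetic stand-ins; the real code gets the window from the event_log table.
epoch = np.arange(10)
bitmask = np.zeros(epoch.shape, dtype=np.uint16)

start_time, end_time = 3, 6
w = np.where((epoch >= start_time) & (epoch <= end_time))[0]

bitmask[w] = bitmask[w] | 64   # set bit 64 without clearing other bits

print(bitmask)                 # [ 0  0  0 64 64 64 64  0  0  0]
print(len(w), "records flagged")
```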
roc/film/tasks/file_handler.py CHANGED
@@ -31,10 +31,16 @@ class MoveToProdDir(Task):
31
31
  # the pipeline is still working on it
32
32
  # Add a LockFile class instance to the Task class in Poppy ?
33
33
 
34
+ self.job_uuid = str(uuid.uuid4())
35
+ self.job_id = self.job_uuid[:8]
36
+ logger.debug(f"Task {self.job_id} is starting")
37
+
34
38
  # See if --no-move keyword is defined
35
39
  no_move = self.pipeline.get("no_move", default=False, args=True)
36
40
  if no_move:
37
- logger.info('Skip current task "move_to_products_dir": --no-move is True')
41
+ logger.info(
42
+ f'--no-move is passed: skip current task "move_to_products_dir"\t[{self.job_id}]'
43
+ )
38
44
  return
39
45
 
40
46
  # Retrieve pipeline output file directory
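The job-ID pattern added to this and the following tasks is a uuid4 string truncated to eight characters and appended to log messages as a tab-separated [job_id] suffix. A minimal sketch:

```python
import uuid

# Long and short task job identifiers, as added to the tasks in this file.
job_uuid = str(uuid.uuid4())   # e.g. '3f2b7c1a-...'
job_id = job_uuid[:8]          # e.g. '3f2b7c1a'

print(f"Task {job_id} is starting")
print(f"/data/out.cdf moved into /data/products\t[{job_id}]")
```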
@@ -49,7 +55,7 @@ class MoveToProdDir(Task):
49
55
 
50
56
  if not products_dir:
51
57
  logger.info(
52
- 'Skip current task "move_to_products_dir": products_dir argument not defined'
58
+ f'products_dir argument not defined: Skip current task "move_to_products_dir"\t[{self.job_id}]'
53
59
  )
54
60
  else:
55
61
  output_dirbasename = os.path.basename(output_dir)
@@ -58,6 +64,8 @@ class MoveToProdDir(Task):
58
64
  if safe_move(output_dir, target_dir, ignore_patterns=ignore_patterns):
59
65
  logger.info(f"{output_dir} moved into {products_dir}")
60
66
 
67
+ logger.debug(f"Task {self.job_id} completed")
68
+
61
69
 
62
70
  class MoveFailedFiles(Task):
63
71
  """Move any failed files found
@@ -81,7 +89,7 @@ class MoveFailedFiles(Task):
81
89
  # Define task job ID (long and short)
82
90
  self.job_uuid = str(uuid.uuid4())
83
91
  self.job_id = self.job_uuid[:8]
84
- logger.info(f"Task {self.job_id} is starting")
92
+ logger.debug(f"Task {self.job_id} is starting")
85
93
  try:
86
94
  self.setup_inputs()
87
95
  except Exception:
@@ -90,7 +98,7 @@ class MoveFailedFiles(Task):
90
98
  return
91
99
 
92
100
  if self.failed_file_count == 0:
93
- logger.debug("No failed file(s) to move")
101
+ logger.debug("No failed file(s) to move\t[{self.job_id}")
94
102
  else:
95
103
  # Loop over failed files list
96
104
  for i, failed_file in enumerate(self.failed_file_list):
@@ -113,7 +121,11 @@ class MoveFailedFiles(Task):
113
121
  # perform a safe move (i.e., copy, check and delete) into
114
122
  # failed dir
115
123
  if safe_move(failed_file, target_filepath):
116
- logger.info(f"{failed_file} moved into {failed_dir}")
124
+ logger.warning(
125
+ f"{failed_file} moved into {failed_dir}\t[{self.job_id}]"
126
+ )
127
+
128
+ logger.debug(f"Task {self.job_id} completed")
117
129
 
118
130
 
119
131
  class CopyProcessedDds(Task):
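safe_move() is used here as a "copy, check and delete" operation (per the inline comment above). A hedged sketch of that semantic for a single file; the real helper in roc-film also handles directories and ignore patterns and may differ in detail:

```python
import filecmp
import os
import shutil


def safe_move(src: str, dst: str) -> bool:
    """Copy src to dst, verify the copy byte-for-byte, then delete src."""
    shutil.copy2(src, dst)                        # copy file with metadata
    if not filecmp.cmp(src, dst, shallow=False):  # verify the copy
        os.remove(dst)                            # roll back a bad copy
        return False
    os.remove(src)                                # delete source only once verified
    return True
```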
@@ -125,6 +137,11 @@ class CopyProcessedDds(Task):
125
137
  name = "copy_processed_dds"
126
138
 
127
139
  def run(self):
140
+ # Define task job ID (long and short)
141
+ self.job_uuid = str(uuid.uuid4())
142
+ self.job_id = self.job_uuid[:8]
143
+ logger.debug(f"Task {self.job_id} is starting")
144
+
128
145
  # Get processed file target directory
129
146
  processed_dir = self.pipeline.get(
130
147
  "processed_dds_dir", default=[None], args=True
@@ -133,14 +150,14 @@ class CopyProcessedDds(Task):
133
150
  # skip task if processed_dir is None
134
151
  if processed_dir is None:
135
152
  logger.info(
136
- "Skip task copy_processed_dds: No processed_dds_dir argument defined"
153
+ f"No processed_dds_dir argument defined: skip task copy_processed_dds\t[{self.job_id}]"
137
154
  )
138
155
  return
139
156
  elif not os.path.isdir(processed_dir):
140
- logger.debug(f"Creating {processed_dir}...")
157
+ logger.debug(f"Creating {processed_dir}...\t[{self.job_id}]")
141
158
  os.makedirs(processed_dir)
142
159
  else:
143
- logger.debug(f"process_dir set to {processed_dir}")
160
+ logger.debug(f"process_dir set to {processed_dir}\t[{self.job_id}]")
144
161
 
145
162
  # If processed_files list not defined in the pipeline properties,
146
163
  # initialize it
@@ -148,7 +165,9 @@ class CopyProcessedDds(Task):
148
165
  processed_files_count = len(processed_file_list)
149
166
  # Skip task if no processed files
150
167
  if processed_files_count == 0:
151
- logger.info("Skip task copy_processed_dds: No processed file to move")
168
+ logger.info(
169
+ f"No processed file to move: skip task copy_processed_dds\t[{self.job_id}]"
170
+ )
152
171
  return
153
172
 
154
173
  # Get clear-dds keyword
@@ -161,7 +180,9 @@ class CopyProcessedDds(Task):
161
180
  for processed_file in processed_file_list.copy():
162
181
  # Check first that processed file is not in failed list
163
182
  if processed_file in failed_file_list:
164
- logger.warning(f"{processed_file} found in the failed file list!")
183
+ logger.warning(
184
+ f"{processed_file} found in the failed file list!\t[{self.job_id}]"
185
+ )
165
186
  continue
166
187
 
167
188
  # Build target filepath
@@ -169,16 +190,20 @@ class CopyProcessedDds(Task):
169
190
  target_filepath = os.path.join(processed_dir, basename)
170
191
 
171
192
  # copy file
172
- logger.debug(f"Copying {processed_file} into {processed_dir}")
193
+ logger.debug(
194
+ f"Copying {processed_file} into {processed_dir}\t[{self.job_id}]"
195
+ )
173
196
  try:
174
197
  shutil.copyfile(processed_file, target_filepath)
175
198
  except Exception as e:
176
199
  logger.exception(
177
- f"Copying {processed_file} into {processed_dir} has failed!"
200
+ f"Copying {processed_file} into {processed_dir} has failed!\t[{self.job_id}]"
178
201
  )
179
202
  logger.debug(e)
180
203
  else:
181
- logger.info(f"{processed_file} copied into {target_filepath}")
204
+ logger.info(
205
+ f"{processed_file} copied into {target_filepath}\t[{self.job_id}]"
206
+ )
182
207
 
183
208
  # Remove current file from the list in pipeline properties
184
209
  processed_file_list.remove(processed_file)
@@ -186,7 +211,7 @@ class CopyProcessedDds(Task):
186
211
  # if clear-dds keyword is passed, then remove processed Dds
187
212
  if clear_dds:
188
213
  os.remove(processed_file)
189
- logger.debug(f"{processed_file} deleted")
214
+ logger.debug(f"{processed_file} deleted\t[{self.job_id}]")
190
215
 
191
216
 
192
217
  class CopyFailedDds(Task):
@@ -198,17 +223,24 @@ class CopyFailedDds(Task):
198
223
  name = "copy_failed_dds"
199
224
 
200
225
  def run(self):
226
+ # Define task job ID (long and short)
227
+ self.job_uuid = str(uuid.uuid4())
228
+ self.job_id = self.job_uuid[:8]
229
+ logger.debug(f"Task {self.job_id} is starting")
230
+
201
231
  # Get failed file target directory
202
232
  failed_dir = self.pipeline.get("failed_dds_dir", default=[None], args=True)[0]
203
233
  # skip task if failed_dir is None
204
234
  if failed_dir is None:
205
- logger.info("Skip task copy_failed_dds: No failed_dds_dir argument defined")
235
+ logger.info(
236
+ f"No failed_dds_dir argument defined: skip task copy_failed_dds\t[{self.job_id}]"
237
+ )
206
238
  return
207
239
  elif not os.path.isdir(failed_dir):
208
- logger.debug(f"Creating {failed_dir}...")
240
+ logger.debug(f"Creating {failed_dir}...\t[{self.job_id}]")
209
241
  os.makedirs(failed_dir)
210
242
  else:
211
- logger.debug(f"failed_dir set to {failed_dir}")
243
+ logger.debug(f"failed_dir set to {failed_dir}\t[{self.job_id}]")
212
244
 
213
245
  # If failed_files list not defined in the pipeline properties,
214
246
  # initialize it
@@ -216,7 +248,9 @@ class CopyFailedDds(Task):
216
248
  failed_files_count = len(failed_file_list)
217
249
  # Skip task if no failed dds files
218
250
  if failed_files_count == 0:
219
- logger.info("Skip task copy_failed_dds: No failed file to move")
251
+ logger.info(
252
+ f"No failed file to move: skip task copy_failed_dds\t[{self.job_id}]"
253
+ )
220
254
  return
221
255
 
222
256
  # Get clear-dds keyword
@@ -229,14 +263,18 @@ class CopyFailedDds(Task):
229
263
  target_filepath = os.path.join(failed_dir, basename)
230
264
 
231
265
  # copy file
232
- logger.debug(f"Copying {failed_file} into {failed_dir}")
266
+ logger.debug(f"Copying {failed_file} into {failed_dir}\t[{self.job_id}]")
233
267
  try:
234
268
  shutil.copyfile(failed_file, target_filepath)
235
269
  except Exception as e:
236
- logger.exception(f"Copying {failed_file} into {failed_dir} has failed!")
270
+ logger.exception(
271
+ f"Copying {failed_file} into {failed_dir} has failed!\t[{self.job_id}]"
272
+ )
237
273
  logger.debug(e)
238
274
  else:
239
- logger.info(f"{failed_file} copied into {target_filepath}")
275
+ logger.info(
276
+ f"{failed_file} copied into {target_filepath}\t[{self.job_id}]"
277
+ )
240
278
 
241
279
  # Remove current file from the list in pipeline properties
242
280
  failed_file_list.remove(failed_file)
@@ -244,14 +282,14 @@ class CopyFailedDds(Task):
244
282
  # if clear-dds keyword is passed, then remove processed Dds
245
283
  if clear_dds:
246
284
  os.remove(failed_file)
247
- logger.debug(f"{failed_file} deleted")
285
+ logger.debug(f"{failed_file} deleted\t[{self.job_id}]")
248
286
 
249
287
  # Get failed tmraw list
250
288
  failed_tmraw_list = self.pipeline.get("failed_tmraw", default=[])
251
289
  failed_tmraw_count = len(failed_tmraw_list)
252
290
  # Skip task if no failed tmraw
253
291
  if failed_tmraw_count == 0:
254
- logger.debug("No failed tmraw to write")
292
+ logger.debug("No failed tmraw to write\t[{self.job_id}]")
255
293
  return
256
294
  else:
257
295
  # Else save list of failed tmraw into text file
@@ -260,5 +298,5 @@ class CopyFailedDds(Task):
260
298
  fw.writelines(failed_tmraw_list)
261
299
  logger.info(
262
300
  f"{failed_tmraw_count} failed TmRaw entries "
263
- f"saved into {tmraw_failed_file}"
301
+ f"saved into {tmraw_failed_file}\t[{self.job_id}]"
264
302
  )
roc/film/tasks/l0_to_hk.py CHANGED
@@ -57,7 +57,7 @@ def L0ToHk(task):
57
57
 
58
58
  # Define task job ID (long and short)
59
59
  job_uuid = str(uuid.uuid4())
60
- job_id = f"L0ToHk-{job_uuid[:8]}"
60
+ job_id = f"{job_uuid[:8]}"
61
61
  logger.info(f"Task {job_id} is starting")
62
62
 
63
63
  try:
roc/film/tasks/l0_to_l1_bia_current.py CHANGED
@@ -4,6 +4,7 @@
4
4
  """Contains task to create the RPW L1 Bias current CDF files."""
5
5
 
6
6
  import os
7
+ import uuid
7
8
 
8
9
  from poppy.core.logger import logger
9
10
  from poppy.core.target import FileTarget
@@ -31,6 +32,11 @@ def L0ToL1BiaCurrent(task):
31
32
  :return:
32
33
  """
33
34
 
35
+ # Define task job ID (long and short)
36
+ job_uuid = str(uuid.uuid4())
37
+ task.job_id = f"{job_uuid[:8]}"
38
+ logger.info(f"Task {task.job_id} is starting")
39
+
34
40
  # Get list of input l0 file(s)
35
41
  l0_file_list = task.inputs["l0_file"].filepath
36
42
 
roc/film/tools/file_helpers.py CHANGED
@@ -572,19 +572,28 @@ def l0_to_trange_cdf(
572
572
  l0_file_len = len(l0_file_list)
573
573
 
574
574
  # Get L0 files time_min/time_max
575
- l0_time_min, l0_time_max = get_l0_trange(l0_file_list)
575
+ try:
576
+ l0_time_min, l0_time_max = get_l0_trange(l0_file_list)
577
+ if None in l0_time_min or None in l0_time_max:
578
+ raise ValueError
579
+ except TypeError:
580
+ logger.error(f'Input "l0_files" must be a list!\t[{task.job_id}]')
581
+ return []
582
+ except ValueError:
583
+ logger.error(f"Output L0 time min. or max. list is not valid!\t[{task.job_id}]")
584
+ return []
576
585
 
577
586
  # Get start_time for output CDF (use time min of L0 files if not defined)
578
587
  if not start_time:
579
588
  start_time = task.pipeline.get("start_time", default=[min(l0_time_min)])[0]
580
589
 
581
- logger.debug(f"start_time value is {start_time} [{task.job_id}]")
590
+ logger.debug(f"start_time value is {start_time}\t[{task.job_id}]")
582
591
 
583
592
  # Get end_time for output CDF (use time max of L0 files if not defined)
584
593
  if not end_time:
585
594
  end_time = task.pipeline.get("end_time", default=[max(l0_time_max)])[0]
586
595
 
587
- logger.debug(f"end_time value is {end_time} [{task.job_id}]")
596
+ logger.debug(f"end_time value is {end_time}\t[{task.job_id}]")
588
597
 
589
598
  # Loops over each output dataset to produce for the current task
590
599
  for current_dataset in dataset_list:
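The new guard turns a bad l0_files argument (a TypeError, now raised by get_l0_trange, see the hunk further below) or an unreadable min/max list (ValueError) into an early return with an empty output list. A toy reduction of that contract; get_l0_trange is stubbed and load_time_range is an illustrative stand-in for the caller logic, not the plugin's function:

```python
from datetime import datetime


def get_l0_trange(l0_files):
    """Stub: raise TypeError for non-list input, (None, None) marks a bad file."""
    if not isinstance(l0_files, list):
        raise TypeError
    known = {"good.h5": (datetime(2025, 1, 1), datetime(2025, 1, 2))}
    pairs = [known.get(f, (None, None)) for f in l0_files]
    return [p[0] for p in pairs], [p[1] for p in pairs]


def load_time_range(l0_file_list):
    try:
        l0_time_min, l0_time_max = get_l0_trange(l0_file_list)
        if None in l0_time_min or None in l0_time_max:
            raise ValueError
    except TypeError:
        print('Input "l0_files" must be a list!')
        return []
    except ValueError:
        print("Output L0 time min. or max. list is not valid!")
        return []
    return [min(l0_time_min), max(l0_time_max)]


print(load_time_range("good.h5"))    # [] (not a list)
print(load_time_range(["bad.h5"]))   # [] (unreadable time range)
print(load_time_range(["good.h5"]))  # [datetime(2025, 1, 1, ...), datetime(2025, 1, 2, ...)]
```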
@@ -594,7 +603,7 @@ def l0_to_trange_cdf(
594
603
 
595
604
  logger.debug(
596
605
  "Running file production for the dataset "
597
- f"{dataset_name} (V{data_version}) [{task.job_id}]"
606
+ f"{dataset_name} (V{data_version})\t[{task.job_id}]"
598
607
  )
599
608
 
600
609
  # get the path to the master CDF file of this dataset
@@ -612,14 +621,14 @@ def l0_to_trange_cdf(
612
621
  # Check existence
613
622
  if not master_path:
614
623
  raise FileNotFoundError(
615
- "{0} master CDF not found for the dataset {1}! [{2}]".format(
624
+ "{0} master CDF not found for the dataset {1}!\t[{2}]".format(
616
625
  master_pattern, dataset_name, task.job_id
617
626
  )
618
627
  )
619
628
  else:
620
629
  master_path = sorted(master_path)[-1]
621
630
  logger.info(
622
- 'Producing dataset "{0}" with the master CDF "{1}" [{2}]'.format(
631
+ 'Producing dataset "{0}" with the master CDF "{1}"\t[{2}]'.format(
623
632
  dataset_name, master_path, task.job_id
624
633
  )
625
634
  )
@@ -635,12 +644,12 @@ def l0_to_trange_cdf(
635
644
  if l0_time_max[i] < start_time or l0_time_min[i] > end_time:
636
645
  logger.debug(
637
646
  f"{l0_file} is outside the time range: "
638
- f"[{start_time}, {end_time}], skip it [{task.job_id}]"
647
+ f"[{start_time}, {end_time}], skip it\t[{task.job_id}]"
639
648
  )
640
649
  continue
641
650
  else:
642
651
  logger.debug(
643
- f"Processing {l0_file} [{l0_file_len - i - 1}] [{task.job_id}]"
652
+ f"Processing {l0_file} [{l0_file_len - i - 1}]\t[{task.job_id}]"
644
653
  )
645
654
 
646
655
  # Append current l0 file to parent list
@@ -655,7 +664,7 @@ def l0_to_trange_cdf(
655
664
  ):
656
665
  logger.info(
657
666
  f"No expected packet found for {dataset_name}"
658
- f" in {l0_file} [{','.join(expected_packet)}] [{task.job_id}]"
667
+ f" in {l0_file} [{','.join(expected_packet)}]\t[{task.job_id}]"
659
668
  )
660
669
  continue
661
670
 
@@ -664,18 +673,20 @@ def l0_to_trange_cdf(
664
673
  # as the dataset alias in the descriptor
665
674
  func = dataset_func.get(dataset_name)
666
675
  if func is None:
667
- logger.error(f"No function found for {dataset_name}")
676
+ logger.error(
677
+ f"No function found for {dataset_name}\t[{task.job_id}]"
678
+ )
668
679
  failed_files.append(l0_file)
669
680
  break
670
681
 
671
682
  # call the dataset-related function
672
683
  try:
673
- logger.debug(f"Running {func} [{task.job_id}]")
684
+ logger.debug(f"Running {func}\t[{task.job_id}]")
674
685
  result = func(l0, task)
675
686
  except Exception as e:
676
687
  # TODO catch exception in the ROC database
677
688
  logger.exception(
678
- f'Running "{func}" function has failed [{task.job_id}]: \n{e}'
689
+ f'Running "{func}" function has failed\t[{task.job_id}]: \n{e}'
679
690
  )
680
691
  # TODO - Add the current failed dataset processing to failed_files
681
692
  failed_files.append(l0_file)
@@ -685,12 +696,12 @@ def l0_to_trange_cdf(
685
696
  if result is None or result.shape[0] == 0:
686
697
  logger.debug(
687
698
  f"Returned {dataset_name} dataset array"
688
- f" is empty for {l0_file} [{task.job_id}]"
699
+ f" is empty for {l0_file}\t[{task.job_id}]"
689
700
  )
690
701
  else:
691
702
  logger.debug(
692
703
  f"{result.shape[0]} {dataset_name} dataset samples"
693
- f" returned from {l0_file} [{task.job_id}]"
704
+ f" returned from {l0_file}\t[{task.job_id}]"
694
705
  )
695
706
 
696
707
  # If data is empty
@@ -706,7 +717,7 @@ def l0_to_trange_cdf(
706
717
  if nrec == 0:
707
718
  logger.warning(
708
719
  "No data for dataset"
709
- f" {dataset_name}: skip output cdf creation [{task.job_id}]"
720
+ f" {dataset_name}: skip output cdf creation\t[{task.job_id}]"
710
721
  )
711
722
  continue
712
723
 
@@ -716,12 +727,12 @@ def l0_to_trange_cdf(
716
727
  # Generation date
717
728
  generation_date = datetime.utcnow().strftime(INPUT_DATETIME_STRFTIME)
718
729
  logger.debug(
719
- f'Set "Generation_date" attr. value to {generation_date} [{task.job_id}]'
730
+ f'Set "Generation_date" attr. value to {generation_date}\t[{task.job_id}]'
720
731
  )
721
732
 
722
733
  # file ID
723
734
  file_id = str(uuid.uuid4())
724
- logger.debug(f'Set "File_ID" attr. value to {file_id} [{task.job_id}]')
735
+ logger.debug(f'Set "File_ID" attr. value to {file_id}\t[{task.job_id}]')
725
736
 
726
737
  # Re-define datetime and parents g.attribute for time range CDF data
727
738
  # products
@@ -781,7 +792,7 @@ def l0_to_trange_cdf(
781
792
  metadata["SPICE_KERNELS"] = sclk_file[-1]
782
793
  else:
783
794
  logger.warning(
784
- f"No SPICE SCLK kernel saved for {filepath} [{task.job_id}]"
795
+ f"No SPICE SCLK kernel saved for {filepath}\t[{task.job_id}]"
785
796
  )
786
797
 
787
798
  # open the target to update its status according to errors etc
@@ -808,27 +819,27 @@ def l0_to_trange_cdf(
808
819
  else:
809
820
  logger.warning(
810
821
  f"No data found between {start_time} and {end_time}"
811
- f" to be written into {filepath} [{task.job_id}]"
822
+ f" to be written into {filepath}\t[{task.job_id}]"
812
823
  )
813
824
 
814
825
  except Exception as e:
815
826
  logger.exception(
816
- f"{filepath} production has failed [{task.job_id}]:\n{e}"
827
+ f"{filepath} production has failed\t[{task.job_id}]:\n{e}"
817
828
  )
818
829
  cdf.attrs["Validate"] = "-1"
819
830
  failed_files.append(filepath)
820
831
 
821
832
  if nrec == 0:
822
- logger.info(f"Removing empty file {filepath}... [{task.job_id}]")
833
+ logger.info(f"Removing empty file {filepath}...\t[{task.job_id}]")
823
834
  os.remove(filepath)
824
835
  filepath = ""
825
836
  elif os.path.isfile(filepath):
826
837
  processed_files.append(filepath)
827
- logger.info(f"{filepath} saved [{task.job_id}]")
838
+ logger.info(f"{filepath} saved\t[{task.job_id}]")
828
839
  output_file_list.append(filepath)
829
840
  else:
830
841
  failed_files.append(filepath)
831
- logger.error(f"Writing {filepath} has failed! [{task.job_id}]")
842
+ logger.error(f"Writing {filepath} has failed!\t[{task.job_id}]")
832
843
 
833
844
  # Set output target filepath
834
845
  target.filepath = filepath
@@ -849,8 +860,7 @@ def get_l0_trange(l0_files: list, minmax: bool = False) -> tuple:
849
860
  """
850
861
 
851
862
  if not isinstance(l0_files, list):
852
- logger.error('Input "l0_files" must be a list!')
853
- return None, None
863
+ raise TypeError
854
864
 
855
865
  # Get number of l0_files
856
866
  nl0 = len(l0_files)
roc_film-1.14.2.dist-info/METADATA CHANGED
@@ -1,12 +1,12 @@
1
1
  Metadata-Version: 2.3
2
2
  Name: roc-film
3
- Version: 1.14.0
3
+ Version: 1.14.2
4
4
  Summary: RPW FILe Maker (FILM): Plugin to make RPW L0, L1 and HK data files
5
- License: CeCILL
5
+ License: CECILL-2.1
6
6
  Author: Xavier Bonnin
7
7
  Author-email: xavier.bonnin@obspm.fr
8
8
  Requires-Python: >=3.9,<4
9
- Classifier: License :: Other/Proprietary License
9
+ Classifier: License :: OSI Approved :: CEA CNRS Inria Logiciel Libre License, version 2.1 (CeCILL-2.1)
10
10
  Classifier: Programming Language :: Python :: 3
11
11
  Classifier: Programming Language :: Python :: 3.9
12
12
  Classifier: Programming Language :: Python :: 3.10
roc_film-1.14.2.dist-info/RECORD CHANGED
@@ -1,20 +1,20 @@
1
1
  roc/__init__.py,sha256=0LOVV4ExL9DrAzmNJj6c6395-nQTPM6v6S1hJnBAf3E,89
2
2
  roc/film/__init__.py,sha256=XBPsnRsgcUjJnutv_tkgNViw2TqnA5ozO-bNO2HY-mg,143
3
- roc/film/commands.py,sha256=kxc2z2qxyLpXG1oxiGYC7cHISMzrbgw0mklIvOAww0o,37489
3
+ roc/film/commands.py,sha256=xzyARalgWc6mPfN5nUvDtqTzaovP2_Xbi6wCDsS4tVY,37019
4
4
  roc/film/config/__init__.py,sha256=Yh8cG85CwN7OIAndtBs213LZn5NwEjPjFYKpMnagJuc,68
5
- roc/film/constants.py,sha256=0qQ3DvJ87gW3IvyfJe_qfvsodMaJbxn6om6ImGXihRI,4015
6
- roc/film/descriptor.json,sha256=t3WSiLgqH6ykAm8RvrWJ-4gCtwJsHkCFFf69lYSFjgQ,33501
5
+ roc/film/constants.py,sha256=zibC4vjLfg3fpH3BP7AGW4j48vf-AaXn--Afx5gmGH4,3998
6
+ roc/film/descriptor.json,sha256=3Hft4Up8MzSpNy6H11br6BV5VfMAKxhmjbrR1g_-_uw,33486
7
7
  roc/film/exceptions.py,sha256=kGWP9VR_DjDdyZYC5gDixtvV7xAv7qEx5srzS4_8TlU,5177
8
8
  roc/film/tasks/__init__.py,sha256=GNuQAi7E9nOApSSUUg4DEKlY0OlOzVaJexFKK0h9A2c,961
9
9
  roc/film/tasks/cat_solo_hk.py,sha256=PX3HkyJCgFHrw_wwa44r_tWcXQ7jCbd6oiZGS8KRXfI,11227
10
- roc/film/tasks/cdf_postpro.py,sha256=atGaJgg__QLJF3ZIEeZtYStuJDK7JmoR67FJ21bUNFg,31606
10
+ roc/film/tasks/cdf_postpro.py,sha256=eqw_80t03jeiw7MNZFWclvkQAm_1GndwFRtm5_1tjIg,25309
11
11
  roc/film/tasks/check_dds.py,sha256=pb0YZGu_cqPL2-Vi5aUaXAFJy86R2rOLVVaXnTLArug,3734
12
12
  roc/film/tasks/db_to_anc_bia_sweep_table.py,sha256=o-rJpj1VzS9azAT_EiWL9fxaZQt2qaArKtGiLMHv3S4,12781
13
13
  roc/film/tasks/dds_to_l0.py,sha256=nhsBESTlkEqiKAVVKek2oXe5neLFet1nTVsTiGTm1zY,22120
14
14
  roc/film/tasks/export_solo_coord.py,sha256=iuUfyQo3-vkdEdDsp2ZscFxsDVvdZJXxF0K-5IRQitw,5753
15
- roc/film/tasks/file_handler.py,sha256=jCEfM5YHdcG_vHsPB_VbmJ0f4oDkIZ4SCihwwT6T_k4,9739
16
- roc/film/tasks/l0_to_hk.py,sha256=95tWZUhW5FUzyeArAY7BmgPO61BkTDSfwv8LBjAMjgM,11008
17
- roc/film/tasks/l0_to_l1_bia_current.py,sha256=w4KrldizPbKB5M1E7uA5oYd6fd7mO5V8IwSEKD5DddU,2389
15
+ roc/film/tasks/file_handler.py,sha256=elJmRBeMLRhXtvf_YvuBuIa7Wn_JffFV_zLQYRWzqO0,11116
16
+ roc/film/tasks/l0_to_hk.py,sha256=bn9hYHmxGJWBuj_uYovkI9lnonQeplsr4_apxkK1YuA,11001
17
+ roc/film/tasks/l0_to_l1_bia_current.py,sha256=GS6GvukagltY1CUW__b2CX-nBAYjOxxaatfppP5gmVg,2564
18
18
  roc/film/tasks/l0_to_l1_bia_sweep.py,sha256=VYjD_ml2M0K7WA7I4UKsmrm4cvxArYRBazmzuNs7_vs,34980
19
19
  roc/film/tasks/l0_to_l1_sbm.py,sha256=OjgMdCk7ldhs4mTlAxLVER_Cud6ILlBkW88a7P46ZSI,19219
20
20
  roc/film/tasks/l0_to_l1_surv.py,sha256=-n78s2r9aEAGw5U0LRd7NyiwdEehHufg5K4lEY4vR-k,15522
@@ -38,13 +38,13 @@ roc/film/tests/test_metadata.py,sha256=1nl5or5RPMsIgMqtvYz28qdZXVgjYw6olphMm6y8W
38
38
  roc/film/tests/tests.py,sha256=boEPC42mZYQfinYE3jyol-7O34ttU5p_Ei0a_9YUHM4,44273
39
39
  roc/film/tools/__init__.py,sha256=dkYmLgy3b_B1T3ZZ6s_rv7NyDjLF0yOJPerjLEKAFlA,303
40
40
  roc/film/tools/dataset_tasks.py,sha256=CI9UIYchLwXfcjJoD8PsaFIcNX8akAsXz4RQ4hqhJeU,2825
41
- roc/film/tools/file_helpers.py,sha256=RKPu2nkGPkNOQUvARGhrwPULfuaVKs8UGAH9DANLToU,30679
41
+ roc/film/tools/file_helpers.py,sha256=KtcQxwIBfawj6HnrmvV91YjdvXm4tRfKG4ORVOhccds,30983
42
42
  roc/film/tools/l0.py,sha256=a5xxk3BYxtwOeZp36AoDPtF07VZpXfyKrpIYcC83u6w,46657
43
43
  roc/film/tools/metadata.py,sha256=gYFoo_VinBFoJOVIBwDfSqq0WvYTdDaS3NjEDtYREUM,12409
44
44
  roc/film/tools/skeleton.py,sha256=aTe6VWvy5Y79FuA3aKieTQ91m26PEvJ3MnzC4ZZqMvc,10765
45
45
  roc/film/tools/tools.py,sha256=q2eN494T8irkNUn0d6_oQs2W_ZJj1OxJu_ViORGOa3g,17945
46
46
  roc/film/tools/xlsx2skt.py,sha256=sHmFLGuL1pT1N4V4Nk8i7yoHM2lveXphuvUUN28MvJs,19599
47
- roc_film-1.14.0.dist-info/LICENSE,sha256=TqI0k3vHsKpSR-Q2aQ0euTJIdbx1kOzeUL79OONRkKU,21778
48
- roc_film-1.14.0.dist-info/METADATA,sha256=pSz1BvelqyQQMh6YhuD0dxuQydGB2oMLLZtydGKTfpA,1747
49
- roc_film-1.14.0.dist-info/WHEEL,sha256=IYZQI976HJqqOpQU6PHkJ8fb3tMNBFjg-Cn-pwAbaFM,88
50
- roc_film-1.14.0.dist-info/RECORD,,
47
+ roc_film-1.14.2.dist-info/LICENSE,sha256=TqI0k3vHsKpSR-Q2aQ0euTJIdbx1kOzeUL79OONRkKU,21778
48
+ roc_film-1.14.2.dist-info/METADATA,sha256=9Y6VcPHLBQT674Wf85XWF9vNiqTuqcyZKEcwvqdwzdM,1805
49
+ roc_film-1.14.2.dist-info/WHEEL,sha256=IYZQI976HJqqOpQU6PHkJ8fb3tMNBFjg-Cn-pwAbaFM,88
50
+ roc_film-1.14.2.dist-info/RECORD,,
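For reference, each RECORD line follows the wheel format path,sha256=&lt;digest&gt;,&lt;size&gt;, where the digest is the urlsafe base64 of the file's SHA-256 hash with the padding stripped. A short sketch that reproduces such an entry for a file on disk:

```python
import base64
import hashlib


def record_entry(path: str) -> str:
    """Build a wheel RECORD line (PEP 427/376 style) for the given file."""
    with open(path, "rb") as fh:
        data = fh.read()
    digest = base64.urlsafe_b64encode(hashlib.sha256(data).digest()).rstrip(b"=")
    return f"{path},sha256={digest.decode()},{len(data)}"


# Run against the packaged roc/film/constants.py, this should reproduce the
# RECORD line listed above:
# roc/film/constants.py,sha256=zibC4vjLfg3fpH3BP7AGW4j48vf-AaXn--Afx5gmGH4,3998
```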