roc-film 1.13.4__py3-none-any.whl → 1.14.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (52)
  1. roc/__init__.py +2 -1
  2. roc/film/__init__.py +2 -2
  3. roc/film/commands.py +372 -323
  4. roc/film/config/__init__.py +0 -1
  5. roc/film/constants.py +101 -65
  6. roc/film/descriptor.json +126 -95
  7. roc/film/exceptions.py +28 -27
  8. roc/film/tasks/__init__.py +16 -16
  9. roc/film/tasks/cat_solo_hk.py +86 -74
  10. roc/film/tasks/cdf_postpro.py +438 -309
  11. roc/film/tasks/check_dds.py +39 -45
  12. roc/film/tasks/db_to_anc_bia_sweep_table.py +381 -0
  13. roc/film/tasks/dds_to_l0.py +232 -180
  14. roc/film/tasks/export_solo_coord.py +147 -0
  15. roc/film/tasks/file_handler.py +91 -75
  16. roc/film/tasks/l0_to_hk.py +117 -103
  17. roc/film/tasks/l0_to_l1_bia_current.py +38 -30
  18. roc/film/tasks/l0_to_l1_bia_sweep.py +417 -329
  19. roc/film/tasks/l0_to_l1_sbm.py +250 -208
  20. roc/film/tasks/l0_to_l1_surv.py +185 -130
  21. roc/film/tasks/make_daily_tm.py +40 -37
  22. roc/film/tasks/merge_tcreport.py +77 -71
  23. roc/film/tasks/merge_tmraw.py +102 -89
  24. roc/film/tasks/parse_dds_xml.py +21 -20
  25. roc/film/tasks/set_l0_utc.py +51 -49
  26. roc/film/tests/cdf_compare.py +565 -0
  27. roc/film/tests/hdf5_compare.py +84 -62
  28. roc/film/tests/test_dds_to_l0.py +93 -51
  29. roc/film/tests/test_dds_to_tc.py +8 -11
  30. roc/film/tests/test_dds_to_tm.py +8 -10
  31. roc/film/tests/test_film.py +161 -116
  32. roc/film/tests/test_l0_to_hk.py +64 -36
  33. roc/film/tests/test_l0_to_l1_bia.py +10 -14
  34. roc/film/tests/test_l0_to_l1_sbm.py +14 -19
  35. roc/film/tests/test_l0_to_l1_surv.py +68 -41
  36. roc/film/tests/test_metadata.py +21 -20
  37. roc/film/tests/tests.py +743 -396
  38. roc/film/tools/__init__.py +5 -5
  39. roc/film/tools/dataset_tasks.py +34 -2
  40. roc/film/tools/file_helpers.py +390 -269
  41. roc/film/tools/l0.py +402 -324
  42. roc/film/tools/metadata.py +147 -127
  43. roc/film/tools/skeleton.py +12 -17
  44. roc/film/tools/tools.py +109 -92
  45. roc/film/tools/xlsx2skt.py +161 -139
  46. {roc_film-1.13.4.dist-info → roc_film-1.14.0.dist-info}/LICENSE +127 -125
  47. roc_film-1.14.0.dist-info/METADATA +60 -0
  48. roc_film-1.14.0.dist-info/RECORD +50 -0
  49. {roc_film-1.13.4.dist-info → roc_film-1.14.0.dist-info}/WHEEL +1 -1
  50. roc/film/tasks/l0_to_anc_bia_sweep_table.py +0 -348
  51. roc_film-1.13.4.dist-info/METADATA +0 -120
  52. roc_film-1.13.4.dist-info/RECORD +0 -48
roc/film/tasks/l0_to_l1_surv.py
@@ -1,8 +1,7 @@
- #!/usr/bin/env python
+ #!/usr/bin/env python3
  # -*- coding: utf-8 -*-

  from glob import glob
- import sys
  import os
  from datetime import datetime
  import uuid
@@ -20,154 +19,161 @@ from roc.rap.tasks.utils import order_by_increasing_time
  from roc.rpl.time import Time

  # Import methods to extract data from RPW packets
- from roc.film.tools.dataset_tasks import *
-
- from roc.film.tools.file_helpers import put_cdf_global, \
- generate_filepath, get_master_cdf_dir, \
- is_packet, put_cdf_zvars, get_l0_file, get_output_dir, is_output_dir
+ from roc.film.tools.dataset_tasks import dataset_func
+
+ from roc.film.tools.file_helpers import (
+ put_cdf_global,
+ generate_filepath,
+ get_master_cdf_dir,
+ is_packet,
+ put_cdf_zvars,
+ get_l0_file,
+ get_output_dir,
+ is_output_dir,
+ )
  from roc.film.tools.metadata import init_cdf_global, get_spice_kernels

-
  # task for L0 to L1 Survey data CDF production (including LFM)
  from roc.film.tools.tools import get_datasets
+ from roc.film.constants import INPUT_DATETIME_STRFTIME

- __all__ = ['L0ToL1Surv']
-
+ __all__ = ["L0ToL1Surv"]


  class L0ToL1Surv(Task):
  """
  Task to produce RPW L1 survey data daily CDF from a L0 daily file
  """
- plugin_name = 'roc.film'
- name = 'l0_to_l1_surv'

- def add_targets(self):
+ plugin_name = "roc.film"
+ name = "l0_to_l1_surv"

- self.add_input(identifier='l0_file', filepath=get_l0_file,
- target_class=FileTarget)
- self.add_output(identifier='l1_surv_rswf',
- target_class=FileTarget)
- self.add_output(identifier='l1_surv_tswf', target_class=FileTarget)
- self.add_output(identifier='l1_surv_hist1d', target_class=FileTarget)
- self.add_output(identifier='l1_surv_hist2d', target_class=FileTarget)
- self.add_output(identifier='l1_surv_stat', target_class=FileTarget)
- self.add_output(identifier='l1_surv_mamp', target_class=FileTarget)
- self.add_output(identifier='l1_lfm_rswf', target_class=FileTarget)
- self.add_output(identifier='l1_lfm_cwf', target_class=FileTarget)
- self.add_output(identifier='l1_lfm_sm', target_class=FileTarget)
- self.add_output(identifier='l1_lfm_psd', target_class=FileTarget)
- self.add_output(identifier='l1_surv_asm', target_class=FileTarget)
- self.add_output(identifier='l1_surv_bp1', target_class=FileTarget)
- self.add_output(identifier='l1_surv_bp2', target_class=FileTarget)
- self.add_output(identifier='l1_surv_cwf', target_class=FileTarget)
- self.add_output(identifier='l1_surv_swf', target_class=FileTarget)
- self.add_output(identifier='l1_surv_tnr', target_class=FileTarget)
- self.add_output(identifier='l1_surv_hfr', target_class=FileTarget)
+ def add_targets(self):
+ self.add_input(
+ identifier="l0_file", filepath=get_l0_file, target_class=FileTarget
+ )
+ self.add_output(identifier="l1_surv_rswf", target_class=FileTarget)
+ self.add_output(identifier="l1_surv_tswf", target_class=FileTarget)
+ self.add_output(identifier="l1_surv_hist1d", target_class=FileTarget)
+ self.add_output(identifier="l1_surv_hist2d", target_class=FileTarget)
+ self.add_output(identifier="l1_surv_stat", target_class=FileTarget)
+ self.add_output(identifier="l1_surv_mamp", target_class=FileTarget)
+ self.add_output(identifier="l1_lfm_rswf", target_class=FileTarget)
+ self.add_output(identifier="l1_lfm_cwf", target_class=FileTarget)
+ self.add_output(identifier="l1_lfm_sm", target_class=FileTarget)
+ self.add_output(identifier="l1_lfm_psd", target_class=FileTarget)
+ self.add_output(identifier="l1_surv_asm", target_class=FileTarget)
+ self.add_output(identifier="l1_surv_bp1", target_class=FileTarget)
+ self.add_output(identifier="l1_surv_bp2", target_class=FileTarget)
+ self.add_output(identifier="l1_surv_cwf", target_class=FileTarget)
+ self.add_output(identifier="l1_surv_swf", target_class=FileTarget)
+ self.add_output(identifier="l1_surv_tnr", target_class=FileTarget)
+ self.add_output(identifier="l1_surv_hfr", target_class=FileTarget)

  def setup_inputs(self):
-
  # Get products directory (folder where final output files will be
  # moved)
- self.products_dir = self.pipeline.get('products_dir',
- default=[None], args=True)[0]
+ self.products_dir = self.pipeline.get(
+ "products_dir", default=[None], args=True
+ )[0]

  # Get output dir
  self.output_dir = get_output_dir(self.pipeline)
- if not is_output_dir(self.output_dir,
- products_dir=self.products_dir):
- logger.info(f'Making {self.output_dir}')
+ if not is_output_dir(self.output_dir, products_dir=self.products_dir):
+ logger.info(f"Making {self.output_dir}")
  os.makedirs(self.output_dir)
  else:
- logger.debug(f'Output files will be '
- f'saved into folder {self.output_dir}')
+ logger.debug(f"Output files will be saved into folder {self.output_dir}")

  # get the input l0 file
- self.l0_file = self.inputs['l0_file']
+ self.l0_file = self.inputs["l0_file"]

  # Get or create failed_files list from pipeline properties
- self.failed_files = self.pipeline.get(
- 'failed_files', default=[], create=True)
+ self.failed_files = self.pipeline.get("failed_files", default=[], create=True)

  # Get or create processed_files list from pipeline properties
  self.processed_files = self.pipeline.get(
- 'processed_files', default=[], create=True)
+ "processed_files", default=[], create=True
+ )

  # Retrieve list of output datasets to produce for the given task
  try:
  self.dataset_list = get_datasets(self, self.name)
- except:
- raise LoadDataSetError(f'Cannot load the list of datasets to produce for {self.name}')
+ except Exception:
+ raise LoadDataSetError(
+ f"Cannot load the list of datasets to produce for {self.name}"
+ )
  else:
- logger.debug(f'Produce L1 CDF file(s) for the following dataset(s): {[ds["name"] for ds in self.dataset_list]}')
+ logger.debug(
+ f"Produce L1 CDF file(s) for the following dataset(s): {[ds['name'] for ds in self.dataset_list]}"
+ )

  # Get overwrite optional keyword
- self.overwrite = self.pipeline.get(
- 'overwrite', default=False, args=True)
+ self.overwrite = self.pipeline.get("overwrite", default=False, args=True)

  # Get force optional keyword
- self.force = self.pipeline.get('force', default=False, args=True)
+ self.force = self.pipeline.get("force", default=False, args=True)

  # Get (optional) arguments for SPICE
- self.predictive = self.pipeline.get(
- 'predictive', default=False, args=True)
- self.kernel_date = self.pipeline.get(
- 'kernel_date', default=None, args=True)
- self.no_spice = self.pipeline.get('no_spice', default=False, args=True)
+ self.predictive = self.pipeline.get("predictive", default=False, args=True)
+ self.kernel_date = self.pipeline.get("kernel_date", default=None, args=True)
+ self.no_spice = self.pipeline.get("no_spice", default=False, args=True)
  # if is_cdag = True, add '-cdag' suffix to the end of the descriptor field in
  # the output filename
  # (used to indicate preliminary files to distributed to the CDAG members only)
- self.is_cdag = self.pipeline.get('cdag', default=False, args=True)
+ self.is_cdag = self.pipeline.get("cdag", default=False, args=True)
  if self.is_cdag:
  logger.info('Producing "cdag" output CDF')

  # Get/create Time singleton
- self.time_instance = Time(predictive=self.predictive,
- kernel_date=self.kernel_date,
- no_spice=self.no_spice)
+ self.time_instance = Time(
+ predictive=self.predictive,
+ kernel_date=self.kernel_date,
+ no_spice=self.no_spice,
+ )

  return True

  def run(self):
-
  # Import external modules
  from spacepy.pycdf import CDF

  # Define task job ID (long and short)
  self.job_uuid = str(uuid.uuid4())
- self.job_id = f'L0ToL1Surv-{self.job_uuid[:8]}'
- logger.info(f'Task {self.job_id} is starting')
+ self.job_id = self.job_uuid[:8]
+ logger.info(f"Task job {self.job_id} is starting")
  try:
  self.setup_inputs()
- except:
- logger.exception(
- f'Initializing inputs has failed for {self.job_id}!')
+ except Exception:
+ logger.exception(f"Initializing inputs has failed for {self.job_id}!")
  try:
- os.makedirs(os.path.join(self.output_dir, 'failed'))
- except:
- logger.error(f'output_dir argument is not defined for {self.job_id}!')
+ os.makedirs(os.path.join(self.output_dir, "failed"))
+ except Exception:
+ logger.error(f"output_dir argument is not defined for {self.job_id}!")
  self.pipeline.exit()
  return

  # Open the HDF5 file to extract information
- with h5py.File(self.l0_file.filepath, 'r') as l0:
+ with h5py.File(self.l0_file.filepath, "r") as l0:
  # get modes for TNR
  logger.info(
- f'Producing RPW L1 SURVEY data file(s) from {self.l0_file.filepath} [{self.job_id}]'
+ f"Producing RPW L1 SURVEY data file(s) from {self.l0_file.filepath} [{self.job_id}]"
  )

  # Loops over each output dataset to produce for the current task
  for current_dataset in self.dataset_list:
- dataset_name = current_dataset['name']
- data_descr = current_dataset['descr']
- data_version = current_dataset['version']
- logger.debug(f'Running file production for the dataset {dataset_name} (V{data_version}) [{self.job_id}]')
+ dataset_name = current_dataset["name"]
+ data_descr = current_dataset["descr"]
+ data_version = current_dataset["version"]
+ logger.debug(
+ f"Running file production for the dataset {dataset_name} (V{data_version}) [{self.job_id}]"
+ )

  # get the path to the master CDF file of this dataset
  master_cdf_dir = get_master_cdf_dir(self)

  # Get master cdf filename from descriptor
- master_cdf_file = data_descr['template']
+ master_cdf_file = data_descr["template"]

  # Get master filepath
  master_pattern = os.path.join(master_cdf_dir, master_cdf_file)
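Note on the import change above: the wildcard import of roc.film.tools.dataset_tasks plus the later getattr(sys.modules[__name__], dataset_name) lookup is replaced by an explicit dataset_func mapping. A minimal, hypothetical sketch of such a registry (the real mapping is built inside roc.film.tools.dataset_tasks and may be organised differently):

from typing import Callable, Dict

# Hypothetical stand-in for roc.film.tools.dataset_tasks: each dataset alias
# declared in the descriptor maps to the function that extracts its records.
def l1_surv_rswf(l0, task):
    ...  # build the record array for this dataset from the L0 HDF5 groups

dataset_func: Dict[str, Callable] = {
    "l1_surv_rswf": l1_surv_rswf,
    # one entry per dataset alias
}

# Lookup as done in run(): a missing alias yields None instead of raising
# AttributeError, so the task can log the problem and move on to the next dataset.
func = dataset_func.get("l1_surv_rswf")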
@@ -175,67 +181,86 @@

  # Check existence
  if not master_path:
- os.makedirs(os.path.join(self.output_dir, 'failed'))
- raise FileNotFoundError(f'{master_pattern} MASTER CDF '
- f'FILE NOT FOUND! [{self.job_id}]')
+ os.makedirs(os.path.join(self.output_dir, "failed"))
+ raise FileNotFoundError(
+ f"{master_pattern} MASTER CDF "
+ f"FILE NOT FOUND! [{self.job_id}]"
+ )
  else:
  master_path = sorted(master_path)[-1]
- logger.info('Producing dataset "{0}" from {1} '
- 'with the master CDF "{2} [{3}]"'.format(
- dataset_name,
- os.path.basename(self.l0_file.filepath),
- master_path,
- self.job_id))
+ logger.info(
+ 'Producing dataset "{0}" from {1} '
+ 'with the master CDF "{2} [{3}]"'.format(
+ dataset_name,
+ os.path.basename(self.l0_file.filepath),
+ master_path,
+ self.job_id,
+ )
+ )

  # Set CDF metadata
- metadata = init_cdf_global(l0.attrs, self, master_path,
- overwrite={'MODS': data_descr['mods']})
+ metadata = init_cdf_global(
+ l0.attrs, self, master_path, overwrite={"MODS": data_descr["mods"]}
+ )

  # Build output filepath from pipeline properties and metadata
- filepath = generate_filepath(self, metadata, 'cdf',
- is_cdag=self.is_cdag,
- overwrite=self.overwrite)
+ filepath = generate_filepath(
+ self,
+ metadata,
+ "cdf",
+ is_cdag=self.is_cdag,
+ overwrite=self.overwrite,
+ )

  # Get TM packet(s) required to generate HK CDF for the current
  # dataset
- expected_packet = data_descr['packet']
+ expected_packet = data_descr["packet"]
  # Check that TM packet(s) are in the input l0 data
- if not is_packet(expected_packet, l0['TM']):
- logger.info(f'No packet for "{current_dataset}" '
- f'found in {self.l0_file.filepath} [{self.job_id}]')
+ if not is_packet(expected_packet, l0["TM"]):
+ logger.info(
+ f'No packet for "{current_dataset}" '
+ f"found in {self.l0_file.filepath} [{self.job_id}]"
+ )
  # if not continue
  continue

  # Get function to process data
  # IMPORTANT: function alias in import should have the same name
- # than the dataset alias in the descriptor
- func = getattr(sys.modules[__name__], dataset_name)
+ # as the dataset alias in the descriptor
+ func = dataset_func.get(dataset_name)
+ if func is None:
+ logger.error(
+ 'No function found for dataset "{0}"'.format(dataset_name)
+ )
+ self.failed_files.append(filepath)
+ continue

  # call the function
  try:
  result = func(l0, self)
- except:
+ except Exception as e:
  # Print error message
- msg = (f'Running "{func}" function has failed '
- f'for dataset {dataset_name} [{self.job_id}]')
+ msg = (
+ f'Running "{func}" function has failed '
+ f"for dataset {dataset_name} [{self.job_id}]:\n{e}"
+ )
  logger.exception(msg)
  # creating empty output CDF to be saved into
  # failed dir
- cdf = CDF(filepath, master_path)
- cdf.readonly(False)
- cdf.attrs['Validate'] = '-1'
- cdf.attrs['TEXT_supplement_1'] = ':'.join(
- [msg, traceback.format_exc()])
- cdf.close()
+ with CDF(filepath, master_path) as cdf:
+ cdf.readonly(False)
+ cdf.attrs["Validate"] = "-1"
+ cdf.attrs["TEXT_supplement_1"] = ":".join(
+ [msg, traceback.format_exc()]
+ )
  self.failed_files.append(filepath)
  continue

  # open the target to update its status according to errors etc
  target = self.outputs[dataset_name]
  with target.activate():
-
  try:
- # check non empty
+ # check non empty data
  if result is None or result.shape[0] == 0:
  self.failed_files.append(filepath)
  raise target.TargetEmpty()
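The failure branch above clones the master CDF and flags it so the broken product can be inspected in the "failed" directory. A minimal standalone sketch of that pattern, assuming spacepy is installed and that the helper is called from inside the except handler (so traceback.format_exc() captures the original error):

import traceback
from spacepy.pycdf import CDF

def write_failed_cdf(filepath, master_path, msg):
    # Create a new CDF on disk, initialised from the master file
    with CDF(filepath, master_path) as cdf:
        cdf.readonly(False)  # make sure the new file is writable
        cdf.attrs["Validate"] = "-1"  # mark the product as not validated
        # Keep the message plus the traceback of the exception being handled
        cdf.attrs["TEXT_supplement_1"] = ":".join([msg, traceback.format_exc()])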
@@ -244,19 +269,38 @@
  # NOTE - Temporary avoid sorting
  # for l1_surv_tnr dataset due to time issue
  # (see https://gitlab.obspm.fr/ROC/RCS/THR_CALBAR/-/issues/45)
- if dataset_name != 'l1_surv_tnr' and dataset_name != 'l1_surv_mamp' and dataset_name != 'l1_surv_hfr':
+ if (
+ dataset_name != "l1_surv_tnr"
+ and dataset_name != "l1_surv_mamp"
+ and dataset_name != "l1_surv_hfr"
+ ):
+ # Sorting records by increasing Epoch times
  result = order_by_increasing_time(result)
- elif dataset_name == 'l1_surv_hfr':
- result = order_by_increasing_time(result, sort_by='acquisition_time')
+ elif (
+ dataset_name == "l1_surv_tnr"
+ or dataset_name == "l1_surv_hfr"
+ ):
+ # Remove data points which have bad delta time value
+ result = result[result["epoch"] != -1.0e31]
+
+ # Sorting records by increasing acquisition_time
+ result = order_by_increasing_time(
+ result, sort_by="acquisition_time"
+ )
+
  # Make sure to have increasing sweep num
- i_sweep = 0
+ i_sweep = 1
  nrec = result.shape[0]
- new_sweep = np.zeros(nrec, dtype=int)
+ new_sweep = np.zeros(nrec, dtype=int) + 1
  for i in range(1, nrec):
- if result['sweep_num'][i] != result['sweep_num'][i - 1]:
+ if result["sweep_num"][i] != result["sweep_num"][i - 1]:
  i_sweep += 1
+ elif result["sweep_num"][i] == 4294967295:
+ new_sweep[i] = 4294967295
+ continue
  new_sweep[i] = i_sweep
- result['sweep_num'] = new_sweep
+
+ result["sweep_num"] = new_sweep

  # create the file for the CDF containing results
  cdf = CDF(filepath, master_path)
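The renumbering loop added in this hunk can be read as a small standalone function; 4294967295 is the CDF_UINT4 fill value, which the new code propagates instead of renumbering. The function name below is illustrative, the logic mirrors the hunk:

import numpy as np

CDF_UINT4_FILL = 4294967295  # fill value carried by sweep_num records

def renumber_sweeps(sweep_num):
    # Re-index sweep numbers so they increase monotonically from 1
    nrec = sweep_num.shape[0]
    i_sweep = 1
    new_sweep = np.zeros(nrec, dtype=int) + 1
    for i in range(1, nrec):
        if sweep_num[i] != sweep_num[i - 1]:
            # The counter advances whenever the raw sweep number changes
            i_sweep += 1
        elif sweep_num[i] == CDF_UINT4_FILL:
            # A fill value repeated from the previous record stays a fill value
            new_sweep[i] = CDF_UINT4_FILL
            continue
        new_sweep[i] = i_sweep
    return new_sweep

# Example: renumber_sweeps(np.array([7, 7, 9, 9, 12])) -> array([1, 1, 2, 2, 3])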
@@ -269,42 +313,53 @@
  time_min, time_max, nrec = put_cdf_zvars(cdf, result)

  # Fill Generation date
- cdf.attrs[
- 'Generation_date'] = datetime.utcnow().isoformat()
+ cdf.attrs["Generation_date"] = datetime.utcnow().strftime(
+ INPUT_DATETIME_STRFTIME
+ )

  # Fill file uuid
- cdf.attrs['File_ID'] = str(uuid.uuid4())
+ cdf.attrs["File_ID"] = str(uuid.uuid4())

  # Fill TIME_MIN, TIME_MAX (in julian days)
- cdf.attrs['TIME_MIN'] = str(
- Time.tt2000_to_jd(time_min))
- cdf.attrs['TIME_MAX'] = str(
- Time.tt2000_to_jd(time_max))
+ cdf.attrs["TIME_MIN"] = (
+ str(self.time_instance.tt2000_to_utc(time_min)) + "Z"
+ ).replace(" ", "T")
+ cdf.attrs["TIME_MAX"] = (
+ str(self.time_instance.tt2000_to_utc(time_max)) + "Z"
+ ).replace(" ", "T")

  # Add SPICE SCLK kernel as an entry
  # of the "Kernels" g. attr
- sclk_file = get_spice_kernels(time_instance=self.time_instance,
- pattern='solo_ANC_soc-sclk_')
+ sclk_file = get_spice_kernels(
+ time_instance=self.time_instance,
+ pattern="solo_ANC_soc-sclk_",
+ )
  if sclk_file:
- cdf.attrs['SPICE_KERNELS'] = sorted(sclk_file)[-1]
+ cdf.attrs["SPICE_KERNELS"] = sorted(sclk_file)[-1]
  else:
- logger.warning('No SPICE SCLK kernel '
- f'saved for {filepath} [{self.job_id}]')
+ logger.warning(
+ "No SPICE SCLK kernel "
+ f"saved for {filepath} [{self.job_id}]"
+ )

  cdf.close()

  if os.path.isfile(filepath):
- logger.info(f'{filepath} saved')
+ logger.info(f"{filepath} saved [{self.job_id}]")
  self.processed_files.append(filepath)
  else:
- raise L1SurvProdFailure(f'{filepath} not found [{self.job_id}]')
+ raise L1SurvProdFailure(
+ f"{filepath} not found [{self.job_id}]"
+ )
  except NoData:
  # close cdf
  cdf.close()
  # output CDF is outside time range, remove it
  os.remove(filepath)
- except:
- logger.exception(f'{filepath} production has failed! [{self.job_id}]')
+ except Exception:
+ logger.exception(
+ f"{filepath} production has failed! [{self.job_id}]"
+ )
  self.failed_files.append(filepath)
  finally:
  target.filepath = filepath
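In this hunk TIME_MIN and TIME_MAX switch from Julian days to UTC ISO 8601 strings. The string shaping can be checked in isolation, assuming tt2000_to_utc returns a datetime whose str() uses a space separator (which is what the .replace(" ", "T") implies):

from datetime import datetime

def to_iso_z(utc):
    # 'YYYY-MM-DD HH:MM:SS.ffffff' -> 'YYYY-MM-DDTHH:MM:SS.ffffffZ'
    return (str(utc) + "Z").replace(" ", "T")

print(to_iso_z(datetime(2021, 3, 4, 12, 30, 5, 250000)))
# 2021-03-04T12:30:05.250000Z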
roc/film/tasks/make_daily_tm.py
@@ -16,103 +16,106 @@ from roc.film import DATA_VERSION, TIME_DAILY_STRFORMAT, TMRAW_PREFIX_BASENAME
  from roc.film.tools.file_helpers import get_output_dir
  from roc.film.tools import valid_data_version

- __all__ = ['MakeDailyTm']
+ __all__ = ["MakeDailyTm"]
+

  class MakeDailyTm(Task):
  """
  Task to write daily XML files containing RPW DDS TmRaw data.
  """
- plugin_name = 'roc.film'
- name = 'make_daily_tm'
+
+ plugin_name = "roc.film"
+ name = "make_daily_tm"

  def add_targets(self):
- self.add_output(identifier='daily_tm_xml', many=True,
- target_class=FileTarget)
+ self.add_output(identifier="daily_tm_xml", many=True, target_class=FileTarget)

  def setup_inputs(self):
-
  # Get data_version input keyword (can be used to force version of
  # output file)
  self.data_version = valid_data_version(
- self.pipeline.get('data_version', default=[DATA_VERSION])[0])
+ self.pipeline.get("data_version", default=[DATA_VERSION])[0]
+ )

  # Get/create list of well processed DDS files
  self.processed_files = self.pipeline.get(
- 'processed_files', default=[], create=True)
+ "processed_files", default=[], create=True
+ )

  # Get/create list of failed DDS files
- self.failed_files = self.pipeline.get(
- 'failed_files', default=[], create=True)
+ self.failed_files = self.pipeline.get("failed_files", default=[], create=True)

  # If output directory not found, create it
  self.output_dir = get_output_dir(self.pipeline)
  if not os.path.isdir(self.output_dir):
- logger.debug(f'Making {self.output_dir}...')
+ logger.debug(f"Making {self.output_dir}...")
  os.makedirs(self.output_dir)

  # Get packet cache and has new packet flag
- self.packet_cache = self.pipeline.get('packet_cache', default={})
- self.has_new_packet = self.pipeline.get('has_new_packet', default={})
+ self.packet_cache = self.pipeline.get("packet_cache", default={})
+ self.has_new_packet = self.pipeline.get("has_new_packet", default={})
  if not self.packet_cache or not self.has_new_packet:
  return False

  return True

  def run(self):
-
- logger.debug('Running MakeDailyTm task...')
+ logger.debug("Running MakeDailyTm task...")

  if not self.setup_inputs():
- logger.warning('Missing inputs for MakeDailyTm task!')
+ logger.warning("Missing inputs for MakeDailyTm task!")
  return

  # Loop over each day in the outputs
  output_files = []
  for current_category, current_dates in self.packet_cache.items():
  for current_date, packet_data in current_dates.items():
-
  # Check if new packets have retrieved
  # If not, then no need to write a new output file
  if not self.has_new_packet[current_category][current_date]:
- logger.info('No need to create new output '
- f'{current_category} file for {current_date}')
+ logger.info(
+ "No need to create new output "
+ f"{current_category} file for {current_date}"
+ )
  continue

  # define format of data version
- data_version = f'V{int(self.data_version):02d}'
+ data_version = f"V{int(self.data_version):02d}"

  # Build output TmRaw file basename
  packet_date_str = current_date.strftime(TIME_DAILY_STRFORMAT)
- file_basename = '_'.join([
- TMRAW_PREFIX_BASENAME + '-' + current_category,
- packet_date_str]
+ file_basename = "_".join(
+ [TMRAW_PREFIX_BASENAME + "-" + current_category, packet_date_str]
  )

  # Build full new output file basename
- file_basename = '_'.join(
- [file_basename, data_version]) + '.xml'
+ file_basename = "_".join([file_basename, data_version]) + ".xml"

  # Build output file path
- output_target_path = os.path.join(
- self.output_dir, file_basename)
+ output_target_path = os.path.join(self.output_dir, file_basename)

  # Build list of output packets
- output_packets = [current_packet[1]
- for current_packet in packet_data]
+ output_packets = [current_packet[1] for current_packet in packet_data]

  # Write output file
- logger.info(f'Writing {len(output_packets)} TmRaw Packet elements '
- f'into {output_target_path}...')
+ logger.info(
+ f"Writing {len(output_packets)} TmRaw Packet elements "
+ f"into {output_target_path}..."
+ )
  try:
- if make_tmraw_xml(output_packets, output_target_path,
- overwrite=True,
- logger=logger):
+ if make_tmraw_xml(
+ output_packets,
+ output_target_path,
+ overwrite=True,
+ logger=logger,
+ ):
  self.processed_files.append(output_target_path)
  output_files.append(output_target_path)
  else:
  raise FileNotFoundError
- except:
- logger.exception(f'Writing {output_target_path} has failed!')
+ except Exception as e:
+ logger.exception(f"Writing {output_target_path} has failed!")
+ logger.debug(e)
  self.failed_files.append(output_target_path)

- self.outputs['daily_tm_xml'].filepath = output_files
+ self.outputs["daily_tm_xml"].filepath = output_files
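The daily TmRaw file naming in MakeDailyTm can be followed with illustrative values; TMRAW_PREFIX_BASENAME and TIME_DAILY_STRFORMAT below are placeholders only, the real values come from the roc.film constants imported at the top of the file:

import os
from datetime import date

TMRAW_PREFIX_BASENAME = "solo_TM_rpw"  # placeholder, not the real constant
TIME_DAILY_STRFORMAT = "%Y%m%d"        # placeholder, not the real constant

def daily_tm_path(output_dir, category, day, version):
    # Mirrors the basename construction in run(): prefix-category_date_Vnn.xml
    packet_date_str = day.strftime(TIME_DAILY_STRFORMAT)
    data_version = f"V{int(version):02d}"
    file_basename = "_".join([TMRAW_PREFIX_BASENAME + "-" + category, packet_date_str])
    file_basename = "_".join([file_basename, data_version]) + ".xml"
    return os.path.join(output_dir, file_basename)

print(daily_tm_path("/tmp", "sbm1", date(2021, 3, 4), 1))
# /tmp/solo_TM_rpw-sbm1_20210304_V01.xml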