roc-film 1.13.4 → 1.14.0 (py3-none-any.whl)

Files changed (52)
  1. roc/__init__.py +2 -1
  2. roc/film/__init__.py +2 -2
  3. roc/film/commands.py +372 -323
  4. roc/film/config/__init__.py +0 -1
  5. roc/film/constants.py +101 -65
  6. roc/film/descriptor.json +126 -95
  7. roc/film/exceptions.py +28 -27
  8. roc/film/tasks/__init__.py +16 -16
  9. roc/film/tasks/cat_solo_hk.py +86 -74
  10. roc/film/tasks/cdf_postpro.py +438 -309
  11. roc/film/tasks/check_dds.py +39 -45
  12. roc/film/tasks/db_to_anc_bia_sweep_table.py +381 -0
  13. roc/film/tasks/dds_to_l0.py +232 -180
  14. roc/film/tasks/export_solo_coord.py +147 -0
  15. roc/film/tasks/file_handler.py +91 -75
  16. roc/film/tasks/l0_to_hk.py +117 -103
  17. roc/film/tasks/l0_to_l1_bia_current.py +38 -30
  18. roc/film/tasks/l0_to_l1_bia_sweep.py +417 -329
  19. roc/film/tasks/l0_to_l1_sbm.py +250 -208
  20. roc/film/tasks/l0_to_l1_surv.py +185 -130
  21. roc/film/tasks/make_daily_tm.py +40 -37
  22. roc/film/tasks/merge_tcreport.py +77 -71
  23. roc/film/tasks/merge_tmraw.py +102 -89
  24. roc/film/tasks/parse_dds_xml.py +21 -20
  25. roc/film/tasks/set_l0_utc.py +51 -49
  26. roc/film/tests/cdf_compare.py +565 -0
  27. roc/film/tests/hdf5_compare.py +84 -62
  28. roc/film/tests/test_dds_to_l0.py +93 -51
  29. roc/film/tests/test_dds_to_tc.py +8 -11
  30. roc/film/tests/test_dds_to_tm.py +8 -10
  31. roc/film/tests/test_film.py +161 -116
  32. roc/film/tests/test_l0_to_hk.py +64 -36
  33. roc/film/tests/test_l0_to_l1_bia.py +10 -14
  34. roc/film/tests/test_l0_to_l1_sbm.py +14 -19
  35. roc/film/tests/test_l0_to_l1_surv.py +68 -41
  36. roc/film/tests/test_metadata.py +21 -20
  37. roc/film/tests/tests.py +743 -396
  38. roc/film/tools/__init__.py +5 -5
  39. roc/film/tools/dataset_tasks.py +34 -2
  40. roc/film/tools/file_helpers.py +390 -269
  41. roc/film/tools/l0.py +402 -324
  42. roc/film/tools/metadata.py +147 -127
  43. roc/film/tools/skeleton.py +12 -17
  44. roc/film/tools/tools.py +109 -92
  45. roc/film/tools/xlsx2skt.py +161 -139
  46. {roc_film-1.13.4.dist-info → roc_film-1.14.0.dist-info}/LICENSE +127 -125
  47. roc_film-1.14.0.dist-info/METADATA +60 -0
  48. roc_film-1.14.0.dist-info/RECORD +50 -0
  49. {roc_film-1.13.4.dist-info → roc_film-1.14.0.dist-info}/WHEEL +1 -1
  50. roc/film/tasks/l0_to_anc_bia_sweep_table.py +0 -348
  51. roc_film-1.13.4.dist-info/METADATA +0 -120
  52. roc_film-1.13.4.dist-info/RECORD +0 -48
roc/film/tasks/l0_to_hk.py
@@ -12,36 +12,41 @@ import uuid
  import numpy as np
  import h5py

-
  from poppy.core.logger import logger
  from poppy.core.task import Task
  from poppy.core.target import FileTarget

  from roc.rpl.time import Time

- from roc.film.tools.file_helpers import put_cdf_global, \
- get_master_cdf_dir, get_output_dir, is_output_dir
+ from roc.film.tools.file_helpers import (
+ put_cdf_global,
+ get_master_cdf_dir,
+ get_output_dir,
+ is_output_dir,
+ )
  from roc.film.tools.metadata import init_cdf_global, get_spice_kernels
  from roc.film.tools.file_helpers import generate_filepath, is_packet, get_l0_file
+ from roc.film.constants import INPUT_DATETIME_STRFTIME

- __all__ = ['L0ToHk']
-
- # Generate SOLO RPW HK daily CDF files
  from roc.film.exceptions import LoadDataSetError
-
  from roc.film.tools.tools import get_datasets, sort_cdf_by_epoch

+ __all__ = ["L0ToHk"]
+
+
+ # Generate SOLO RPW HK daily CDF files
+

- @FileTarget.input(identifier='l0_file', filepath=get_l0_file)
- @FileTarget.output(identifier='hk_dbs')
- @FileTarget.output(identifier='hk_das')
- @FileTarget.output(identifier='hk_das_stat')
- @FileTarget.output(identifier='hk_tds')
- @FileTarget.output(identifier='hk_lfr')
- @FileTarget.output(identifier='hk_thr')
- @FileTarget.output(identifier='hk_pdu')
- @FileTarget.output(identifier='hk_bia')
- @Task.as_task(plugin_name='roc.film', name='l0_to_hk')
+ @FileTarget.input(identifier="l0_file", filepath=get_l0_file)
+ @FileTarget.output(identifier="hk_dbs")
+ @FileTarget.output(identifier="hk_das")
+ @FileTarget.output(identifier="hk_das_stat")
+ @FileTarget.output(identifier="hk_tds")
+ @FileTarget.output(identifier="hk_lfr")
+ @FileTarget.output(identifier="hk_thr")
+ @FileTarget.output(identifier="hk_pdu")
+ @FileTarget.output(identifier="hk_bia")
+ @Task.as_task(plugin_name="roc.film", name="l0_to_hk")
  def L0ToHk(task):
  """
  Task to write RPW HK "digest" survey data CDF files from an input L0 file.
@@ -52,122 +57,127 @@ def L0ToHk(task):

  # Define task job ID (long and short)
  job_uuid = str(uuid.uuid4())
- job_id = f'L0ToHk-{job_uuid[:8]}'
- logger.info(f'Task {job_id} is starting')
+ job_id = f"L0ToHk-{job_uuid[:8]}"
+ logger.info(f"Task {job_id} is starting")

  try:
  # Get products directory (folder where final output files will be moved)
- products_dir = task.pipeline.get('products_dir',
- default=[None], args=True)[0]
+ products_dir = task.pipeline.get("products_dir", default=[None], args=True)[0]

  # Get output dir
  output_dir = get_output_dir(task.pipeline)
- if not is_output_dir(output_dir,
- products_dir=products_dir):
- logger.info(f'Making {output_dir}')
+ if not is_output_dir(output_dir, products_dir=products_dir):
+ logger.info(f"Making {output_dir}")
  os.makedirs(output_dir)
  else:
- logger.debug(f'Output files will be '
- f'saved into folder {output_dir}')
-
+ logger.debug(f"Output files will be saved into folder {output_dir}")

  # Get or create failed_files list from pipeline properties
- failed_files = task.pipeline.get('failed_files', default=[], create=True)
+ failed_files = task.pipeline.get("failed_files", default=[], create=True)

  # Get or create processed_files list from pipeline properties
- processed_files = task.pipeline.get(
- 'processed_files', default=[], create=True)
+ processed_files = task.pipeline.get("processed_files", default=[], create=True)

  # Get overwrite argument
- overwrite = task.pipeline.get('overwrite', default=False, args=True)
+ overwrite = task.pipeline.get("overwrite", default=False, args=True)

  # get the input l0_file
- l0_file = task.inputs['l0_file']
+ l0_file = task.inputs["l0_file"]

  # Get (optional) arguments for SPICE
- predictive = task.pipeline.get('predictive', default=False, args=True)
- kernel_date = task.pipeline.get('kernel_date', default=None, args=True)
- no_spice = task.pipeline.get('no_spice', default=False, args=True)
+ predictive = task.pipeline.get("predictive", default=False, args=True)
+ kernel_date = task.pipeline.get("kernel_date", default=None, args=True)
+ no_spice = task.pipeline.get("no_spice", default=False, args=True)

  # Get/create Time singleton
- time_instance = Time(predictive=predictive,
- kernel_date=kernel_date,
- no_spice=no_spice)
- except:
- logger.exception(
- f'Initializing inputs has failed for {job_id}!')
+ time_instance = Time(
+ predictive=predictive, kernel_date=kernel_date, no_spice=no_spice
+ )
+ except Exception:
+ logger.exception(f"Initializing inputs has failed for {job_id}!")
  try:
- os.makedirs(os.path.join(output_dir, 'failed'))
- except:
- logger.error(f'output_dir argument is not defined for {job_id}!')
+ os.makedirs(os.path.join(output_dir, "failed"))
+ except Exception:
+ logger.error(f"output_dir argument is not defined for {job_id}!")
  task.pipeline.exit()
  return

  # Retrieve list of output datasets to produce for the given task
  try:
  dataset_list = get_datasets(task, task.name)
- except:
- raise LoadDataSetError(f'Cannot load the list of datasets to produce for {task.name} [{job_id}]')
+ except Exception:
+ raise LoadDataSetError(
+ f"Cannot load the list of datasets to produce for {task.name} [{job_id}]"
+ )
  else:
- logger.debug(f'Produce HK CDF file(s) for the following dataset(s): {[ds["name"] for ds in dataset_list]} [{job_id}]')
+ logger.debug(
+ f"Produce HK CDF file(s) for the following dataset(s): {[ds['name'] for ds in dataset_list]} [{job_id}]"
+ )

  # open the HDF5 file to extract information
- with h5py.File(l0_file.filepath, 'r') as l0:
-
+ with h5py.File(l0_file.filepath, "r") as l0:
  # Loops over each output dataset to produce for the current task
  for current_dataset in dataset_list:
- dataset_name = current_dataset['name']
- data_descr = current_dataset['descr']
- data_version = current_dataset['version']
+ dataset_name = current_dataset["name"]
+ data_descr = current_dataset["descr"]
+ # data_version = current_dataset['version']

  filepath = None
  try:
  # Display
- logger.info('Processing HK dataset {0} [{1}]'.format(dataset_name, job_id))
+ logger.info(
+ "Processing HK dataset {0} [{1}]".format(dataset_name, job_id)
+ )

  # get the path to the master CDF file of this dataset
  master_cdf_dir = get_master_cdf_dir(task)
  # Get master cdf filename from descriptor
- master_cdf_file = data_descr['template']
+ master_cdf_file = data_descr["template"]
  # Build master file pattern
- master_pattern = osp.join(master_cdf_dir,
- master_cdf_file)
+ master_pattern = osp.join(master_cdf_dir, master_cdf_file)
  # Get master filepath
  master_path = glob.glob(master_pattern)

  # Check existence
  if not master_path:
- os.makedirs(os.path.join(output_dir, 'failed'))
- raise FileNotFoundError('{0} MASTER CDF '
- 'FILE NOT FOUND! [{1}]'.format(master_pattern, job_id))
+ os.makedirs(os.path.join(output_dir, "failed"))
+ raise FileNotFoundError(
+ "{0} MASTER CDF FILE NOT FOUND! [{1}]".format(
+ master_pattern, job_id
+ )
+ )
  else:
  master_path = sorted(master_path)[-1]
- logger.debug('Use {0} as a master CDF [{1}]'.format(master_path, job_id))
+ logger.debug(
+ "Use {0} as a master CDF [{1}]".format(master_path, job_id)
+ )

  # Set CDF metadata
  metadata = init_cdf_global(l0.attrs, task, master_path)

  # Build output filepath from pipeline properties and L0 metadata
  # Generate output filepath
- filepath = generate_filepath(task, metadata, '.cdf',
- overwrite=overwrite)
+ filepath = generate_filepath(
+ task, metadata, ".cdf", overwrite=overwrite
+ )

  # Get TM packet(s) required to generate HK CDF for the current
  # dataset
- packet = data_descr['packet']
+ packet = data_descr["packet"]
  # Check that TM packet(s) are in the input l0 data
- if is_packet(packet, l0['TM']):
- data = l0['TM'][packet[0]]
+ if is_packet(packet, l0["TM"]):
+ data = l0["TM"][packet[0]]
  else:
  # if not continue
- logger.info(f'No packet found in {l0_file.filepath} '
- f'for dataset {dataset_name} [{job_id}]')
+ logger.info(
+ f"No packet found in {l0_file.filepath} "
+ f"for dataset {dataset_name} [{job_id}]"
+ )
  continue

  # open the target to update its status according to errors etc
  target = task.outputs[dataset_name]
  with target.activate():
-
  # if output dir not exists, create it
  if not os.path.isdir(task.pipeline.output):
  os.makedirs(task.pipeline.output)
@@ -179,76 +189,80 @@ def L0ToHk(task):
  put_cdf_global(cdf, metadata)

  # get the time from the header
- time = data['data_field_header']['time']
+ time = data["data_field_header"]["time"]
  # convert time to CDF_TIME_TT2000
- tt2000 = time_instance.obt_to_utc(time,
- to_tt2000=True)
+ tt2000 = time_instance.obt_to_utc(time, to_tt2000=True)
  # set time zVars into the CDF
- cdf['ACQUISITION_TIME'] = time[:, :2]
- cdf['Epoch'] = tt2000
- cdf['SCET'] = Time.cuc_to_scet(time[:, :2])
- cdf['SYNCHRO_FLAG'] = time[:, 2]
+ cdf["ACQUISITION_TIME"] = time[:, :2]
+ cdf["Epoch"] = tt2000
+ cdf["SCET"] = Time.cuc_to_scet(time[:, :2])
+ cdf["SYNCHRO_FLAG"] = time[:, 2]

  # copy data from file into memory
  # TODO - Add conversion of HK parameters into engineering
  # values
  parameters = {}
- for parameter, values in data['source_data'].items():
+ for parameter, values in data["source_data"].items():
  parameters[parameter] = values[...]
  # loop over parameters and add them to the CDF file
  for parameter, values in parameters.items():
  cdf[parameter] = values
- cdf[parameter].attrs['SCALEMIN'] = np.min(values)
- cdf[parameter].attrs['SCALEMAX'] = np.max(values)
+ cdf[parameter].attrs["SCALEMIN"] = np.min(values)
+ cdf[parameter].attrs["SCALEMAX"] = np.max(values)

  # TODO - Improve this part (add a check of requested attributes?)
  # Generation date
- cdf.attrs['Generation_date'] = datetime.utcnow().isoformat()
+ cdf.attrs["Generation_date"] = datetime.utcnow().strftime(
+ INPUT_DATETIME_STRFTIME
+ )

  # file uuid
- cdf.attrs['File_ID'] = str(uuid.uuid4())
+ cdf.attrs["File_ID"] = str(uuid.uuid4())

- cdf.attrs['TIME_MIN'] = str(
- Time.tt2000_to_jd(min(tt2000)))
- cdf.attrs['TIME_MAX'] = str(
- Time.tt2000_to_jd(max(tt2000)))
+ cdf.attrs["TIME_MIN"] = (
+ str(time_instance.tt2000_to_utc(min(tt2000))) + "Z"
+ ).replace(" ", "T")
+ cdf.attrs["TIME_MAX"] = (
+ str(time_instance.tt2000_to_utc(max(tt2000))) + "Z"
+ ).replace(" ", "T")

  # Add SPICE SCLK kernel as an entry
  # of the "Kernels" g. attr
- sclk_file = get_spice_kernels(time_instance=time_instance,
- pattern='solo_ANC_soc-sclk')
+ sclk_file = get_spice_kernels(
+ time_instance=time_instance, pattern="solo_ANC_soc-sclk"
+ )

  if sclk_file:
- cdf.attrs['SPICE_KERNELS'] = sclk_file[-1]
+ cdf.attrs["SPICE_KERNELS"] = sclk_file[-1]
  else:
- logger.warning('No SPICE SCLK kernel '
- f'saved for {filepath} [{job_id}]')
+ logger.warning(
+ f"No SPICE SCLK kernel saved for {filepath} [{job_id}]"
+ )

  cdf.close()

  if os.path.isfile(filepath):
  # Sort by ascending Epoch time
- logger.debug(f'Sorting by ascending Epoch times [{job_id}]')
+ logger.debug(f"Sorting by ascending Epoch times [{job_id}]")
  # Build list of Zvar to sort
  zvar_list = list(parameters.keys())
- zvar_list.extend(['Epoch',
- 'ACQUISITION_TIME',
- 'SCET',
- 'SYNCHRO_FLAG'])
+ zvar_list.extend(
+ ["Epoch", "ACQUISITION_TIME", "SCET", "SYNCHRO_FLAG"]
+ )
  cdf = CDF(filepath)
  cdf.readonly(False)
  cdf = sort_cdf_by_epoch(cdf, zvar_list=zvar_list)
  cdf.close()

- logger.info(
- f'{filepath} saved [{job_id}]'
- )
+ logger.info(f"{filepath} saved [{job_id}]")
  else:
- raise FileNotFoundError(f'{filepath} not found! [{job_id}]')
+ raise FileNotFoundError(f"{filepath} not found! [{job_id}]")

- except:
- logger.exception(f'Production for dataset {dataset_name} '
- f'from {l0_file.filepath} has failed! [{job_id}]')
+ except Exception:
+ logger.exception(
+ f"Production for dataset {dataset_name} "
+ f"from {l0_file.filepath} has failed! [{job_id}]"
+ )
  if filepath:
  failed_files.append(filepath)
  else:
@@ -260,6 +274,6 @@ def L0ToHk(task):


  # TODO - Add task to generate SOLO RPW HK dump CDF files
- #L0ToHkDumpTask = Plugin.manager[PLUGIN].task("l0_to_hk_dump")
- #@L0ToHkDumpTask.as_task
+ # L0ToHkDumpTask = Plugin.manager[PLUGIN].task("l0_to_hk_dump")
+ # @L0ToHkDumpTask.as_task
  # def l0_to_hk_dump(task):
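
One functional change in the l0_to_hk.py hunks above is worth flagging: the TIME_MIN and TIME_MAX global attributes switch from Julian dates (Time.tt2000_to_jd) to ISO 8601-style UTC strings built from time_instance.tt2000_to_utc. Below is a minimal standalone sketch of just the string handling, not code from the package, and assuming tt2000_to_utc() returns a datetime-like value whose str() uses a space separator:

from datetime import datetime

def to_iso_z(utc_value) -> str:
    # str() of a naive datetime gives "YYYY-MM-DD HH:MM:SS.ffffff";
    # the diff appends "Z" to mark UTC and swaps the space for a "T".
    return (str(utc_value) + "Z").replace(" ", "T")

print(to_iso_z(datetime(2021, 3, 4, 12, 0, 0, 123456)))
# 2021-03-04T12:00:00.123456Z
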
roc/film/tasks/l0_to_l1_bia_current.py
@@ -10,13 +10,19 @@ from poppy.core.target import FileTarget
  from poppy.core.task import Task
  from roc.rpl import Time

- from roc.film.tools.file_helpers import get_l0_files, get_output_dir, is_output_dir, l0_to_trange_cdf
+ from roc.film.tools.file_helpers import (
+ get_l0_files,
+ get_output_dir,
+ is_output_dir,
+ l0_to_trange_cdf,
+ )

- __all__ = ['L0ToL1BiaCurrent']
+ __all__ = ["L0ToL1BiaCurrent"]

- @FileTarget.input(identifier='l0_file', many=True, filepath=get_l0_files)
- @FileTarget.output(identifier='l1_bia_current')
- @Task.as_task(plugin_name='roc.film', name='l0_to_l1_bia_current')
+
+ @FileTarget.input(identifier="l0_file", many=True, filepath=get_l0_files)
+ @FileTarget.output(identifier="l1_bia_current")
+ @Task.as_task(plugin_name="roc.film", name="l0_to_l1_bia_current")
  def L0ToL1BiaCurrent(task):
  """
  Task to generate l1 bias current CDF from l0 file(s)
@@ -26,47 +32,49 @@ def L0ToL1BiaCurrent(task):
  """

  # Get list of input l0 file(s)
- l0_file_list = task.inputs['l0_file'].filepath
+ l0_file_list = task.inputs["l0_file"].filepath

  # Get --monthly keyword
- monthly = task.pipeline.get('monthly', default=False, args=True)
+ monthly = task.pipeline.get("monthly", default=False, args=True)

  # Get overwrite argument
- overwrite = task.pipeline.get('overwrite', default=False, args=True)
+ overwrite = task.pipeline.get("overwrite", default=False, args=True)

  # Get cdag keyword
- is_cdag = task.pipeline.get('cdag', default=False, args=True)
+ is_cdag = task.pipeline.get("cdag", default=False, args=True)

  # Get (optional) arguments for SPICE
- predictive = task.pipeline.get('predictive', default=False, args=True)
- kernel_date = task.pipeline.get('kernel_date', default=None, args=True)
- no_spice = task.pipeline.get('no_spice', default=False, args=True)
+ predictive = task.pipeline.get("predictive", default=False, args=True)
+ kernel_date = task.pipeline.get("kernel_date", default=None, args=True)
+ no_spice = task.pipeline.get("no_spice", default=False, args=True)

  # Get/create Time singleton
- Time(predictive=predictive,
- kernel_date=kernel_date,
- no_spice=no_spice)
+ time_instance = Time(
+ predictive=predictive, kernel_date=kernel_date, no_spice=no_spice
+ )

  # Get products directory (folder where final output files will be moved)
- products_dir = task.pipeline.get('products_dir',
- default=[None], args=True)[0]
+ products_dir = task.pipeline.get("products_dir", default=[None], args=True)[0]

  # Get output dir
  output_dir = get_output_dir(task.pipeline)
- if not is_output_dir(output_dir,
- products_dir=products_dir):
- logger.info(f'Making {output_dir}')
+ if not is_output_dir(output_dir, products_dir=products_dir):
+ logger.info(f"Making {output_dir}")
  os.makedirs(output_dir)
  else:
- logger.debug(f'Output files will be '
- f'saved into folder {output_dir}')
+ logger.debug(f"Output files will be saved into folder {output_dir}")

  try:
- l0_to_trange_cdf(task, 'l0_to_l1_bia_current',
- l0_file_list, output_dir,
- monthly=monthly,
- unique=True,
- is_cdag=is_cdag,
- overwrite=overwrite)
- except:
- logger.exception('L1 Bias current CDF production has failed!')
+ l0_to_trange_cdf(
+ task,
+ "l0_to_l1_bia_current",
+ l0_file_list,
+ output_dir,
+ time_instance=time_instance,
+ monthly=monthly,
+ unique=True,
+ is_cdag=is_cdag,
+ overwrite=overwrite,
+ )
+ except Exception as e:
+ logger.exception(f"L1 Bias current CDF production has failed:\n{e}")