roc-film 1.14.0.tar.gz → 1.14.1.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (50)
  1. {roc_film-1.14.0 → roc_film-1.14.1}/PKG-INFO +1 -1
  2. {roc_film-1.14.0 → roc_film-1.14.1}/pyproject.toml +2 -2
  3. {roc_film-1.14.0 → roc_film-1.14.1}/roc/film/descriptor.json +3 -3
  4. {roc_film-1.14.0 → roc_film-1.14.1}/roc/film/tasks/file_handler.py +62 -24
  5. {roc_film-1.14.0 → roc_film-1.14.1}/roc/film/tasks/l0_to_hk.py +1 -1
  6. {roc_film-1.14.0 → roc_film-1.14.1}/roc/film/tasks/l0_to_l1_bia_current.py +6 -0
  7. {roc_film-1.14.0 → roc_film-1.14.1}/roc/film/tools/file_helpers.py +35 -25
  8. {roc_film-1.14.0 → roc_film-1.14.1}/LICENSE +0 -0
  9. {roc_film-1.14.0 → roc_film-1.14.1}/README.md +0 -0
  10. {roc_film-1.14.0 → roc_film-1.14.1}/roc/__init__.py +0 -0
  11. {roc_film-1.14.0 → roc_film-1.14.1}/roc/film/__init__.py +0 -0
  12. {roc_film-1.14.0 → roc_film-1.14.1}/roc/film/commands.py +0 -0
  13. {roc_film-1.14.0 → roc_film-1.14.1}/roc/film/config/__init__.py +0 -0
  14. {roc_film-1.14.0 → roc_film-1.14.1}/roc/film/constants.py +0 -0
  15. {roc_film-1.14.0 → roc_film-1.14.1}/roc/film/exceptions.py +0 -0
  16. {roc_film-1.14.0 → roc_film-1.14.1}/roc/film/tasks/__init__.py +0 -0
  17. {roc_film-1.14.0 → roc_film-1.14.1}/roc/film/tasks/cat_solo_hk.py +0 -0
  18. {roc_film-1.14.0 → roc_film-1.14.1}/roc/film/tasks/cdf_postpro.py +0 -0
  19. {roc_film-1.14.0 → roc_film-1.14.1}/roc/film/tasks/check_dds.py +0 -0
  20. {roc_film-1.14.0 → roc_film-1.14.1}/roc/film/tasks/db_to_anc_bia_sweep_table.py +0 -0
  21. {roc_film-1.14.0 → roc_film-1.14.1}/roc/film/tasks/dds_to_l0.py +0 -0
  22. {roc_film-1.14.0 → roc_film-1.14.1}/roc/film/tasks/export_solo_coord.py +0 -0
  23. {roc_film-1.14.0 → roc_film-1.14.1}/roc/film/tasks/l0_to_l1_bia_sweep.py +0 -0
  24. {roc_film-1.14.0 → roc_film-1.14.1}/roc/film/tasks/l0_to_l1_sbm.py +0 -0
  25. {roc_film-1.14.0 → roc_film-1.14.1}/roc/film/tasks/l0_to_l1_surv.py +0 -0
  26. {roc_film-1.14.0 → roc_film-1.14.1}/roc/film/tasks/make_daily_tm.py +0 -0
  27. {roc_film-1.14.0 → roc_film-1.14.1}/roc/film/tasks/merge_tcreport.py +0 -0
  28. {roc_film-1.14.0 → roc_film-1.14.1}/roc/film/tasks/merge_tmraw.py +0 -0
  29. {roc_film-1.14.0 → roc_film-1.14.1}/roc/film/tasks/parse_dds_xml.py +0 -0
  30. {roc_film-1.14.0 → roc_film-1.14.1}/roc/film/tasks/set_l0_utc.py +0 -0
  31. {roc_film-1.14.0 → roc_film-1.14.1}/roc/film/tests/__init__.py +0 -0
  32. {roc_film-1.14.0 → roc_film-1.14.1}/roc/film/tests/cdf_compare.py +0 -0
  33. {roc_film-1.14.0 → roc_film-1.14.1}/roc/film/tests/hdf5_compare.py +0 -0
  34. {roc_film-1.14.0 → roc_film-1.14.1}/roc/film/tests/test_dds_to_l0.py +0 -0
  35. {roc_film-1.14.0 → roc_film-1.14.1}/roc/film/tests/test_dds_to_tc.py +0 -0
  36. {roc_film-1.14.0 → roc_film-1.14.1}/roc/film/tests/test_dds_to_tm.py +0 -0
  37. {roc_film-1.14.0 → roc_film-1.14.1}/roc/film/tests/test_film.py +0 -0
  38. {roc_film-1.14.0 → roc_film-1.14.1}/roc/film/tests/test_l0_to_hk.py +0 -0
  39. {roc_film-1.14.0 → roc_film-1.14.1}/roc/film/tests/test_l0_to_l1_bia.py +0 -0
  40. {roc_film-1.14.0 → roc_film-1.14.1}/roc/film/tests/test_l0_to_l1_sbm.py +0 -0
  41. {roc_film-1.14.0 → roc_film-1.14.1}/roc/film/tests/test_l0_to_l1_surv.py +0 -0
  42. {roc_film-1.14.0 → roc_film-1.14.1}/roc/film/tests/test_metadata.py +0 -0
  43. {roc_film-1.14.0 → roc_film-1.14.1}/roc/film/tests/tests.py +0 -0
  44. {roc_film-1.14.0 → roc_film-1.14.1}/roc/film/tools/__init__.py +0 -0
  45. {roc_film-1.14.0 → roc_film-1.14.1}/roc/film/tools/dataset_tasks.py +0 -0
  46. {roc_film-1.14.0 → roc_film-1.14.1}/roc/film/tools/l0.py +0 -0
  47. {roc_film-1.14.0 → roc_film-1.14.1}/roc/film/tools/metadata.py +0 -0
  48. {roc_film-1.14.0 → roc_film-1.14.1}/roc/film/tools/skeleton.py +0 -0
  49. {roc_film-1.14.0 → roc_film-1.14.1}/roc/film/tools/tools.py +0 -0
  50. {roc_film-1.14.0 → roc_film-1.14.1}/roc/film/tools/xlsx2skt.py +0 -0
{roc_film-1.14.0 → roc_film-1.14.1}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: roc-film
-Version: 1.14.0
+Version: 1.14.1
 Summary: RPW FILe Maker (FILM): Plugin to make RPW L0, L1 and HK data files
 License: CeCILL
 Author: Xavier Bonnin
{roc_film-1.14.0 → roc_film-1.14.1}/pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "poetry.core.masonry.api"
 
 [tool.poetry]
 name = "roc-film"
-version = "1.14.0"
+version = "1.14.1"
 readme = "README.md"
 license = "CeCILL"
 repository = "https://gitlab.obspm.fr/ROC/Pipelines/Plugins/FILM"
@@ -26,7 +26,7 @@ xmltodict = ">=0.12,<1.0"
 poppy-core = ">0.12.0"
 poppy-pop = ">0.12.0"
 
-[tool.poetry.dev-dependencies]
+[tool.poetry.group.dev.dependencies]
 pytest = "<9"
 pytest-cov = "<6"
 pytest-timeout = "<3"
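The pyproject.toml change above migrates the test tooling from the legacy [tool.poetry.dev-dependencies] table to the dependency-group syntax introduced in Poetry 1.2. A minimal sketch of how the old and new locations can be compared from Python; reading pyproject.toml with the standard-library tomllib is purely an illustration, not something the plugin itself does:

    # Illustrative check only: confirm where the dev dependencies live after
    # the migration to Poetry's dependency groups (Poetry >= 1.2).
    import tomllib  # standard library since Python 3.11
    from pathlib import Path

    data = tomllib.loads(Path("pyproject.toml").read_text())
    poetry_cfg = data["tool"]["poetry"]

    # Legacy table, removed in this release:
    legacy = poetry_cfg.get("dev-dependencies", {})
    # New location used from 1.14.1 onwards:
    dev_group = poetry_cfg.get("group", {}).get("dev", {}).get("dependencies", {})

    print("legacy dev-dependencies:", sorted(legacy))
    print("group.dev.dependencies: ", sorted(dev_group))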
{roc_film-1.14.0 → roc_film-1.14.1}/roc/film/descriptor.json
@@ -6,10 +6,10 @@
     "description": "RPW FILe Maker (FILM): Plugin to make RPW L0, L1 and HK data files"
   },
   "release": {
-    "version": "1.14.0",
-    "date": "2024-12-06",
+    "version": "1.14.1",
+    "date": "2025-01-29",
     "author": "Xavier Bonnin <xavier.bonnin@obspm.fr>, ROC Team <roc.support@sympa.obspm.fr>",
-    "modification": "Update python dev deps and README",
+    "modification": "Hotfix in file_helper + minor updates",
     "url": "https://gitlab.obspm.fr/ROC/Pipelines/Plugins/FILM"
   },
   "tasks": [
{roc_film-1.14.0 → roc_film-1.14.1}/roc/film/tasks/file_handler.py
@@ -31,10 +31,16 @@ class MoveToProdDir(Task):
         # the pipeline is still working on it
         # Add a LockFile class instance to the Task class in Poppy ?
 
+        self.job_uuid = str(uuid.uuid4())
+        self.job_id = self.job_uuid[:8]
+        logger.debug(f"Task {self.job_id} is starting")
+
         # See if --no-move keyword is defined
         no_move = self.pipeline.get("no_move", default=False, args=True)
         if no_move:
-            logger.info('Skip current task "move_to_products_dir": --no-move is True')
+            logger.info(
+                f'--no-move is passed: skip current task "move_to_products_dir"\t[{self.job_id}]'
+            )
             return
 
         # Retrieve pipeline output file directory
@@ -49,7 +55,7 @@ class MoveToProdDir(Task):
 
         if not products_dir:
             logger.info(
-                'Skip current task "move_to_products_dir": products_dir argument not defined'
+                f'products_dir argument not defined: Skip current task "move_to_products_dir"\t[{self.job_id}]'
             )
         else:
             output_dirbasename = os.path.basename(output_dir)
@@ -58,6 +64,8 @@ class MoveToProdDir(Task):
             if safe_move(output_dir, target_dir, ignore_patterns=ignore_patterns):
                 logger.info(f"{output_dir} moved into {products_dir}")
 
+        logger.debug(f"Task {self.job_id} completed")
+
 
 class MoveFailedFiles(Task):
     """Move any failed files found
@@ -81,7 +89,7 @@ class MoveFailedFiles(Task):
         # Define task job ID (long and short)
         self.job_uuid = str(uuid.uuid4())
         self.job_id = self.job_uuid[:8]
-        logger.info(f"Task {self.job_id} is starting")
+        logger.debug(f"Task {self.job_id} is starting")
         try:
             self.setup_inputs()
         except Exception:
@@ -90,7 +98,7 @@ class MoveFailedFiles(Task):
             return
 
         if self.failed_file_count == 0:
-            logger.debug("No failed file(s) to move")
+            logger.debug("No failed file(s) to move\t[{self.job_id}")
         else:
             # Loop over failed files list
             for i, failed_file in enumerate(self.failed_file_list):
@@ -113,7 +121,11 @@ class MoveFailedFiles(Task):
                 # perform a safe move (i.e., copy, check and delete) into
                 # failed dir
                 if safe_move(failed_file, target_filepath):
-                    logger.info(f"{failed_file} moved into {failed_dir}")
+                    logger.warning(
+                        f"{failed_file} moved into {failed_dir}\t[{self.job_id}]"
+                    )
+
+        logger.debug(f"Task {self.job_id} completed")
 
 
 class CopyProcessedDds(Task):
@@ -125,6 +137,11 @@ class CopyProcessedDds(Task):
     name = "copy_processed_dds"
 
     def run(self):
+        # Define task job ID (long and short)
+        self.job_uuid = str(uuid.uuid4())
+        self.job_id = self.job_uuid[:8]
+        logger.debug(f"Task {self.job_id} is starting")
+
         # Get processed file target directory
         processed_dir = self.pipeline.get(
             "processed_dds_dir", default=[None], args=True
@@ -133,14 +150,14 @@ class CopyProcessedDds(Task):
         # skip task if processed_dir is None
         if processed_dir is None:
             logger.info(
-                "Skip task copy_processed_dds: No processed_dds_dir argument defined"
+                f"No processed_dds_dir argument defined: skip task copy_processed_dds\t[{self.job_id}]"
             )
             return
         elif not os.path.isdir(processed_dir):
-            logger.debug(f"Creating {processed_dir}...")
+            logger.debug(f"Creating {processed_dir}...\t[{self.job_id}]")
             os.makedirs(processed_dir)
         else:
-            logger.debug(f"process_dir set to {processed_dir}")
+            logger.debug(f"process_dir set to {processed_dir}\t[{self.job_id}]")
 
         # If processed_files list not defined in the pipeline properties,
         # initialize it
@@ -148,7 +165,9 @@ class CopyProcessedDds(Task):
         processed_files_count = len(processed_file_list)
         # Skip task if no processed files
         if processed_files_count == 0:
-            logger.info("Skip task copy_processed_dds: No processed file to move")
+            logger.info(
+                f"No processed file to move: skip task copy_processed_dds\t[{self.job_id}]"
+            )
             return
 
         # Get clear-dds keyword
@@ -161,7 +180,9 @@ class CopyProcessedDds(Task):
         for processed_file in processed_file_list.copy():
             # Check first that processed file is not in failed list
             if processed_file in failed_file_list:
-                logger.warning(f"{processed_file} found in the failed file list!")
+                logger.warning(
+                    f"{processed_file} found in the failed file list!\t[{self.job_id}]"
+                )
                 continue
 
             # Build target filepath
@@ -169,16 +190,20 @@ class CopyProcessedDds(Task):
             target_filepath = os.path.join(processed_dir, basename)
 
             # copy file
-            logger.debug(f"Copying {processed_file} into {processed_dir}")
+            logger.debug(
+                f"Copying {processed_file} into {processed_dir}\t[{self.job_id}]"
+            )
             try:
                 shutil.copyfile(processed_file, target_filepath)
             except Exception as e:
                 logger.exception(
-                    f"Copying {processed_file} into {processed_dir} has failed!"
+                    f"Copying {processed_file} into {processed_dir} has failed!\t[{self.job_id}]"
                 )
                 logger.debug(e)
             else:
-                logger.info(f"{processed_file} copied into {target_filepath}")
+                logger.info(
+                    f"{processed_file} copied into {target_filepath}\t[{self.job_id}]"
+                )
 
             # Remove current file from the list in pipeline properties
             processed_file_list.remove(processed_file)
@@ -186,7 +211,7 @@ class CopyProcessedDds(Task):
             # if clear-dds keyword is passed, then remove processed Dds
             if clear_dds:
                 os.remove(processed_file)
-                logger.debug(f"{processed_file} deleted")
+                logger.debug(f"{processed_file} deleted\t[{self.job_id}]")
 
 
 class CopyFailedDds(Task):
@@ -198,17 +223,24 @@ class CopyFailedDds(Task):
     name = "copy_failed_dds"
 
     def run(self):
+        # Define task job ID (long and short)
+        self.job_uuid = str(uuid.uuid4())
+        self.job_id = self.job_uuid[:8]
+        logger.debug(f"Task {self.job_id} is starting")
+
         # Get failed file target directory
         failed_dir = self.pipeline.get("failed_dds_dir", default=[None], args=True)[0]
         # skip task if failed_dir is None
         if failed_dir is None:
-            logger.info("Skip task copy_failed_dds: No failed_dds_dir argument defined")
+            logger.info(
+                f"No failed_dds_dir argument defined: skip task copy_failed_dds\t[{self.job_id}]"
+            )
             return
         elif not os.path.isdir(failed_dir):
-            logger.debug(f"Creating {failed_dir}...")
+            logger.debug(f"Creating {failed_dir}...\t[{self.job_id}]")
             os.makedirs(failed_dir)
         else:
-            logger.debug(f"failed_dir set to {failed_dir}")
+            logger.debug(f"failed_dir set to {failed_dir}\t[{self.job_id}]")
 
         # If failed_files list not defined in the pipeline properties,
         # initialize it
@@ -216,7 +248,9 @@ class CopyFailedDds(Task):
         failed_files_count = len(failed_file_list)
         # Skip task if no failed dds files
         if failed_files_count == 0:
-            logger.info("Skip task copy_failed_dds: No failed file to move")
+            logger.info(
+                f"No failed file to move: skip task copy_failed_dds\t[{self.job_id}]"
+            )
             return
 
         # Get clear-dds keyword
@@ -229,14 +263,18 @@ class CopyFailedDds(Task):
             target_filepath = os.path.join(failed_dir, basename)
 
             # copy file
-            logger.debug(f"Copying {failed_file} into {failed_dir}")
+            logger.debug(f"Copying {failed_file} into {failed_dir}\t[{self.job_id}]")
             try:
                 shutil.copyfile(failed_file, target_filepath)
             except Exception as e:
-                logger.exception(f"Copying {failed_file} into {failed_dir} has failed!")
+                logger.exception(
+                    f"Copying {failed_file} into {failed_dir} has failed!\t[{self.job_id}]"
+                )
                 logger.debug(e)
             else:
-                logger.info(f"{failed_file} copied into {target_filepath}")
+                logger.info(
+                    f"{failed_file} copied into {target_filepath}\t[{self.job_id}]"
+                )
 
             # Remove current file from the list in pipeline properties
             failed_file_list.remove(failed_file)
@@ -244,14 +282,14 @@ class CopyFailedDds(Task):
             # if clear-dds keyword is passed, then remove processed Dds
             if clear_dds:
                 os.remove(failed_file)
-                logger.debug(f"{failed_file} deleted")
+                logger.debug(f"{failed_file} deleted\t[{self.job_id}]")
 
         # Get failed tmraw list
         failed_tmraw_list = self.pipeline.get("failed_tmraw", default=[])
         failed_tmraw_count = len(failed_tmraw_list)
        # Skip task if no failed tmraw
         if failed_tmraw_count == 0:
-            logger.debug("No failed tmraw to write")
+            logger.debug("No failed tmraw to write\t[{self.job_id}")
             return
         else:
             # Else save list of failed tmraw into text file
@@ -260,5 +298,5 @@ class CopyFailedDds(Task):
                 fw.writelines(failed_tmraw_list)
             logger.info(
                 f"{failed_tmraw_count} failed TmRaw entries "
-                f"saved into {tmraw_failed_file}"
+                f"saved into {tmraw_failed_file}\t[{self.job_id}]"
             )
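Across file_handler.py, 1.14.1 gives every task run a short job ID: a UUID4 is generated at the start of run(), truncated to its first 8 characters, and appended to each log message as a tab-separated [job_id] suffix so that interleaved messages from concurrent runs can be told apart. A minimal standalone sketch of that pattern; the ExampleTask class and logger setup below are illustrative only, not code from the plugin:

    # Minimal sketch of the per-task job-ID logging pattern introduced in 1.14.1.
    import logging
    import uuid

    logging.basicConfig(level=logging.DEBUG)
    logger = logging.getLogger("example")


    class ExampleTask:
        def run(self):
            # Long and short job identifiers, created once per run
            self.job_uuid = str(uuid.uuid4())
            self.job_id = self.job_uuid[:8]
            logger.debug(f"Task {self.job_id} is starting")

            # Subsequent messages carry the short ID as a suffix
            logger.info(f"doing some work\t[{self.job_id}]")

            logger.debug(f"Task {self.job_id} completed")


    ExampleTask().run()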
{roc_film-1.14.0 → roc_film-1.14.1}/roc/film/tasks/l0_to_hk.py
@@ -57,7 +57,7 @@ def L0ToHk(task):
 
     # Define task job ID (long and short)
     job_uuid = str(uuid.uuid4())
-    job_id = f"L0ToHk-{job_uuid[:8]}"
+    job_id = f"{job_uuid[:8]}"
     logger.info(f"Task {job_id} is starting")
 
     try:
{roc_film-1.14.0 → roc_film-1.14.1}/roc/film/tasks/l0_to_l1_bia_current.py
@@ -4,6 +4,7 @@
 """Contains task to create the RPW L1 Bias current CDF files."""
 
 import os
+import uuid
 
 from poppy.core.logger import logger
 from poppy.core.target import FileTarget
@@ -31,6 +32,11 @@ def L0ToL1BiaCurrent(task):
     :return:
     """
 
+    # Define task job ID (long and short)
+    job_uuid = str(uuid.uuid4())
+    task.job_id = f"{job_uuid[:8]}"
+    logger.info(f"Task {task.job_id} is starting")
+
     # Get list of input l0 file(s)
     l0_file_list = task.inputs["l0_file"].filepath
 
{roc_film-1.14.0 → roc_film-1.14.1}/roc/film/tools/file_helpers.py
@@ -572,19 +572,28 @@ def l0_to_trange_cdf(
     l0_file_len = len(l0_file_list)
 
     # Get L0 files time_min/time_max
-    l0_time_min, l0_time_max = get_l0_trange(l0_file_list)
+    try:
+        l0_time_min, l0_time_max = get_l0_trange(l0_file_list)
+        if None in l0_time_min or None in l0_time_max:
+            raise ValueError
+    except TypeError:
+        logger.error(f'Input "l0_files" must be a list!\t[{task.job_id}]')
+        return []
+    except ValueError:
+        logger.error(f"Output L0 time min. or max. list is not valid!\t[{task.job_id}]")
+        return []
 
     # Get start_time for output CDF (use time min of L0 files if not defined)
     if not start_time:
         start_time = task.pipeline.get("start_time", default=[min(l0_time_min)])[0]
 
-    logger.debug(f"start_time value is {start_time} [{task.job_id}]")
+    logger.debug(f"start_time value is {start_time}\t[{task.job_id}]")
 
     # Get end_time for output CDF (use time max of L0 files if not defined)
     if not end_time:
         end_time = task.pipeline.get("end_time", default=[max(l0_time_max)])[0]
 
-    logger.debug(f"end_time value is {end_time} [{task.job_id}]")
+    logger.debug(f"end_time value is {end_time}\t[{task.job_id}]")
 
     # Loops over each output dataset to produce for the current task
     for current_dataset in dataset_list:
@@ -594,7 +603,7 @@ def l0_to_trange_cdf(
 
         logger.debug(
             "Running file production for the dataset "
-            f"{dataset_name} (V{data_version}) [{task.job_id}]"
+            f"{dataset_name} (V{data_version})\t[{task.job_id}]"
         )
 
         # get the path to the master CDF file of this dataset
@@ -612,14 +621,14 @@ def l0_to_trange_cdf(
         # Check existence
         if not master_path:
             raise FileNotFoundError(
-                "{0} master CDF not found for the dataset {1}! [{2}]".format(
+                "{0} master CDF not found for the dataset {1}!\t[{2}]".format(
                     master_pattern, dataset_name, task.job_id
                 )
             )
         else:
             master_path = sorted(master_path)[-1]
             logger.info(
-                'Producing dataset "{0}" with the master CDF "{1}" [{2}]'.format(
+                'Producing dataset "{0}" with the master CDF "{1}"\t[{2}]'.format(
                     dataset_name, master_path, task.job_id
                 )
             )
@@ -635,12 +644,12 @@ def l0_to_trange_cdf(
             if l0_time_max[i] < start_time or l0_time_min[i] > end_time:
                 logger.debug(
                     f"{l0_file} is outside the time range: "
-                    f"[{start_time}, {end_time}], skip it [{task.job_id}]"
+                    f"[{start_time}, {end_time}], skip it\t[{task.job_id}]"
                 )
                 continue
             else:
                 logger.debug(
-                    f"Processing {l0_file} [{l0_file_len - i - 1}] [{task.job_id}]"
+                    f"Processing {l0_file} [{l0_file_len - i - 1}]\t[{task.job_id}]"
                 )
 
             # Append current l0 file to parent list
@@ -655,7 +664,7 @@ def l0_to_trange_cdf(
             ):
                 logger.info(
                     f"No expected packet found for {dataset_name}"
-                    f" in {l0_file} [{','.join(expected_packet)}] [{task.job_id}]"
+                    f" in {l0_file} [{','.join(expected_packet)}]\t[{task.job_id}]"
                 )
                 continue
 
@@ -664,18 +673,20 @@ def l0_to_trange_cdf(
             # as the dataset alias in the descriptor
             func = dataset_func.get(dataset_name)
             if func is None:
-                logger.error(f"No function found for {dataset_name}")
+                logger.error(
+                    f"No function found for {dataset_name}\t[{task.job_id}]"
+                )
                 failed_files.append(l0_file)
                 break
 
             # call the dataset-related function
             try:
-                logger.debug(f"Running {func} [{task.job_id}]")
+                logger.debug(f"Running {func}\t[{task.job_id}]")
                 result = func(l0, task)
             except Exception as e:
                 # TODO catch exception in the ROC database
                 logger.exception(
-                    f'Running "{func}" function has failed [{task.job_id}]: \n{e}'
+                    f'Running "{func}" function has failed\t[{task.job_id}]: \n{e}'
                 )
                 # TODO - Add the current failed dataset processing to failed_files
                 failed_files.append(l0_file)
@@ -685,12 +696,12 @@ def l0_to_trange_cdf(
             if result is None or result.shape[0] == 0:
                 logger.debug(
                     f"Returned {dataset_name} dataset array"
-                    f" is empty for {l0_file} [{task.job_id}]"
+                    f" is empty for {l0_file}\t[{task.job_id}]"
                 )
             else:
                 logger.debug(
                     f"{result.shape[0]} {dataset_name} dataset samples"
-                    f" returned from {l0_file} [{task.job_id}]"
+                    f" returned from {l0_file}\t[{task.job_id}]"
                 )
 
         # If data is empty
@@ -706,7 +717,7 @@ def l0_to_trange_cdf(
         if nrec == 0:
             logger.warning(
                 "No data for dataset"
-                f" {dataset_name}: skip output cdf creation [{task.job_id}]"
+                f" {dataset_name}: skip output cdf creation\t[{task.job_id}]"
             )
             continue
 
@@ -716,12 +727,12 @@ def l0_to_trange_cdf(
         # Generation date
         generation_date = datetime.utcnow().strftime(INPUT_DATETIME_STRFTIME)
         logger.debug(
-            f'Set "Generation_date" attr. value to {generation_date} [{task.job_id}]'
+            f'Set "Generation_date" attr. value to {generation_date}\t[{task.job_id}]'
         )
 
         # file ID
         file_id = str(uuid.uuid4())
-        logger.debug(f'Set "File_ID" attr. value to {file_id} [{task.job_id}]')
+        logger.debug(f'Set "File_ID" attr. value to {file_id}\t[{task.job_id}]')
 
         # Re-define datetime and parents g.attribute for time range CDF data
         # products
@@ -781,7 +792,7 @@ def l0_to_trange_cdf(
             metadata["SPICE_KERNELS"] = sclk_file[-1]
         else:
             logger.warning(
-                f"No SPICE SCLK kernel saved for {filepath} [{task.job_id}]"
+                f"No SPICE SCLK kernel saved for {filepath}\t[{task.job_id}]"
             )
 
         # open the target to update its status according to errors etc
@@ -808,27 +819,27 @@ def l0_to_trange_cdf(
             else:
                 logger.warning(
                     f"No data found between {start_time} and {end_time}"
-                    f" to be written into {filepath} [{task.job_id}]"
+                    f" to be written into {filepath}\t[{task.job_id}]"
                 )
 
         except Exception as e:
             logger.exception(
-                f"{filepath} production has failed [{task.job_id}]:\n{e}"
+                f"{filepath} production has failed\t[{task.job_id}]:\n{e}"
            )
             cdf.attrs["Validate"] = "-1"
             failed_files.append(filepath)
 
         if nrec == 0:
-            logger.info(f"Removing empty file {filepath}... [{task.job_id}]")
+            logger.info(f"Removing empty file {filepath}...\t[{task.job_id}]")
             os.remove(filepath)
             filepath = ""
         elif os.path.isfile(filepath):
             processed_files.append(filepath)
-            logger.info(f"{filepath} saved [{task.job_id}]")
+            logger.info(f"{filepath} saved\t[{task.job_id}]")
             output_file_list.append(filepath)
         else:
             failed_files.append(filepath)
-            logger.error(f"Writing {filepath} has failed! [{task.job_id}]")
+            logger.error(f"Writing {filepath} has failed!\t[{task.job_id}]")
 
         # Set output target filepath
         target.filepath = filepath
@@ -849,8 +860,7 @@ def get_l0_trange(l0_files: list, minmax: bool = False) -> tuple:
     """
 
     if not isinstance(l0_files, list):
-        logger.error('Input "l0_files" must be a list!')
-        return None, None
+        raise TypeError
 
     # Get number of l0_files
     nl0 = len(l0_files)
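The file_helpers.py hotfix moves error handling to the caller: get_l0_trange now raises TypeError on a non-list input instead of returning (None, None), and l0_to_trange_cdf wraps the call in try/except, additionally raising ValueError when any per-file min/max entry is None, and returns an empty list in both cases. A simplified, self-contained sketch of that contract; the dummy time values and the caller() function are illustrative stand-ins, not the package's actual implementation:

    # Simplified illustration of the 1.14.1 error-handling contract between
    # get_l0_trange() and its caller; the datetime values are dummy stand-ins.
    import logging
    from datetime import datetime

    logging.basicConfig(level=logging.INFO)
    logger = logging.getLogger("example")


    def get_l0_trange(l0_files: list) -> tuple:
        # Hotfix behaviour: reject non-list input by raising
        # instead of returning (None, None).
        if not isinstance(l0_files, list):
            raise TypeError
        # Dummy per-file time ranges; a None entry would mark an unreadable file.
        time_min = [datetime(2025, 1, 1) for _ in l0_files]
        time_max = [datetime(2025, 1, 2) for _ in l0_files]
        return time_min, time_max


    def caller(l0_file_list, job_id="deadbeef"):
        # Caller-side handling mirroring l0_to_trange_cdf in this release
        try:
            l0_time_min, l0_time_max = get_l0_trange(l0_file_list)
            if None in l0_time_min or None in l0_time_max:
                raise ValueError
        except TypeError:
            logger.error(f'Input "l0_files" must be a list!\t[{job_id}]')
            return []
        except ValueError:
            logger.error(f"Output L0 time min. or max. list is not valid!\t[{job_id}]")
            return []
        return list(zip(l0_time_min, l0_time_max))


    print(caller("not-a-list"))   # -> [] (TypeError branch)
    print(caller(["l0_a.h5"]))    # -> one (min, max) pair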