roc-film 1.13.4__py3-none-any.whl → 1.14.0__py3-none-any.whl
- roc/__init__.py +2 -1
- roc/film/__init__.py +2 -2
- roc/film/commands.py +372 -323
- roc/film/config/__init__.py +0 -1
- roc/film/constants.py +101 -65
- roc/film/descriptor.json +126 -95
- roc/film/exceptions.py +28 -27
- roc/film/tasks/__init__.py +16 -16
- roc/film/tasks/cat_solo_hk.py +86 -74
- roc/film/tasks/cdf_postpro.py +438 -309
- roc/film/tasks/check_dds.py +39 -45
- roc/film/tasks/db_to_anc_bia_sweep_table.py +381 -0
- roc/film/tasks/dds_to_l0.py +232 -180
- roc/film/tasks/export_solo_coord.py +147 -0
- roc/film/tasks/file_handler.py +91 -75
- roc/film/tasks/l0_to_hk.py +117 -103
- roc/film/tasks/l0_to_l1_bia_current.py +38 -30
- roc/film/tasks/l0_to_l1_bia_sweep.py +417 -329
- roc/film/tasks/l0_to_l1_sbm.py +250 -208
- roc/film/tasks/l0_to_l1_surv.py +185 -130
- roc/film/tasks/make_daily_tm.py +40 -37
- roc/film/tasks/merge_tcreport.py +77 -71
- roc/film/tasks/merge_tmraw.py +102 -89
- roc/film/tasks/parse_dds_xml.py +21 -20
- roc/film/tasks/set_l0_utc.py +51 -49
- roc/film/tests/cdf_compare.py +565 -0
- roc/film/tests/hdf5_compare.py +84 -62
- roc/film/tests/test_dds_to_l0.py +93 -51
- roc/film/tests/test_dds_to_tc.py +8 -11
- roc/film/tests/test_dds_to_tm.py +8 -10
- roc/film/tests/test_film.py +161 -116
- roc/film/tests/test_l0_to_hk.py +64 -36
- roc/film/tests/test_l0_to_l1_bia.py +10 -14
- roc/film/tests/test_l0_to_l1_sbm.py +14 -19
- roc/film/tests/test_l0_to_l1_surv.py +68 -41
- roc/film/tests/test_metadata.py +21 -20
- roc/film/tests/tests.py +743 -396
- roc/film/tools/__init__.py +5 -5
- roc/film/tools/dataset_tasks.py +34 -2
- roc/film/tools/file_helpers.py +390 -269
- roc/film/tools/l0.py +402 -324
- roc/film/tools/metadata.py +147 -127
- roc/film/tools/skeleton.py +12 -17
- roc/film/tools/tools.py +109 -92
- roc/film/tools/xlsx2skt.py +161 -139
- {roc_film-1.13.4.dist-info → roc_film-1.14.0.dist-info}/LICENSE +127 -125
- roc_film-1.14.0.dist-info/METADATA +60 -0
- roc_film-1.14.0.dist-info/RECORD +50 -0
- {roc_film-1.13.4.dist-info → roc_film-1.14.0.dist-info}/WHEEL +1 -1
- roc/film/tasks/l0_to_anc_bia_sweep_table.py +0 -348
- roc_film-1.13.4.dist-info/METADATA +0 -120
- roc_film-1.13.4.dist-info/RECORD +0 -48
roc/film/tasks/cdf_postpro.py
CHANGED
@@ -2,7 +2,7 @@
 # -*- coding: utf-8 -*-

 """Module to create the RPW L1 SBM1/SBM2 CDF files."""
-
+
 import json
 import os
 import shutil
@@ -15,8 +15,6 @@ import uuid
 import subprocess
 from spacepy import pycdf

-from maser.tools.cdf.cdfcompare import cdf_compare
-
 from poppy.core.db.connector import Connector
 from poppy.core.logger import logger
 from poppy.core.task import Task
@@ -27,122 +25,143 @@ from roc.dingo.models.data import EventLog
 from roc.dingo.tools import query_db
 from roc.dingo.constants import PIPELINE_DATABASE

-from roc.film import
+from roc.film import (
+    TIME_JSON_STRFORMAT,
+    TIME_DOY1_STRFORMAT,
+    TIME_DOY2_STRFORMAT,
+    INPUT_DATETIME_STRFTIME,
+)
 from roc.film.tools.file_helpers import is_output_dir, get_output_dir
 from roc.film.tools import glob_list
-from roc.film.constants import
+from roc.film.constants import (
+    CDFCONVERT_PATH,
+    TIMEOUT,
+    CDF_POST_PRO_OPTS_ARGS,
+    TIME_DAILY_STRFORMAT,
+)
 from roc.film.exceptions import L1PostProError

-__all__ = [
+__all__ = ["CdfPostPro"]


 class CdfPostPro(Task):
-
     """
     Task to post-process RPW CDFs
     """
-    plugin_name = 'roc.film'
-    name = 'cdf_post_pro'

-
+    plugin_name = "roc.film"
+    name = "cdf_post_pro"

-
-
-
-
+    def add_targets(self):
+        self.add_input(
+            target_class=FileTarget,
+            identifier="cdf_file",
+            filepath=self.get_cdf_files(),
+            many=True,
+        )

-        self.add_output(target_class=FileTarget,
-                        identifier='cdf_file',
-                        many=True)
+        self.add_output(target_class=FileTarget, identifier="cdf_file", many=True)

     def get_cdf_files(self):
         try:
             return self.pipeline.args.cdf_files
-        except:
+        except Exception:
             pass

     @Connector.if_connected(PIPELINE_DATABASE)
     def setup_inputs(self):
-
         try:
-            self.cdf_file_list = glob_list(self.inputs[
-        except:
-            raise ValueError(
-                'No input target "cdf_file" passed')
+            self.cdf_file_list = glob_list(self.inputs["cdf_file"].filepath)
+        except Exception:
+            raise ValueError('No input target "cdf_file" passed')

         if not self.cdf_file_list:
-
+            logger.warning("Empty list of input cdf files")
+            self.pipeline.exit()
+            return

         # Get list of RPW Soopkitchen Observations JSON files
-        self.rpw_obs_json_list = glob_list(
-
+        self.rpw_obs_json_list = glob_list(
+            self.pipeline.get("rpw_obs_json", default=[])
+        )

         # Get list of RPW IOR XML files
-        self.rpw_ior_xml_list = glob_list(self.pipeline.get(
-            'rpw_ior_xml', default=[]))
+        self.rpw_ior_xml_list = glob_list(self.pipeline.get("rpw_ior_xml", default=[]))

         # Get post-processing options
-        self.options = [
-
-
+        self.options = [
+            opt.lower()
+            for opt in self.pipeline.get("options", default=[], args=True)
+            if opt.lower() in CDF_POST_PRO_OPTS_ARGS
+        ]
         if not self.options:
-            raise ValueError(
+            raise ValueError("No valid argument passed in --options")

-        # Get
-        self.
-
-
-
-            os.getenv('CDF_BIN', os.path.dirname(CDFEXPORT_PATH)), 'cdfexport')
+        # Get cdfconvert path
+        self.cdfconvert = self.pipeline.get("cdfconvert", default=[CDFCONVERT_PATH])[0]
+        if not self.cdfconvert or not os.path.isfile(self.cdfconvert):
+            self.cdfconvert = os.path.join(
+                os.getenv("CDF_BIN", os.path.dirname(CDFCONVERT_PATH)), "cdfconvert"
+            )

         # get update-jon value
-        self.update_json = self.pipeline.get(
-
-
-
-
-
+        self.update_json = self.pipeline.get("update_json", default=[None])[0]
+
+        if "update_cdf" in self.options and not self.update_json:
+            raise ValueError(
+                '"update_cdf" input option needs '
+                "a valid update_json file path to be run!"
+            )
+        elif "update_cdf" in self.options and self.update_json:
+            # Get info in the input JSON file
+            try:
+                with open(self.update_json, "r") as jsonfile:
+                    update_data = json.load(jsonfile)
+                self.update_data = update_data["updates"]
+            except Exception as e:
+                logger.exception(
+                    f"Cannot parsing {self.update_json}\t[{self.job_id}]\n{e}"
+                )
+                raise
+        else:
+            self.update_data = None

         # Get overwrite boolean input
-        self.overwrite = self.pipeline.get(
-                                           default=False,
-                                           args=True)
+        self.overwrite = self.pipeline.get("overwrite", default=False, args=True)
         # Get or create failed_files list from pipeline properties
-        self.failed_files = self.pipeline.get(
-            'failed_files', default=[], create=True)
+        self.failed_files = self.pipeline.get("failed_files", default=[], create=True)

         # Get or create processed_files list from pipeline properties
         self.processed_files = self.pipeline.get(
-
+            "processed_files", default=[], create=True
+        )

         # Get products directory (folder where final output files will be
         # moved)
-        self.products_dir = self.pipeline.get(
-
+        self.products_dir = self.pipeline.get(
+            "products_dir", default=[None], args=True
+        )[0]

         # Get output dir
         self.output_dir = get_output_dir(self.pipeline)
-        if not is_output_dir(self.output_dir,
-
-            logger.debug(f'Making {self.output_dir}')
+        if not is_output_dir(self.output_dir, products_dir=self.products_dir):
+            logger.debug(f"Making {self.output_dir}")
             os.makedirs(self.output_dir)
-
-            logger.info(
-
+        elif not self.overwrite:
+            logger.info(
+                f"Output files will be saved into existing folder {self.output_dir}"
+            )

         # Get (optional) arguments for SPICE
-        self.predictive = self.pipeline.get(
-
-        self.
-            'kernel_date', default=None, args=True)
-        self.no_spice = self.pipeline.get('no_spice',
-                                          default=False, args=True)
+        self.predictive = self.pipeline.get("predictive", default=False, args=True)
+        self.kernel_date = self.pipeline.get("kernel_date", default=None, args=True)
+        self.no_spice = self.pipeline.get("no_spice", default=False, args=True)
         # Get/create Time singleton
-        self.time_instance = Time(
-
-
-
+        self.time_instance = Time(
+            predictive=self.predictive,
+            kernel_date=self.kernel_date,
+            no_spice=self.no_spice,
+        )

         # get a database session
         self.session = Connector.manager[PIPELINE_DATABASE].session
@@ -150,159 +169,164 @@ class CdfPostPro(Task):

         # Initialize some class variables
         self.soop_type_list = []
         self.obs_id_list = []
+        self.event_log = None

         return True

     def run(self):
-
         # Define task job ID (long and short)
         self.job_uuid = str(uuid.uuid4())
-        self.job_id =
-        logger.info(f
+        self.job_id = self.job_uuid[:8]
+        logger.info(f"Task job {self.job_id} is starting")
         try:
             self.setup_inputs()
-        except:
-            logger.
-
+        except Exception as e:
+            logger.error(f"Initializing inputs has failed for {self.job_id}!")
+            logger.debug(e)
             self.pipeline.exit()
             return

         # Loop over each input CDF file
-        logger.info(
+        logger.info(
+            f"{len(self.cdf_file_list)} input CDF files "
+            f"to post-process\t[{self.job_id}]"
+        )
         for current_file in self.cdf_file_list:
-
             if self.overwrite:
                 # If overwrite is set, then update current file
-                logger.warning(f
+                logger.warning(f"{current_file} will be overwritten\t[{self.job_id}]")
                 self.current_file = current_file
             else:
                 # Otherwise create a copy of the input CDF in the output
                 # directory, then update the copy
-                logger.info(
-
-
+                logger.info(
+                    f"Making a copy of {current_file} in {self.output_dir}\t[{self.job_id}]"
+                )
+                self.current_file = os.path.join(
+                    self.output_dir, os.path.basename(current_file)
+                )
                 shutil.copyfile(current_file, self.current_file)

             # Open CDF
             try:
-                logger.debug(
+                logger.debug(
+                    f"Opening and updating {self.current_file}...\t[{self.job_id}]"
+                )
                 # Open CDF to change what can be updated in one shot
                 with pycdf.CDF(self.current_file) as cdf:
                     cdf.readonly(False)

                     # Get RPW CDF dataset ID
-                    self.dataset_id = cdf.attrs[
+                    self.dataset_id = cdf.attrs["Dataset_ID"][0]

                     # Get Datetime attribute value (only first 8 characters)
-                    self.datetime = datetime.strptime(
-
+                    self.datetime = datetime.strptime(
+                        cdf.attrs["Datetime"][0][:8], TIME_DAILY_STRFORMAT
+                    )

                     # Get time range of the input L1 CDF
-                    self.
-                    self.
-
-
+                    self.epoch = cdf["Epoch"][...]
+                    self.nrec = self.epoch.shape[0]
+                    self.time_min = self.epoch[0]
+                    self.time_max = self.epoch[-1]
+                    logger.info(
+                        f"{self.current_file} has {self.nrec} records "
+                        f"between {self.time_min} "
+                        f"and {self.time_max}\t[{self.job_id}]"
+                    )

                     # Set SOOP_TYPE global attribute from RPW SOOPKitchen
                     # export observation JSON files
-                    if
+                    if "soop_type" in self.options:
                         self._set_soop_type(cdf)

                     # Set OBS_ID global attribute from IOR XML files (get
                     # observationID)
-                    if
+                    if "obs_id" in self.options:
                         self._set_obs_id(cdf)

                     # Set quality_bitmask
-                    if
-                        if
+                    if "quality_bitmask" in self.options:
+                        if "QUALITY_BITMASK" in cdf:
                             self._set_bitmask(cdf)
                         else:
-                            logger.debug(
+                            logger.debug(
+                                'No "QUALITY_BITMASK" variable found'
+                                f" in {self.current_file}: skip setting!\t[{self.job_id}]"
+                            )

                     # Resize TDS/LFR waveform array (only for TDS/LFR RSWF/TSWF
                     # products)
-                    if
+                    if "resize_wf" in self.options:
                         # Only resize TDS RSWF/TSWF products
-                        if
+                        if "RSWF" in self.dataset_id or "TSWF" in self.dataset_id:
                             self._set_resize_wf(cdf)
                         else:
-                            logger.debug(
+                            logger.debug(
+                                "Resizing wf cannot be "
+                                f"applied on {self.dataset_id}\t[{self.job_id}]"
+                            )

                     # Update CDF content with information in the input update_json file
-                    if
+                    if "update_cdf" in self.options:
                         self._update_cdf(cdf)

-                # Apply
-                if
+                # Apply cdfconvert to rebuild CDF properly
+                if "cdf_convert" in self.options:
                     try:
-                        self.
+                        self._run_cdfconvert(self.current_file)
                     except FileNotFoundError:
-                        logger.
-
-                        self.
+                        logger.error(
+                            "cdfconvert calling has failed because "
+                            f"{self.current_file} has not been found\t[{self.job_id}]"
+                        )
                     except subprocess.CalledProcessError as e:
-                        logger.
-
-
+                        logger.error(
+                            f"cdfconvert calling has failed: \n {e}\t[{self.job_id}]"
+                        )
                     except subprocess.TimeoutExpired as e:
-                        logger.
-
-
-                    except:
-                        logger.
-
-
-
-
-
+                        logger.error(
+                            f"cdfconvert calling has expired: \n {e}\t[{self.job_id}]"
+                        )
+                    except Exception:
+                        logger.error("cdfconvert calling has failed!\t[{self.job_id}]")
+
+            except Exception:
+                logger.exception(
+                    f"Post-processing {self.current_file} has failed\t[{self.job_id}]"
+                )
                 if self.current_file not in self.failed_files:
                     self.failed_files.append(self.current_file)
             else:
                 if not self.overwrite and self.current_file not in self.processed_files:
                     self.processed_files.append(self.current_file)

-    def
+    def _run_cdfconvert(self, cdf_file):
         """
-        Run
+        Run cdfconvert tool for input CDF file

-        :param cdf_file: cdf file to process with
+        :param cdf_file: cdf file to process with cdfconvert
         :return: CompletedProcess object returned by subprocess.run()
         """

-        # Check if
-        if not os.path.isfile(self.
-            raise FileNotFoundError(f
-
-        # Build command to run with subprocess.run
-
-
-        cmd
-
-        cmd.append(
-        cmd.append(
-        cmd.append(
-        cmd
+        # Check if cdfconvert tool path exists
+        if not os.path.isfile(self.cdfconvert):
+            raise FileNotFoundError(f"{self.cdfconvert} not found\t[{self.job_id}]")
+
+        # Build command to run cdfconvert with subprocess.run
+        cmd = list([self.cdfconvert, cdf_file, cdf_file])
+        # overwrite existing file
+        cmd.append("-delete")
+        # Force some CDF features
+        cmd.append("-network")
+        cmd.append("-single")
+        cmd.append("-compression \"var:'Epoch':none\"")
+        cmd.append("-checksum md5")
+        cmd = " ".join(cmd)

         # run cdfexport
-        logger.info(f
-        completed = subprocess.run(cmd,
-                                   shell=True,
-                                   check=True,
-                                   timeout=TIMEOUT)
-
-        new_cdf += '.cdf'
-        if os.path.isfile(new_cdf):
-            # First check that both files are the same
-            if cdf_compare(new_cdf, cdf_file):
-                os.remove(new_cdf)
-                raise L1PostProError(f'Running cdfexport on {cdf_file} has failed!')
-            else:
-                logger.debug(f'{cdf_file} and {new_cdf} are identical')
-                os.remove(cdf_file)
-                os.rename(new_cdf, cdf_file)
-        else:
-            raise FileNotFoundError(f'{new_cdf} not found')
+        logger.info(f"Running --> {cmd}\t[{self.job_id}]")
+        completed = subprocess.run(cmd, shell=True, check=True, timeout=TIMEOUT)

         return completed

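Note: the rewritten `_run_cdfconvert` above joins the command into a single shell string and delegates to `subprocess.run`. A minimal standalone sketch of the equivalent call, assuming the `cdfconvert` binary from the NASA CDF distribution is on the PATH; the file name `l1.cdf` and the timeout value are placeholders (the plugin takes the real values from the pipeline and `roc.film.constants.TIMEOUT`):

```python
import subprocess

cdf_file = "l1.cdf"  # hypothetical input; the task passes the same path twice
cmd = " ".join(
    [
        "cdfconvert", cdf_file, cdf_file,
        "-delete",                            # overwrite the existing target file
        "-network", "-single",                # force network encoding, single-file CDF
        "-compression \"var:'Epoch':none\"",  # keep the Epoch zVariable uncompressed
        "-checksum md5",                      # embed an MD5 checksum
    ]
)
# check=True raises CalledProcessError on a non-zero exit status, matching
# the exception handling in CdfPostPro.run(); timeout value is a placeholder
completed = subprocess.run(cmd, shell=True, check=True, timeout=3600)
```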
@@ -315,53 +339,57 @@ class CdfPostPro(Task):
         To make sure to have the final CDF size,
         run cdf_post_pro with cdf_export option

-        :param
+        :param cdf_obj: CDF to update (passed as a spacepy.pycdf.CDF class instance)
         :return: True if resizing has succeeded, False otherwise
         """
         is_succeeded = True

         # pycdf.lib.set_backward(False)

-        logger.info(
+        logger.info(
+            f"Resizing waveform data array in {self.current_file} ...\t[{self.job_id}]"
+        )
         try:
             # Get max number of data samples in the file
-            max_samp_per_ch = np.max(cdf_obj[
+            max_samp_per_ch = np.max(cdf_obj["SAMPS_PER_CH"][...])

             # Loop over old CDF zVariables
             for varname in cdf_obj:
-                if (
-
-
+                if (
+                    varname == "WAVEFORM_DATA"
+                    or varname == "WAVEFORM_DATA_VOLTAGE"
+                    or varname == "B"
+                ):
                     old_var = cdf_obj[varname]
                     # Re-size waveform data array
                     if len(old_var.shape) == 2:
-                        new_var_data = old_var[
-                            :, :max_samp_per_ch]
+                        new_var_data = old_var[:, :max_samp_per_ch]
                         new_var_dims = [new_var_data.shape[1]]
                     elif len(old_var.shape) == 3:
-                        new_var_data = old_var[
-
-                        new_var_dims = [new_var_data.shape[
-                            1], new_var_data.shape[2]]
+                        new_var_data = old_var[:, :, :max_samp_per_ch]
+                        new_var_dims = [new_var_data.shape[1], new_var_data.shape[2]]
                     else:
                         raise IndexError

-                    logger.debug(
-
-
+                    logger.debug(
+                        f"Resizing {varname} zVar "
+                        f"from {old_var.shape} to {new_var_data.shape} "
+                        f"in {self.current_file}\t[{self.job_id}]"
+                    )

                     # Create temporary new zVar with the new shape
-                    temp_varname = f
-                    cdf_obj.new(
-
-
-
-
-
-
-
-
+                    temp_varname = f"{varname}__TMP"
+                    cdf_obj.new(
+                        temp_varname,
+                        data=new_var_data,
+                        recVary=old_var.rv(),
+                        dimVarys=old_var.dv(),
+                        type=old_var.type(),
+                        dims=new_var_dims,
+                        n_elements=old_var.nelems(),
+                        compress=old_var.compress()[0],
+                        compress_param=old_var.compress()[1],
+                    )

                     # Copy zVar attributes
                     cdf_obj[temp_varname].attrs = cdf_obj[varname].attrs
@@ -372,8 +400,10 @@ class CdfPostPro(Task):
                     # Rename temporary zVar with expected name
                     cdf_obj[temp_varname].rename(varname)

-        except:
-            raise L1PostProError(
+        except Exception:
+            raise L1PostProError(
+                f"Resizing {self.current_file} has failed!\t[{self.job_id}]"
+            )
         else:
             # make sure to save the change
             cdf_obj.save()
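Note: taken together, the two hunks above implement the usual pycdf pattern for shrinking a zVariable: write the truncated data into a temporary `<name>__TMP` variable, copy the attributes, and rename it back (the lines elided between the hunks presumably drop the original variable first). A minimal sketch of that pattern under those assumptions, using a hypothetical `demo.cdf` with a record-varying 2-D `WAVEFORM_DATA` and a `SAMPS_PER_CH` zVariable:

```python
import numpy as np
from spacepy import pycdf

with pycdf.CDF("demo.cdf") as cdf:  # hypothetical file
    cdf.readonly(False)
    max_samp = int(np.max(cdf["SAMPS_PER_CH"][...]))
    old_var = cdf["WAVEFORM_DATA"]
    new_data = old_var[:, :max_samp]  # keep only the used samples
    cdf.new(
        "WAVEFORM_DATA__TMP",
        data=new_data,
        recVary=old_var.rv(),
        type=old_var.type(),
        dims=[new_data.shape[1]],
    )
    cdf["WAVEFORM_DATA__TMP"].attrs = old_var.attrs  # copy zVar attributes
    del cdf["WAVEFORM_DATA"]  # assumed to happen in the elided lines
    cdf["WAVEFORM_DATA__TMP"].rename("WAVEFORM_DATA")
    cdf.save()
```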
@@ -388,29 +418,46 @@ class CdfPostPro(Task):
         :return: True if SOOP_TYPE has been set, False otherwise
         """

-        logger.info(
+        logger.info(
+            "Setting SOOP_TYPE global attribute "
+            f"in {self.current_file} ...\t[{self.job_id}]"
+        )

         # Get list of SOOP type from RPW soopkitchen observation json files
         if not self.soop_type_list:
             logger.info(
-
-
-
+                "Extracting soopType elements from input "
+                f"list of {len(self.rpw_obs_json_list)} RPW SoopKitchen JSON files...\t[{self.job_id}]"
+            )
+            self.soop_type_list = CdfPostPro.get_soop_type(self.rpw_obs_json_list)

         # Only keep list of soop type betwen time_min and time_max
-        soop_type_list = [
-
-
-
+        soop_type_list = [
+            current_soop_type["soopType"]
+            for current_soop_type in self.soop_type_list
+            if (
+                datetime.strptime(current_soop_type["startDate"], TIME_JSON_STRFORMAT)
+                <= self.time_max
+                and datetime.strptime(current_soop_type["endDate"], TIME_JSON_STRFORMAT)
+                >= self.time_min
+            )
+        ]

         soop_type_len = len(soop_type_list)
         if soop_type_len == 0:
-            logger.
+            logger.info(
+                "No Soop Type value found "
+                f"between {self.time_min} and {self.time_max}\t[{self.job_id}]"
+            )
+            cdf_obj.attrs["SOOP_TYPE"] = "none"
             return False
         else:
-            cdf_obj.attrs[
-            logger.debug(f
-            logger.info(
+            cdf_obj.attrs["SOOP_TYPE"] = list(set(soop_type_list))
+            logger.debug(f"SOOP_TYPE = {soop_type_list} in {self.current_file}")
+            logger.info(
+                f"{soop_type_len} entries set for "
+                f"SOOP_TYPE in {self.current_file}\t[{self.job_id}]"
+            )

         # make sure to save the change
         cdf_obj.save()
@@ -431,18 +478,17 @@ class CdfPostPro(Task):
             """Extract soopType from input JSON"""

             # Open JSON file
-            with open(json_file,
+            with open(json_file, "r") as json_buff:
                 data = json.load(json_buff)

             # Retrieve all "soopType" field from file
-            return data[
+            return data["soops"]

         # Initialize output list
         soop_type_list = []

         for current_json in rpw_obs_json_list:
-            soop_type_list.extend(extract_soop_type(
-                current_json))
+            soop_type_list.extend(extract_soop_type(current_json))

         return soop_type_list

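Note: `get_soop_type` only reads the `soops` array of each SoopKitchen export, and `_set_soop_type` then filters on the `soopType`, `startDate` and `endDate` fields. A hypothetical excerpt showing just the fields the code touches; the values are invented and the exact `TIME_JSON_STRFORMAT` layout is an assumption:

```python
rpw_obs_json_example = {
    "soops": [
        {
            "soopType": "L_FULL_HRES_HCAD",       # copied into the SOOP_TYPE gattr
            "startDate": "2023-01-01T00:00:00Z",  # parsed with TIME_JSON_STRFORMAT
            "endDate": "2023-01-02T00:00:00Z",
        },
    ],
}
```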
@@ -454,29 +500,46 @@ class CdfPostPro(Task):
         :return: True if OBS_ID has been set, False otherwise
         """

-        logger.info(
+        logger.info(
+            f"Setting OBS_ID global attribute "
+            f"in {self.current_file} ...\t[{self.job_id}]"
+        )

         # Get list of RPW TC obs id values
         if not self.obs_id_list:
             logger.info(
-                f
+                f"Extracting uniqueID elements from "
+                f"input list of {len(self.rpw_ior_xml_list)} RPW IOR files...\t[{self.job_id}]"
+            )
             self.obs_id_list = CdfPostPro.get_ior_obs_id(self.rpw_ior_xml_list)

         # Keep only obs_id between time_min and time_max
-        obs_id_list =
-
-
-
+        obs_id_list = list(
+            set(
+                [
+                    current_tc[1]
+                    for current_tc in self.obs_id_list
+                    if self.time_max >= current_tc[0] >= self.time_min and current_tc[1]
+                ]
+            )
+        )

         obs_id_len = len(obs_id_list)
         if obs_id_len == 0:
-            logger.
+            logger.info(
+                "No OBS_ID value found "
+                f"between {self.time_min} and {self.time_max}\t[{self.job_id}]"
+            )
+            # Force value to "none"
+            cdf_obj.attrs["OBS_ID"] = ["none"]
             return False
         else:
-            cdf_obj.attrs[
-            logger.debug(f
-            logger.info(
+            cdf_obj.attrs["OBS_ID"] = sorted(list(set(obs_id_list)))
+            logger.debug(f"OBS_ID = {obs_id_list} in {self.current_file}")
+            logger.info(
+                f"{obs_id_len} entries set for OBS_ID "
+                f"in {self.current_file}\t[{self.job_id}]"
+            )

         # make sure to save the change
         cdf_obj.save()
@@ -506,13 +569,13 @@ class CdfPostPro(Task):
             import xmltodict

             # Convert input IOR XML stream into dictionary
-            logger.debug(f
+            logger.debug(f"Parsing {xml.name} ...")
             data = xmltodict.parse(xml.read())

             # Extract list of sequences
-            sequence_list = data[
-
-
+            sequence_list = data["planningData"]["commandRequests"]["occurrenceList"][
+                "sequence"
+            ]

             # Make sure that returned sequence_list is a list
             # (If only one sequence tag is found in the XML
@@ -529,11 +592,11 @@ class CdfPostPro(Task):
                 # right time format (two are possible)
                 for current_strtformat in [TIME_DOY1_STRFORMAT, TIME_DOY2_STRFORMAT]:
                     current_time = cast_ior_seq_datetime(
-                        current_seq, current_strtformat
+                        current_seq, current_strtformat
+                    )
                     if current_time is not None:
                         break
-                current_obsid = current_seq[
-                    'observationID'] is not None else ' '
+                current_obsid = current_seq["observationID"]

                 ior_seq_list.append((current_time, current_obsid))

@@ -543,20 +606,24 @@ class CdfPostPro(Task):
         obs_id_list = []

         for current_file in rpw_ior_xml_list:
-            if not os.path.basename(current_file).startswith(
-                logger.debug(f
+            if not os.path.basename(current_file).startswith("IOR"):
+                logger.debug(f"{current_file} not a valid RPW IOR file, skip it")
                 continue

             if zipfile.is_zipfile(current_file):
-                with zipfile.ZipFile(current_file,
+                with zipfile.ZipFile(current_file, "r") as zip_stream:
                     for current_xml in zip_stream.namelist():
-                        with zip_stream.open(current_xml,
-                            if ior_xml.name.startswith(
+                        with zip_stream.open(current_xml, "r") as ior_xml:
+                            if ior_xml.name.startswith("IOR") and ior_xml.name.endswith(
+                                ".SOL"
+                            ):
                                 obs_id_list.extend(extract_obs_id(ior_xml))
                             else:
-                                logger.debug(
+                                logger.debug(
+                                    f"{current_xml} is not a valid RPW IOR XML file, skip it"
+                                )
             else:
-                with open(current_file,
+                with open(current_file, "r") as ior_xml:
                     obs_id_list.extend(extract_obs_id(ior_xml))

         return obs_id_list
@@ -568,60 +635,92 @@ class CdfPostPro(Task):
         :param cdf_obj: spacepy.pycdf.CDF object containing input file data
         :return:
         """
-
-
-
-                update_data = json.load(jsonfile)
-            update_data = update_data['updates']
-        except:
-            raise L1PostProError(f'Cannot parsing {self.update_json}')
-
-        # Loop over each updates item
-        for key, val in update_data.items():
+        is_succeeded = True
+
+        for item in self.update_data:
             # Filter dataset to update
-            if
-                logger.
+            if item.get("include") and self.dataset_id not in item["include"]:
+                logger.debug(
+                    f"Skipping {self.current_file} "
+                    f"for updating CDF: {self.dataset_id} not concerned\t[{self.job_id}]"
+                )
                 continue

+            if item.get("exclude") and self.dataset_id in item["exclude"]:
+                logger.debug(
+                    f"Skipping {self.current_file} "
+                    f"for updating CDF: {self.dataset_id} not concerned\t[{self.job_id}]"
+                )
+                continue
+
+            # Retrieve validity time ranges
+            validity_start = datetime.strptime(
+                item["validity_range"]["start_time"], INPUT_DATETIME_STRFTIME
+            )
+            validity_end = datetime.strptime(
+                item["validity_range"]["end_time"], INPUT_DATETIME_STRFTIME
+            )
             # Filter time range
-            validity_start = datetime.strptime(val['validity_range']['start_time'],
-                                               TIME_DAILY_STRFORMAT)
-            validity_end = datetime.strptime(val['validity_range']['end_time'],
-                                             TIME_DAILY_STRFORMAT)
             if self.datetime.date() < validity_start.date():
-                logger.
+                logger.debug(
+                    f"Skipping {self.current_file} "
+                    f"for updating CDF: older than {validity_start.date()}\t[{self.job_id}]"
+                )
                 continue

             if self.datetime.date() > validity_end.date():
-                logger.
+                logger.debug(
+                    f"Skipping {self.current_file} for updating CDF: "
+                    f"newer than {validity_end.date()}\t[{self.job_id}]"
+                )
                 continue

-            #
-            for gattr in
-                gname = gattr[
-                for gvalue in gattr['values']:
-                    try:
-                        cdf_obj.attrs[gname] = gvalue
-                    except:
-                        raise L1PostProError(f'Cannot update global attribute {gname} in {self.current_file}')
-                    else:
-                        logger.info(f'Global attribute {gname} updated in {self.current_file} with values {gvalue}')
-
-            # Run update for zVariables if any
-            for zvar in val['zvars']:
-                zname = zvar['name']
-                zvalues = zvar['values']
-                nrec = cdf_obj[zname].shape[0]
+            # Update global attributes if any
+            for gattr in item["gattrs"]:
+                gname = gattr["name"]
                 try:
-                    cdf_obj[
-
-
+                    gvalues = list(set(cdf_obj.attrs[gname][...] + gattr["values"]))
+                    cdf_obj.attrs[gname] = gvalues
+                except Exception as e:
+                    logger.exception(
+                        f"Cannot update global attribute {gname} "
+                        f"in {self.current_file}\t[{self.job_id}]"
+                    )
+                    logger.debug(e)
+                    is_succeeded = False
                 else:
-                    logger.info(
+                    logger.info(
+                        f"Global attribute {gname} updated in "
+                        f"{self.current_file} with values {gvalues}\t[{self.job_id}]"
+                    )
+
+            # Update zVariables if any
+            where_dt = (validity_start <= self.epoch) & (self.epoch <= validity_end)
+            if any(where_dt):
+                for zvar in item["zvars"]:
+                    zname = zvar["name"]
+                    new_zvalues = cdf_obj[zname][...]
+                    new_zvalues[where_dt] = zvar["value"]
+                    try:
+                        cdf_obj[zname] = new_zvalues
+                    except Exception as e:
+                        logger.exception(
+                            f"Cannot update zVariable {zname} "
+                            f"in {self.current_file}\t[{self.job_id}]"
+                        )
+                        logger.debug(e)
+                        is_succeeded = False
+                    else:
+                        logger.info(
+                            f"{zname} updated "
+                            f"in {self.current_file} with value {zvar['value']}\t[{self.job_id}]"
+                        )

             # make sure to save the change
             cdf_obj.save()

+        return is_succeeded
+
     def _set_bitmask(self, cdf_obj):
         """
         Set the QUALITY_BITMASK zVariable in RPW L1 CDF.
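Note: the new `_update_cdf` no longer parses the JSON itself (that moved to `setup_inputs`) and iterates over `self.update_data`, i.e. the `updates` list. A hypothetical payload showing the schema the loop consumes — optional `include`/`exclude` dataset filters, a `validity_range` parsed with `INPUT_DATETIME_STRFTIME` (an ISO-like format is assumed here), `gattrs` whose values are merged with the existing entries, and per-record `zvars` overwrites; all identifiers and values below are invented:

```python
update_json_example = {
    "updates": [
        {
            "include": ["SOLO_L1_RPW-LFR-SURV-SWF-E"],  # optional allow-list of Dataset_ID
            "exclude": [],                              # optional deny-list
            "validity_range": {
                "start_time": "2023-01-01T00:00:00",
                "end_time": "2023-01-31T23:59:59",
            },
            "gattrs": [
                # "values" are merged (set union) with the existing attribute entries
                {"name": "MODS", "values": ["Patched calibration note"]},
            ],
            "zvars": [
                # "value" overwrites records whose Epoch falls inside the range
                {"name": "QUALITY_FLAG", "value": 2},
            ],
        },
    ],
}
```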
@@ -630,96 +729,126 @@ class CdfPostPro(Task):
         :param cdf_obj: spacepy.pycdf.CDF object containing input file data
         :return: None
         """
+        logger.info(
+            f"Setting QUALITY_BITMASK zVar in {self.current_file}...\t[{self.job_id}]"
+        )
         # Restore Epoch values and get number of records in CDF
-        epoch =
-        nrec =
+        epoch = self.epoch
+        nrec = self.nrec

         # Initialize quality_bitmask
         bitmask = np.zeros(nrec, dtype=np.uint16)
-        #bitmask[:] = 65535

         # Get list of events to store in bitmask between time_min and time_max
         # Define filters
-
-
-
-
-
-
-
+        if self.event_log is None:
+            logger.debug(f"Querying event_log table...\t[{self.job_id}]")
+            model = EventLog
+            filters = [
+                model.start_time >= self.time_min,
+                model.end_time <= self.time_max,
+            ]
+            self.event_log = query_db(
+                self.session,
+                model,
+                filters=and_(*filters),
+            )
+        n_event = self.event_log.shape[0]
+        if n_event == 0:
+            logger.warning(
+                f"No event_log entry found "
+                f"between {self.time_min} and {self.time_max}\t[{self.job_id}]"
+            )
         else:
-
-
-
-
+            logger.debug(
+                f"{n_event} entries found in event_log table...\t[{self.job_id}]"
+            )
+            # Loop over events to fill quality_bitmask
+            for i, row in self.event_log.iterrows():
+                # Filter events
+                if row["label"] not in [
+                    "BIA_SWEEP_ANT1",
+                    "BIA_SWEEP_ANT2",
+                    "BIA_SWEEP_ANT3",
+                    "EMC_MAND_QUIET",
+                    "EMC_PREF_NOISY",
+                    "TCM",
+                    "SLEW",
+                    "WOL",
+                    "ROLL",
+                ]:
+                    continue
+
+                # Get time range covering the event
+                w = (row["start_time"] <= epoch) & (row["end_time"] >= epoch)
+                if not any(w):
+                    continue

                 # BIAS SWEEP on ANT1
-                if
-                        event_log['start_time'] <= current_epoch and
-                        event_log['end_time'] >= current_epoch):
+                if row["label"] == "BIA_SWEEP_ANT1":
                     # Set 1st bit (X)
-
+                    bitmask[w] = bitmask[w] | 1

                 # BIAS SWEEP on ANT2
-
-                        event_log['start_time'] <= current_epoch and
-                        event_log['end_time'] >= current_epoch):
+                elif row["label"] == "BIA_SWEEP_ANT2":
                     # Set 2nd bit (X0)
-
+                    bitmask[w] = bitmask[w] | 2

                 # BIAS SWEEP on ANT3
-
-                        event_log['start_time'] <= current_epoch and
-                        event_log['end_time'] >= current_epoch):
+                elif row["label"] == "BIA_SWEEP_ANT3":
                     # Set 3rd bit (X00)
-
+                    bitmask[w] = bitmask[w] | 4

                 # EMC_MAND_QUIET
-
-                        event_log['start_time'] <= current_epoch and
-                        event_log['end_time'] >= current_epoch):
+                elif row["label"] == "EMC_MAND_QUIET":
                     # Set 4th bit (X000)
-
+                    bitmask[w] = bitmask[w] | 8

                 # EMC_PREF_NOISY
-
-                        event_log['start_time'] <= current_epoch and
-                        event_log['end_time'] >= current_epoch):
+                elif row["label"] == "EMC_PREF_NOISY":
                     # Set 5th bit (X0000)
-
+                    bitmask[w] = bitmask[w] | 16

                 # Spacecraft roll manoeuvre
-
-                        event_log['start_time'] <= current_epoch and
-                        event_log['end_time'] >= current_epoch):
+                elif "ROLL" in row["label"]:
                     # Set 6th bit (X00000)
-
-
-                # Thruster firing
-                if (event_log['label'].isin(['TCM', 'WOL']) and
-                        event_log['start_time'] <= current_epoch and
-                        event_log['end_time'] >= current_epoch):
-                    current_bitmask = current_bitmask | 64
+                    bitmask[w] = bitmask[w] | 32

-                #
-
+                # Spacecraft "slew" roll manoeuvre
+                elif "SLEW" in row["label"]:
+                    # Set 6th bit (X00000)
+                    bitmask[w] = bitmask[w] | 32

-
-
+                # Thruster firing
+                elif row["label"] in ["TCM", "WOL"]:
+                    # Set 7th bit (X000000)
+                    bitmask[w] = bitmask[w] | 64

+                logger.debug(
+                    f"Set {len(w)} QUALITY_BITMASK records for {row['label']} "
+                    f"between {row['start_time']} "
+                    f"and {row['end_time']}\t[{self.job_id}]"
+                )

+        # Save quality_bitmask
+        cdf_obj["QUALITY_BITMASK"] = bitmask[...]

         # make sure to save the change
         cdf_obj.save()

+        return True
+
+
 def cast_ior_seq_datetime(current_seq, strtformat):
     """
     cast the execution time of the input IOR sequence element into datetime object
     """
     try:
-        seq_datetime = datetime.strptime(
-
-
+        seq_datetime = datetime.strptime(
+            current_seq["executionTime"]["actionTime"], strtformat
+        )
+    except Exception:
+        # logger.debug(e)
         seq_datetime = None

     return seq_datetime
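Note: the rewritten `_set_bitmask` ORs one bit per event class into a `uint16` `QUALITY_BITMASK`. A compact restatement of the mapping implemented above; the `QualityBitmask` enum is illustrative only and not part of the plugin (note that ROLL and SLEW events share bit 5, while TCM/WOL thruster firings set bit 6):

```python
import enum

class QualityBitmask(enum.IntFlag):  # illustrative name, not in roc-film
    BIA_SWEEP_ANT1 = 1    # bit 0
    BIA_SWEEP_ANT2 = 2    # bit 1
    BIA_SWEEP_ANT3 = 4    # bit 2
    EMC_MAND_QUIET = 8    # bit 3
    EMC_PREF_NOISY = 16   # bit 4
    ROLL_OR_SLEW = 32     # bit 5 (ROLL and SLEW share it)
    THRUSTER_FIRING = 64  # bit 6 (TCM or WOL)

# e.g. a record inside both a sweep on ANT1 and an EMC quiet window:
mask = QualityBitmask.BIA_SWEEP_ANT1 | QualityBitmask.EMC_MAND_QUIET
assert int(mask) == 9
```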