imap-processing 0.18.0__py3-none-any.whl → 0.19.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of imap-processing might be problematic.
- imap_processing/_version.py +2 -2
- imap_processing/ancillary/ancillary_dataset_combiner.py +161 -1
- imap_processing/cdf/config/imap_codice_global_cdf_attrs.yaml +6 -0
- imap_processing/cdf/config/imap_codice_l1a_variable_attrs.yaml +221 -1057
- imap_processing/cdf/config/imap_codice_l1b_variable_attrs.yaml +307 -283
- imap_processing/cdf/config/imap_codice_l2_variable_attrs.yaml +1044 -203
- imap_processing/cdf/config/imap_constant_attrs.yaml +4 -2
- imap_processing/cdf/config/imap_enamaps_l2-common_variable_attrs.yaml +11 -0
- imap_processing/cdf/config/imap_glows_l1b_variable_attrs.yaml +15 -1
- imap_processing/cdf/config/imap_hi_global_cdf_attrs.yaml +5 -0
- imap_processing/cdf/config/imap_hit_global_cdf_attrs.yaml +10 -4
- imap_processing/cdf/config/imap_idex_l2a_variable_attrs.yaml +33 -4
- imap_processing/cdf/config/imap_idex_l2b_variable_attrs.yaml +8 -91
- imap_processing/cdf/config/imap_idex_l2c_variable_attrs.yaml +106 -16
- imap_processing/cdf/config/imap_lo_global_cdf_attrs.yaml +5 -4
- imap_processing/cdf/config/imap_lo_l1a_variable_attrs.yaml +4 -15
- imap_processing/cdf/config/imap_lo_l1c_variable_attrs.yaml +189 -98
- imap_processing/cdf/config/imap_mag_global_cdf_attrs.yaml +85 -2
- imap_processing/cdf/config/imap_mag_l1c_variable_attrs.yaml +24 -1
- imap_processing/cdf/config/imap_ultra_global_cdf_attrs.yaml +20 -8
- imap_processing/cdf/config/imap_ultra_l1b_variable_attrs.yaml +45 -35
- imap_processing/cdf/config/imap_ultra_l1c_variable_attrs.yaml +110 -7
- imap_processing/cli.py +138 -93
- imap_processing/codice/codice_l0.py +2 -1
- imap_processing/codice/codice_l1a.py +167 -69
- imap_processing/codice/codice_l1b.py +42 -32
- imap_processing/codice/codice_l2.py +215 -9
- imap_processing/codice/constants.py +790 -603
- imap_processing/codice/data/lo_stepping_values.csv +1 -1
- imap_processing/decom.py +1 -4
- imap_processing/ena_maps/ena_maps.py +71 -43
- imap_processing/ena_maps/utils/corrections.py +291 -0
- imap_processing/ena_maps/utils/map_utils.py +20 -4
- imap_processing/ena_maps/utils/naming.py +8 -2
- imap_processing/glows/ancillary/imap_glows_exclusions-by-instr-team_20250923_v002.dat +10 -0
- imap_processing/glows/ancillary/imap_glows_map-of-excluded-regions_20250923_v002.dat +393 -0
- imap_processing/glows/ancillary/imap_glows_map-of-uv-sources_20250923_v002.dat +593 -0
- imap_processing/glows/ancillary/imap_glows_pipeline-settings_20250923_v002.json +54 -0
- imap_processing/glows/ancillary/imap_glows_suspected-transients_20250923_v002.dat +10 -0
- imap_processing/glows/l1b/glows_l1b.py +123 -18
- imap_processing/glows/l1b/glows_l1b_data.py +358 -47
- imap_processing/glows/l2/glows_l2.py +11 -0
- imap_processing/hi/hi_l1a.py +124 -3
- imap_processing/hi/hi_l1b.py +154 -71
- imap_processing/hi/hi_l1c.py +4 -109
- imap_processing/hi/hi_l2.py +104 -60
- imap_processing/hi/utils.py +262 -8
- imap_processing/hit/l0/constants.py +3 -0
- imap_processing/hit/l0/decom_hit.py +3 -6
- imap_processing/hit/l1a/hit_l1a.py +311 -21
- imap_processing/hit/l1b/hit_l1b.py +54 -126
- imap_processing/hit/l2/hit_l2.py +6 -6
- imap_processing/ialirt/calculate_ingest.py +219 -0
- imap_processing/ialirt/constants.py +12 -2
- imap_processing/ialirt/generate_coverage.py +15 -2
- imap_processing/ialirt/l0/ialirt_spice.py +6 -2
- imap_processing/ialirt/l0/parse_mag.py +293 -42
- imap_processing/ialirt/l0/process_hit.py +5 -3
- imap_processing/ialirt/l0/process_swapi.py +41 -25
- imap_processing/ialirt/process_ephemeris.py +70 -14
- imap_processing/ialirt/utils/create_xarray.py +1 -1
- imap_processing/idex/idex_l0.py +2 -2
- imap_processing/idex/idex_l1a.py +2 -3
- imap_processing/idex/idex_l1b.py +2 -3
- imap_processing/idex/idex_l2a.py +130 -4
- imap_processing/idex/idex_l2b.py +158 -143
- imap_processing/idex/idex_utils.py +1 -3
- imap_processing/lo/ancillary_data/imap_lo_hydrogen-geometric-factor_v001.csv +75 -0
- imap_processing/lo/ancillary_data/imap_lo_oxygen-geometric-factor_v001.csv +75 -0
- imap_processing/lo/l0/lo_science.py +25 -24
- imap_processing/lo/l1b/lo_l1b.py +93 -19
- imap_processing/lo/l1c/lo_l1c.py +273 -93
- imap_processing/lo/l2/lo_l2.py +949 -135
- imap_processing/lo/lo_ancillary.py +55 -0
- imap_processing/mag/l1a/mag_l1a.py +1 -0
- imap_processing/mag/l1a/mag_l1a_data.py +26 -0
- imap_processing/mag/l1b/mag_l1b.py +3 -2
- imap_processing/mag/l1c/interpolation_methods.py +14 -15
- imap_processing/mag/l1c/mag_l1c.py +23 -6
- imap_processing/mag/l1d/mag_l1d.py +57 -14
- imap_processing/mag/l1d/mag_l1d_data.py +202 -32
- imap_processing/mag/l2/mag_l2.py +2 -0
- imap_processing/mag/l2/mag_l2_data.py +14 -5
- imap_processing/quality_flags.py +23 -1
- imap_processing/spice/geometry.py +89 -39
- imap_processing/spice/pointing_frame.py +4 -8
- imap_processing/spice/repoint.py +78 -2
- imap_processing/spice/spin.py +28 -8
- imap_processing/spice/time.py +12 -22
- imap_processing/swapi/l1/swapi_l1.py +10 -4
- imap_processing/swapi/l2/swapi_l2.py +15 -17
- imap_processing/swe/l1b/swe_l1b.py +1 -2
- imap_processing/ultra/constants.py +30 -24
- imap_processing/ultra/l0/ultra_utils.py +9 -11
- imap_processing/ultra/l1a/ultra_l1a.py +1 -2
- imap_processing/ultra/l1b/badtimes.py +35 -11
- imap_processing/ultra/l1b/de.py +95 -31
- imap_processing/ultra/l1b/extendedspin.py +31 -16
- imap_processing/ultra/l1b/goodtimes.py +112 -0
- imap_processing/ultra/l1b/lookup_utils.py +281 -28
- imap_processing/ultra/l1b/quality_flag_filters.py +10 -1
- imap_processing/ultra/l1b/ultra_l1b.py +7 -7
- imap_processing/ultra/l1b/ultra_l1b_culling.py +169 -7
- imap_processing/ultra/l1b/ultra_l1b_extended.py +311 -69
- imap_processing/ultra/l1c/helio_pset.py +139 -37
- imap_processing/ultra/l1c/l1c_lookup_utils.py +289 -0
- imap_processing/ultra/l1c/spacecraft_pset.py +140 -29
- imap_processing/ultra/l1c/ultra_l1c.py +33 -24
- imap_processing/ultra/l1c/ultra_l1c_culling.py +92 -0
- imap_processing/ultra/l1c/ultra_l1c_pset_bins.py +400 -292
- imap_processing/ultra/l2/ultra_l2.py +54 -11
- imap_processing/ultra/utils/ultra_l1_utils.py +37 -7
- imap_processing/utils.py +3 -4
- {imap_processing-0.18.0.dist-info → imap_processing-0.19.2.dist-info}/METADATA +2 -2
- {imap_processing-0.18.0.dist-info → imap_processing-0.19.2.dist-info}/RECORD +118 -109
- imap_processing/idex/idex_l2c.py +0 -84
- imap_processing/spice/kernels.py +0 -187
- imap_processing/ultra/l1b/cullingmask.py +0 -87
- imap_processing/ultra/l1c/histogram.py +0 -36
- {imap_processing-0.18.0.dist-info → imap_processing-0.19.2.dist-info}/LICENSE +0 -0
- {imap_processing-0.18.0.dist-info → imap_processing-0.19.2.dist-info}/WHEEL +0 -0
- {imap_processing-0.18.0.dist-info → imap_processing-0.19.2.dist-info}/entry_points.txt +0 -0

imap_processing/glows/l1b/glows_l1b_data.py
CHANGED

@@ -4,12 +4,227 @@ import dataclasses
 import json
 from dataclasses import InitVar, dataclass, field
 from pathlib import Path
-from typing import Optional
 
 import numpy as np
+import xarray as xr
+from scipy.stats import circmean, circstd
 
 from imap_processing.glows import FLAG_LENGTH
 from imap_processing.glows.utils.constants import TimeTuple
+from imap_processing.spice import geometry
+from imap_processing.spice.geometry import SpiceBody, SpiceFrame
+from imap_processing.spice.spin import (
+    get_instrument_spin_phase,
+    get_spin_angle,
+    get_spin_data,
+)
+from imap_processing.spice.time import met_to_datetime64, met_to_sclkticks, sct_to_et
+
+
+@dataclass
+class PipelineSettings:  # numpydoc ignore=PR02
+    """
+    GLOWS L1B Pipeline Settings for controlling bad-angle and bad-time flag processing.
+
+    This class extracts pipeline settings from the JSON configuration file processed
+    through GlowsAncillaryCombiner.
+
+    Based on Section 3.12 of the GLOWS algorithm document, the pipeline settings
+    file contains parameters for the ground-processing pipeline including thresholds,
+    bad-time flags to be activated, bad-angle flags to be activated, and other
+    processing controls.
+
+    Parameters
+    ----------
+    pipeline_dataset : xr.Dataset
+        Dataset from GlowsAncillaryCombiner.combined_dataset containing the
+        pipeline settings data extracted from the JSON file.
+
+    Attributes
+    ----------
+    active_bad_angle_flags : list[bool]
+        Binary mask determining which of the 4 bad-angle flags are active:
+        [is_close_to_uv_source, is_inside_excluded_region,
+        is_excluded_by_instr_team, is_suspected_transient]
+        Default: All flags set to True (all active).
+
+    active_bad_time_flags : list[bool]
+        Binary mask determining which bad-time flags from onboard processing
+        should be used for quality control to identify "good time" L1B blocks.
+
+    sunrise_offset : float
+        Offset in hours to adjust sunrise time relative to onboard settings
+        for fine-tuning the day/night boundary determination.
+
+    sunset_offset : float
+        Offset in hours to adjust sunset time relative to onboard settings
+        for fine-tuning the day/night boundary determination.
+
+    processing_thresholds : dict
+        Various thresholds and parameters for ground processing pipeline
+        that control sensitivity and quality criteria for L1B data processing.
+
+    Notes
+    -----
+    Usage example:
+
+    .. code-block:: python
+
+        # Create combiner for pipeline settings file
+        pipeline_combiner = GlowsAncillaryCombiner(pipeline_settings_files, end_date)
+
+        # Create PipelineSettings object
+        pipeline_settings = PipelineSettings(pipeline_combiner.combined_dataset)
+
+        # Use the settings
+        if pipeline_settings.active_bad_angle_flags[0]:  # is_close_to_uv_source
+            # Process UV source exclusions
+            pass
+    """
+
+    pipeline_dataset: InitVar[xr.Dataset]
+
+    # Extracted pipeline settings attributes
+    active_bad_angle_flags: list[bool] = field(init=False)
+    active_bad_time_flags: list[bool] = field(init=False)
+    sunrise_offset: float = field(init=False)
+    sunset_offset: float = field(init=False)
+    processing_thresholds: dict = field(init=False)
+
+    def __post_init__(self, pipeline_dataset: xr.Dataset) -> None:
+        """
+        Extract pipeline settings from the dataset.
+
+        Parameters
+        ----------
+        pipeline_dataset : xr.Dataset
+            Dataset containing pipeline settings data variables.
+        """
+        # Extract active bad-angle flags (default to all True if not present)
+        if "active_bad_angle_flags" in pipeline_dataset.data_vars:
+            self.active_bad_angle_flags = list(
+                pipeline_dataset["active_bad_angle_flags"].values
+            )
+        else:
+            # Default: all 4 bad-angle flags are active
+            self.active_bad_angle_flags = [True, True, True, True]
+
+        # Extract active bad-time flags (default to all True if not present)
+        if "active_bad_time_flags" in pipeline_dataset.data_vars:
+            self.active_bad_time_flags = list(
+                pipeline_dataset["active_bad_time_flags"].values
+            )
+        else:
+            # Default: assume all bad-time flags are active
+            self.active_bad_time_flags = [True] * 16  # Typical number of bad-time flags
+
+        # Extract sunrise/sunset offsets (default to 0.0 if not present)
+        self.sunrise_offset = float(pipeline_dataset.get("sunrise_offset", 0.0))
+        self.sunset_offset = float(pipeline_dataset.get("sunset_offset", 0.0))
+
+        # Extract processing thresholds (collect all threshold-related variables)
+        self.processing_thresholds = {}
+        for var_name in pipeline_dataset.data_vars:
+            if "threshold" in var_name.lower() or "limit" in var_name.lower():
+                self.processing_thresholds[var_name] = pipeline_dataset[var_name].item()
+
+
+@dataclass
+class AncillaryExclusions:
+    """
+    Organize input ancillary files for GLOWS L1B bad-angle flag processing.
+
+    This class holds the four types of ancillary datasets required for computing
+    bad-angle flags in GLOWS L1B histogram processing. All datasets should be
+    obtained from the GlowsAncillaryCombiner.combined_dataset property after
+    processing the respective ancillary files.
+
+    Attributes
+    ----------
+    excluded_regions : xr.Dataset
+        Dataset containing excluded sky regions with ecliptic coordinates.
+        Expected structure from GlowsAncillaryCombiner:
+        - 'ecliptic_longitude_deg': DataArray with dimension ('epoch', 'region')
+        - 'ecliptic_latitude_deg': DataArray with dimension ('epoch', 'region')
+
+    uv_sources : xr.Dataset
+        Dataset containing UV sources (stars) with coordinates and masking radii.
+        Expected structure from GlowsAncillaryCombiner:
+        - 'object_name': DataArray with dimension ('epoch', 'source')
+        - 'ecliptic_longitude_deg': DataArray with dimension ('epoch', 'source')
+        - 'ecliptic_latitude_deg': DataArray with dimension ('epoch', 'source')
+        - 'angular_radius_for_masking': DataArray with dimension ('epoch', 'source')
+
+    suspected_transients : xr.Dataset
+        Dataset containing suspected transient signals with time-based masks.
+        Expected structure from GlowsAncillaryCombiner:
+        - 'l1b_unique_block_identifier', dimensions ('epoch', 'time_block')
+        - 'histogram_mask_array', dimensions ('epoch', 'time_block')
+
+    exclusions_by_instr_team : xr.Dataset
+        Dataset containing manual exclusions by instrument team with time-based masks.
+        Expected structure from GlowsAncillaryCombiner:
+        - 'l1b_unique_block_identifier', dimensions ('epoch', 'time_block')
+        - 'histogram_mask_array', dimensions ('epoch', 'time_block')
+
+    Notes
+    -----
+    Usage example:
+
+    .. code-block:: python
+
+        # Create combiners for each ancillary file type
+        excluded_regions_combiner = GlowsAncillaryCombiner(
+            excluded_regions_files, end_date)
+        uv_sources_combiner = GlowsAncillaryCombiner(uv_sources_files, end_date)
+        suspected_transients_combiner = GlowsAncillaryCombiner(
+            suspected_transients_files, end_date)
+        exclusions_combiner = GlowsAncillaryCombiner(exclusions_files, end_date)
+
+        # Create AncillaryExclusions object
+        exclusions = AncillaryExclusions(
+            excluded_regions=excluded_regions_combiner.combined_dataset,
+            uv_sources=uv_sources_combiner.combined_dataset,
+            suspected_transients=suspected_transients_combiner.combined_dataset,
+            exclusions_by_instr_team=exclusions_combiner.combined_dataset
+        )
+
+        # Filter for a specific day using limit_by_day method
+        day_exclusions = exclusions.limit_by_day(np.datetime64('2025-09-23'))
+    """
+
+    excluded_regions: xr.Dataset
+    uv_sources: xr.Dataset
+    suspected_transients: xr.Dataset
+    exclusions_by_instr_team: xr.Dataset
+
+    def limit_by_day(self, day: np.datetime64) -> "AncillaryExclusions":
+        """
+        Return a new AncillaryExclusions object with data filtered for a specified day.
+
+        This method does not mutate the original object and can be called multiple times
+        with different days.
+
+        Parameters
+        ----------
+        day : np.datetime64
+            The day to filter data for.
+
+        Returns
+        -------
+        AncillaryExclusions
+            New instance with data filtered for the specified day.
+        """
+        return AncillaryExclusions(
+            excluded_regions=self.excluded_regions.sel(epoch=day, method="nearest"),
+            uv_sources=self.uv_sources.sel(epoch=day, method="nearest"),
+            suspected_transients=self.suspected_transients.sel(
+                epoch=day, method="nearest"
+            ),
+            exclusions_by_instr_team=self.exclusions_by_instr_team.sel(
+                epoch=day, method="nearest"
+            ),
+        )
 
 
 class AncillaryParameters:
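AncillaryExclusions.limit_by_day relies on xarray's nearest-neighbor label selection: .sel(epoch=day, method="nearest") keeps the single snapshot whose epoch coordinate is closest to the requested day and drops the epoch dimension. A minimal standalone sketch of that behavior (the dataset contents here are made up, not real GLOWS ancillary data):

import numpy as np
import xarray as xr

epochs = np.array(["2025-09-01", "2025-09-23", "2025-10-15"], dtype="datetime64[ns]")
ds = xr.Dataset(
    {"ecliptic_longitude_deg": (("epoch", "region"), np.arange(6.0).reshape(3, 2))},
    coords={"epoch": epochs},
)

# Nearest snapshot to the requested day is 2025-09-23.
day_ds = ds.sel(epoch=np.datetime64("2025-09-20"), method="nearest")
print(day_ds["ecliptic_longitude_deg"].values)  # [2. 3.]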
@@ -246,11 +461,11 @@ class DirectEventL1B:
     pulse_test_in_progress: InitVar[np.double]
     memory_error_detected: InitVar[np.double]
     # The following variables are created from the InitVar data
-    de_flags: Optional[np.ndarray] = field(init=False, default=None)
+    de_flags: np.ndarray | None = field(init=False, default=None)
     # TODO: First two values of DE are sec/subsec
-    direct_event_glows_times: Optional[np.ndarray] = field(init=False, default=None)
+    direct_event_glows_times: np.ndarray | None = field(init=False, default=None)
     # 3rd value is pulse length
-    direct_event_pulse_lengths: Optional[np.ndarray] = field(init=False, default=None)
+    direct_event_pulse_lengths: np.ndarray | None = field(init=False, default=None)
     # TODO: where does the multi-event flag go?
 
     def __post_init__(

@@ -466,7 +681,6 @@ class HistogramL1B:
     histogram: np.ndarray
     flight_software_version: str
     seq_count_in_pkts_file: int
-    # ancillary_data_files: np.ndarray TODO Add this
     first_spin_id: int
     last_spin_id: int
     flags_set_onboard: int  # TODO: this should be renamed in L1B

@@ -490,26 +704,26 @@ class HistogramL1B:
     imap_time_offset: np.double  # No conversion needed from l1a->l1b
     glows_start_time: np.double  # No conversion needed from l1a->l1b
     glows_time_offset: np.double  # No conversion needed from l1a->l1b
-
-    # init=False
-    # ) # Could be datetime TODO: Can't put a string in data
+    unique_block_identifier: str = field(init=False)
     imap_spin_angle_bin_cntr: np.ndarray = field(init=False)  # Same size as bins
     histogram_flag_array: np.ndarray = field(init=False)
-
-
-
+    # These two are retrieved from spin data
+    spin_period_ground_average: np.double = field(init=False)
+    spin_period_ground_std_dev: np.double = field(init=False)
+    position_angle_offset_average: np.double = field(init=False)  # from SPICE
     position_angle_offset_std_dev: np.double = field(init=False)  # from SPICE
-    spin_axis_orientation_std_dev: np.
-    spin_axis_orientation_average: np.
-    spacecraft_location_average: np.ndarray = field(init=False)  #
-    spacecraft_location_std_dev: np.ndarray = field(init=False)  #
-    spacecraft_velocity_average: np.ndarray = field(init=False)  #
-    spacecraft_velocity_std_dev: np.ndarray = field(init=False)  #
+    spin_axis_orientation_std_dev: np.ndarray = field(init=False)  # from SPICE
+    spin_axis_orientation_average: np.ndarray = field(init=False)  # from SPICE
+    spacecraft_location_average: np.ndarray = field(init=False)  # from SPICE
+    spacecraft_location_std_dev: np.ndarray = field(init=False)  # from SPICE
+    spacecraft_velocity_average: np.ndarray = field(init=False)  # from SPICE
+    spacecraft_velocity_std_dev: np.ndarray = field(init=False)  # from SPICE
     flags: np.ndarray = field(init=False)
+    ancillary_exclusions: InitVar[AncillaryExclusions]
+    ancillary_parameters: InitVar[AncillaryParameters]
+    pipeline_settings: InitVar[PipelineSettings]
     # TODO:
     # - Determine a good way to output flags as "human readable"
-    # - Add spice pieces
-    # - also unique identifiers
     # - Bad angle algorithm using SPICE locations
     # - Move ancillary file to AWS
 

@@ -519,6 +733,9 @@ class HistogramL1B:
         hv_voltage_variance: np.double,
         spin_period_variance: np.double,
         pulse_length_variance: np.double,
+        ancillary_exclusions: AncillaryExclusions,
+        ancillary_parameters: AncillaryParameters,
+        pipeline_settings: PipelineSettings,
     ) -> None:
         """
         Will process data.

@@ -535,63 +752,132 @@ class HistogramL1B:
             Encoded spin period variance.
         pulse_length_variance : numpy.double
             Encoded pulse length variance.
+        ancillary_exclusions : AncillaryExclusions
+            Ancillary exclusions data for bad-angle flag processing.
+        ancillary_parameters : AncillaryParameters
+            Ancillary parameters for decoding histogram data.
+        pipeline_settings : PipelineSettings
+            Pipeline settings for processing thresholds and flags.
         """
         # self.histogram_flag_array = np.zeros((2,))
+        day = met_to_datetime64(self.imap_start_time)
 
-        #
-
-        self.spin_period_ground_average = np.double(-999.9)
-        self.spin_period_ground_std_dev = np.double(-999.9)
-        self.position_angle_offset_average = np.double(-999.9)
-        self.position_angle_offset_std_dev = np.double(-999.9)
-        self.spin_axis_orientation_std_dev = np.double(-999.9)
-        self.spin_axis_orientation_average = np.double(-999.9)
-        self.spacecraft_location_average = np.array([-999.9, -999.9, -999.9])
-        self.spacecraft_location_std_dev = np.array([-999.9, -999.9, -999.9])
-        self.spacecraft_velocity_average = np.array([-999.9, -999.9, -999.9])
-        self.spacecraft_velocity_std_dev = np.array([-999.9, -999.9, -999.9])
+        # Add SPICE related variables
+        self.update_spice_parameters()
         # Will require some additional inputs
         self.imap_spin_angle_bin_cntr = np.zeros((3600,))
 
         # TODO: This should probably be an AWS file
         # TODO Pass in AncillaryParameters object instead of reading here.
-        with open(
-            Path(__file__).parents[1] / "ancillary" / "l1b_conversion_table_v001.json"
-        ) as f:
-            self.ancillary_parameters = AncillaryParameters(json.loads(f.read()))
 
-        self.filter_temperature_average = self.ancillary_parameters.decode(
+        self.filter_temperature_average = ancillary_parameters.decode(
             "filter_temperature", self.filter_temperature_average
         )
-        self.filter_temperature_std_dev = self.ancillary_parameters.decode_std_dev(
+        self.filter_temperature_std_dev = ancillary_parameters.decode_std_dev(
             "filter_temperature", filter_temperature_variance
         )
 
-        self.hv_voltage_average = self.ancillary_parameters.decode(
+        self.hv_voltage_average = ancillary_parameters.decode(
             "hv_voltage", self.hv_voltage_average
         )
-        self.hv_voltage_std_dev = self.ancillary_parameters.decode_std_dev(
+        self.hv_voltage_std_dev = ancillary_parameters.decode_std_dev(
             "hv_voltage", hv_voltage_variance
         )
-        self.spin_period_average = self.ancillary_parameters.decode(
+        self.spin_period_average = ancillary_parameters.decode(
             "spin_period", self.spin_period_average
         )
-        self.spin_period_std_dev = self.ancillary_parameters.decode_std_dev(
+        self.spin_period_std_dev = ancillary_parameters.decode_std_dev(
             "spin_period", spin_period_variance
         )
-        self.pulse_length_average = self.ancillary_parameters.decode(
+        self.pulse_length_average = ancillary_parameters.decode(
             "pulse_length", self.pulse_length_average
         )
-        self.pulse_length_std_dev = self.ancillary_parameters.decode_std_dev(
+        self.pulse_length_std_dev = ancillary_parameters.decode_std_dev(
             "pulse_length", pulse_length_variance
         )
 
-
-
-
-        #
+        # get the data for the correct day
+        day_exclusions = ancillary_exclusions.limit_by_day(day)
+
+        # Initialize histogram flag array: [is_close_to_uv_source,
+        # is_inside_excluded_region, is_excluded_by_instr_team,
+        # is_suspected_transient] x 3600 bins
+        self.histogram_flag_array = self._compute_histogram_flag_array(day_exclusions)
+        # Generate ISO datetime string using SPICE functions
+        datetime64_time = met_to_datetime64(self.imap_start_time)
+        self.unique_block_identifier = np.datetime_as_string(datetime64_time, "s")
         self.flags = np.ones((FLAG_LENGTH,), dtype=np.uint8)
 
+    def update_spice_parameters(self) -> None:
+        """Update SPICE parameters based on the current state."""
+        data_start_met = self.imap_start_time
+        # use of imap_start_time and glows_time_offset is correct.
+        data_end_met = np.double(self.imap_start_time) + np.double(
+            self.glows_time_offset
+        )
+        data_start_time_et = sct_to_et(met_to_sclkticks(data_start_met))
+        data_end_time_et = sct_to_et(met_to_sclkticks(data_end_met))
+
+        time_range = np.arange(data_start_time_et, data_end_time_et)
+
+        # Calculate spin period
+        # ---------------------
+        spin_data = get_spin_data()
+        # select spin data within the range from data start time to end time
+        spin_data = spin_data[
+            (spin_data["spin_start_met"] >= data_start_met)
+            & (spin_data["spin_start_met"] <= data_end_met)
+        ]
+
+        self.spin_period_ground_average = np.average(spin_data["spin_period_sec"])
+        self.spin_period_ground_std_dev = np.std(spin_data["spin_period_sec"])
+
+        # Calculate position angle offset
+        # --------------------------------
+        angle_offset = 360 - get_spin_angle(
+            get_instrument_spin_phase(
+                self.imap_start_time, instrument=geometry.SpiceFrame.IMAP_GLOWS
+            ),
+            degrees=True,
+        )
+        self.position_angle_offset_average = np.double(angle_offset)
+        self.position_angle_offset_std_dev = np.double(
+            0.0
+        )  # Set to zero per algorithm document
+
+        # Calculate spin axis orientation
+
+        spin_axis_all_times = geometry.cartesian_to_latitudinal(
+            geometry.frame_transform(
+                time_range,
+                np.array([0, 0, 1]),
+                SpiceFrame.IMAP_SPACECRAFT,
+                SpiceFrame.ECLIPJ2000,
+            )
+        )
+        # Calculate circular statistics for longitude (wraps around)
+        lon_mean = circmean(spin_axis_all_times[..., 1], low=-np.pi, high=np.pi)
+        lon_std = circstd(spin_axis_all_times[..., 1], low=-np.pi, high=np.pi)
+        lat_mean = circmean(spin_axis_all_times[..., 2], low=-np.pi, high=np.pi)
+        lat_std = circstd(spin_axis_all_times[..., 2], low=-np.pi, high=np.pi)
+        self.spin_axis_orientation_average = np.array([lon_mean, lat_mean])
+        self.spin_axis_orientation_std_dev = np.array([lon_std, lat_std])
+
+        # Calculate spacecraft location and velocity
+        # ------------------------------------------
+        # imap_state returns [x, y, z, vx, vy, vz].
+        # First three columns for position and last three for velocity.
+        imap_state = geometry.imap_state(
+            et=time_range, ref_frame=SpiceFrame.ECLIPJ2000, observer=SpiceBody.SUN
+        )
+        position = imap_state[:, :3]
+        velocity = imap_state[:, 3:]
+        # average and standard deviation over time (rows)
+        self.spacecraft_location_average = np.average(position, axis=0)
+        self.spacecraft_location_std_dev = np.std(position, axis=0)
+        self.spacecraft_velocity_average = np.average(velocity, axis=0)
+        self.spacecraft_velocity_std_dev = np.std(velocity, axis=0)
+
     def output_data(self) -> tuple:
         """
         Output the L1B DataArrays as a tuple.
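update_spice_parameters uses scipy's circmean/circstd for the spin-axis ecliptic longitude because an arithmetic mean is wrong for angles that wrap at ±pi. A standalone illustration, independent of the flight code above:

import numpy as np
from scipy.stats import circmean, circstd

# Longitudes clustered around the wrap point: just under +pi, just past -pi.
lon = np.array([np.pi - 0.01, -np.pi + 0.01, np.pi - 0.02])

print(np.mean(lon))                           # ~1.04, nowhere near the cluster
print(circmean(lon, low=-np.pi, high=np.pi))  # ~+/-pi, the actual cluster center
print(circstd(lon, low=-np.pi, high=np.pi))   # ~0.01, small spread as expected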
@@ -628,3 +914,28 @@ class HistogramL1B:
         )
 
         return flags
+
+    def _compute_histogram_flag_array(
+        self, exclusions: AncillaryExclusions
+    ) -> np.ndarray:
+        """
+        Compute the histogram flag array for bad-angle flags.
+
+        Creates a (4, 3600) array where each row represents a different flag type:
+        - Row 0: is_close_to_uv_source
+        - Row 1: is_inside_excluded_region
+        - Row 2: is_excluded_by_instr_team
+        - Row 3: is_suspected_transient
+
+        Parameters
+        ----------
+        exclusions : AncillaryExclusions
+            Ancillary exclusions data filtered for the current day.
+
+        Returns
+        -------
+        np.ndarray
+            Array of shape (4, 3600) with bad-angle flags for each bin.
+        """
+        # TODO: fill out once spice data is available
+        return np.zeros((4, 3600), dtype=np.uint8)
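The (4, 3600) histogram_flag_array is still a placeholder (the TODO above), but its intended shape is one boolean row per bad-angle flag across 3600 spin-angle bins of 0.1 degree each. A purely hypothetical sketch of how one row might be filled once the geometry is wired up; every name and value below is invented for illustration and is not the package's algorithm:

import numpy as np

bin_centers_deg = (np.arange(3600) + 0.5) * 0.1  # hypothetical 0.1-deg spin bins
uv_source_angle_deg = 123.4                      # made-up source position
masking_radius_deg = 2.0                         # made-up masking radius

# Wrap-aware angular distance from each bin center to the source.
delta = np.abs((bin_centers_deg - uv_source_angle_deg + 180.0) % 360.0 - 180.0)

flag_array = np.zeros((4, 3600), dtype=np.uint8)
# Row 0: is_close_to_uv_source
flag_array[0] = (delta <= masking_radius_deg).astype(np.uint8)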
imap_processing/glows/l2/glows_l2.py
CHANGED

@@ -218,6 +218,11 @@ def create_l2_dataset(
         "spacecraft_velocity_std_dev",
     ]
 
+    longitudinal_variables = [
+        "spin_axis_orientation_average",
+        "spin_axis_orientation_std_dev",
+    ]
+
    for key, value in dataclasses.asdict(histogram_l2).items():
         if key in ecliptic_variables:
             output[key] = xr.DataArray(

@@ -225,6 +230,12 @@ def create_l2_dataset(
                 dims=["epoch", "ecliptic"],
                 attrs=attrs.get_variable_attributes(key),
             )
+        elif key in longitudinal_variables:
+            output[key] = xr.DataArray(
+                value,
+                dims=["epoch", "latitudinal"],
+                attrs=attrs.get_variable_attributes(key),
+            )
         elif key == "bad_time_flag_occurrences":
             output[key] = xr.DataArray(
                 value,
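The new branch in create_l2_dataset routes the two spin-axis orientation variables onto a two-element "latitudinal" (longitude, latitude) dimension, instead of the three-element "ecliptic" (x, y, z) dimension used for the position and velocity vectors. A minimal sketch of the resulting array shape (values made up):

import numpy as np
import xarray as xr

value = np.array([[1.57, 0.45]])  # one epoch, (lon, lat) in radians
da = xr.DataArray(value, dims=["epoch", "latitudinal"])
print(da.sizes)  # Frozen({'epoch': 1, 'latitudinal': 2})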
imap_processing/hi/hi_l1a.py
CHANGED
@@ -3,7 +3,6 @@
 import logging
 from collections import defaultdict
 from pathlib import Path
-from typing import Union
 
 import numpy as np
 import xarray as xr

@@ -54,10 +53,52 @@ LONG_COUNTERS = (
 )
 TOTAL_COUNTERS = ("a_total", "b_total", "c_total", "fee_de_recd", "fee_de_sent")
 
+# MEMDMP Packet definition of uint32 fields
+# This is a mapping of variable name to index when the dump_data in the
+# HVSCI MEMDMP packet is interpreted as an array of uint32 values.
+MEMDMP_DATA_INDS = {
+    "lastbin_shorten": 9,
+    "coinc_length": 60,
+    "de_timetag": 65,
+    "ab_min": 67,
+    "ab_max": 68,
+    "ac_min": 69,
+    "ac_max": 70,
+    "ba_min": 71,
+    "ba_max": 72,
+    "bc_min": 73,
+    "bc_max": 74,
+    "ca_min": 75,
+    "ca_max": 76,
+    "cb_min": 77,
+    "cb_max": 78,
+    "cc_min": 79,
+    "cc_max": 80,
+    "cfd_dac_a": 82,
+    "cfd_dac_b": 83,
+    "cfd_dac_c": 84,
+    "cfd_dac_d": 85,
+    "de_mask": 87,
+    "ab_rnk": 89,
+    "cc_rnk": 90,
+    "ac_rnk": 91,
+    "bc_rnk": 92,
+    "abc_rnk": 93,
+    "acc_rnk": 94,
+    "bcc_rnk": 95,
+    "abcc_rnk": 96,
+    "esa_table": 100,
+    "esa_steps": 101,
+    "sci_cull": 106,
+    "eng_cull": 107,
+    "spins_per_step": 108,
+    "spins_per_de": 109,
+}
+
 logger = logging.getLogger(__name__)
 
 
-def hi_l1a(packet_file_path: Union[str, Path]) -> list[xr.Dataset]:
+def hi_l1a(packet_file_path: str | Path) -> list[xr.Dataset]:
     """
     Will process IMAP raw data to l1a.
 

@@ -95,6 +136,9 @@ def hi_l1a(packet_file_path: Union[str, Path]) -> list[xr.Dataset]:
     elif apid_enum in [HIAPID.H45_DIAG_FEE, HIAPID.H90_DIAG_FEE]:
         data = datasets_by_apid[apid]
         gattr_key = "imap_hi_l1a_diagfee_attrs"
+    elif apid_enum in [HIAPID.H45_MEMDMP, HIAPID.H90_MEMDMP]:
+        data = finish_memdmp_dataset(datasets_by_apid[apid])
+        gattr_key = "imap_hi_l1a_memdmp_attrs"
 
     # Update dataset global attributes
     attr_mgr = ImapCdfAttributes()

@@ -111,7 +155,7 @@ def hi_l1a(packet_file_path: Union[str, Path]) -> list[xr.Dataset]:
 
 
 def hi_packet_file_to_datasets(
-    packet_file_path: Union[str, Path], use_derived_value: bool = False
+    packet_file_path: str | Path, use_derived_value: bool = False
 ) -> dict[int, xr.Dataset]:
     """
     Extract hi datasets from packet file.

@@ -445,3 +489,80 @@ def unpack_hist_counter(counter_bytes: bytes) -> NDArray[np.uint16]:
     odd_uint12 = ((split_unit8 & (2**4 - 1)) << 8) + lower_uint8
     output_array = np.column_stack((even_uint12, odd_uint12)).reshape(-1, 90)
     return output_array
+
+
+def finish_memdmp_dataset(input_ds: xr.Dataset) -> xr.Dataset:
+    """
+    Create dataset for a number of Hi Memory Dump packets.
+
+    Parameters
+    ----------
+    input_ds : xarray.Dataset
+        Dataset of Hi-45 or Hi-90 MEMDMP packets generated using the
+        `imap_processing.utils.packet_file_to_datasets` function.
+
+    Returns
+    -------
+    dataset : xarray.Dataset
+        Dataset containing data from only MEMDMP packets generated upon entering
+        HVSCI. Specific memory items have been parsed out of the chunk of dumped
+        memory.
+    """
+    attr_mgr = ImapCdfAttributes()
+    attr_mgr.add_instrument_global_attrs(instrument="hi")
+    attr_mgr.add_instrument_variable_attrs(instrument="hi", level=None)
+
+    # We only care about the MEMDMP packets that are generated upon
+    # entry to HVSCI mode. This is very hacky, but the suggested way
+    # to identify these MEMDMP packets is to check that pktlen == 521
+    # Here, we remove packets where pktlen != 521
+    dataset = input_ds.where(input_ds["pkt_len"] == 521, drop=True)
+    logger.debug(
+        f"After trimming MEMDMP packets with pkt_len != 521,"
+        f"{dataset['epoch'].data.size} packets remain with a set"
+        f"of MEMORY_IDs = {set(dataset['memory_id'].data)}"
+    )
+
+    # Rename shcoarse variable (do this first since it copies the input_ds)
+    dataset = dataset.rename_vars({"shcoarse": "ccsds_met"})
+
+    dataset.epoch.attrs.update(
+        attr_mgr.get_variable_attributes("epoch"),
+    )
+
+    # Update existing variable attributes
+    for var_name in [
+        "version",
+        "type",
+        "sec_hdr_flg",
+        "pkt_apid",
+        "seq_flgs",
+        "src_seq_ctr",
+        "pkt_len",
+        "ccsds_met",
+        "cksum",
+    ]:
+        attrs = attr_mgr.get_variable_attributes(f"hi_hist_{var_name}")
+        dataset.data_vars[var_name].attrs.update(attrs)
+
+    new_vars = dict()
+    # Concatenate the dump_data from all packets into a single bytes string and
+    # interpret that bytes string as an array of uint32 values.
+    full_uint32_data = np.frombuffer(dataset["dump_data"].data.sum(), dtype=">u4")
+    # index_stride is the stride to traverse from packet to packet for a given
+    # item in the binary dump data.
+    index_stride = int(dataset["num_bytes"].data[0] // 4)
+    for new_var, offset in MEMDMP_DATA_INDS.items():
+        # The indices for each variable in the dump_data is the starting
+        # offset index with a stride of the number of bytes in the dump
+        # data divided by 4 (32-bit values).
+        new_vars[new_var] = xr.DataArray(
+            data=full_uint32_data[offset::index_stride],
+            dims=["epoch"],
+        )
+
+    # Remove binary memory dump data and add parsed variables
+    dataset = dataset.drop("dump_data")
+    dataset.update(new_vars)
+
+    return dataset
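The MEMDMP parsing above works because every retained packet dumps the same memory region: concatenating each packet's dump_data bytes and viewing them as big-endian uint32 words puts field k of packet p at index p * (num_bytes // 4) + k, so a strided slice pulls one field across all packets. A standalone sketch with made-up values:

import numpy as np

words_per_packet = 4  # stands in for num_bytes // 4
packets = [
    np.array([10, 11, 12, 13], dtype=">u4").tobytes(),
    np.array([20, 21, 22, 23], dtype=">u4").tobytes(),
]
full_uint32_data = np.frombuffer(b"".join(packets), dtype=">u4")

offset = 2  # index of one field within each packet's dump
print(full_uint32_data[offset::words_per_packet])  # [12 22]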