imap-processing 0.16.2-py3-none-any.whl → 0.18.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- imap_processing/_version.py +2 -2
- imap_processing/ccsds/excel_to_xtce.py +12 -0
- imap_processing/cdf/config/imap_codice_global_cdf_attrs.yaml +6 -6
- imap_processing/cdf/config/imap_codice_l1a_variable_attrs.yaml +35 -0
- imap_processing/cdf/config/imap_codice_l1b_variable_attrs.yaml +35 -0
- imap_processing/cdf/config/imap_codice_l2_variable_attrs.yaml +24 -0
- imap_processing/cdf/config/imap_hi_variable_attrs.yaml +8 -8
- imap_processing/cdf/config/imap_hit_global_cdf_attrs.yaml +1 -1
- imap_processing/cdf/config/imap_hit_l1a_variable_attrs.yaml +163 -100
- imap_processing/cdf/config/imap_hit_l2_variable_attrs.yaml +398 -415
- imap_processing/cdf/config/imap_ialirt_l1_variable_attrs.yaml +97 -54
- imap_processing/cdf/config/imap_idex_global_cdf_attrs.yaml +9 -9
- imap_processing/cdf/config/imap_idex_l2b_variable_attrs.yaml +233 -57
- imap_processing/cdf/config/imap_idex_l2c_variable_attrs.yaml +16 -90
- imap_processing/cdf/config/imap_lo_global_cdf_attrs.yaml +30 -0
- imap_processing/cdf/config/imap_mag_global_cdf_attrs.yaml +15 -1
- imap_processing/cdf/config/imap_swapi_variable_attrs.yaml +19 -0
- imap_processing/cdf/config/imap_swe_l1b_variable_attrs.yaml +20 -0
- imap_processing/cdf/config/imap_swe_l2_variable_attrs.yaml +39 -0
- imap_processing/cdf/config/imap_ultra_global_cdf_attrs.yaml +168 -0
- imap_processing/cdf/config/imap_ultra_l1a_variable_attrs.yaml +103 -2
- imap_processing/cdf/config/imap_ultra_l1b_variable_attrs.yaml +91 -11
- imap_processing/cdf/utils.py +7 -1
- imap_processing/cli.py +42 -13
- imap_processing/codice/codice_l1a.py +125 -78
- imap_processing/codice/codice_l1b.py +1 -1
- imap_processing/codice/codice_l2.py +0 -9
- imap_processing/codice/constants.py +481 -498
- imap_processing/hi/hi_l1a.py +4 -4
- imap_processing/hi/hi_l1b.py +2 -2
- imap_processing/hi/packet_definitions/TLM_HI_COMBINED_SCI.xml +218 -38
- imap_processing/hit/hit_utils.py +2 -2
- imap_processing/hit/l0/decom_hit.py +4 -3
- imap_processing/hit/l1a/hit_l1a.py +64 -24
- imap_processing/hit/l1b/constants.py +5 -0
- imap_processing/hit/l1b/hit_l1b.py +18 -16
- imap_processing/hit/l2/constants.py +1 -1
- imap_processing/hit/l2/hit_l2.py +4 -4
- imap_processing/ialirt/constants.py +21 -0
- imap_processing/ialirt/generate_coverage.py +188 -0
- imap_processing/ialirt/l0/parse_mag.py +62 -5
- imap_processing/ialirt/l0/process_swapi.py +1 -1
- imap_processing/ialirt/l0/process_swe.py +23 -7
- imap_processing/ialirt/utils/constants.py +22 -16
- imap_processing/ialirt/utils/create_xarray.py +42 -19
- imap_processing/idex/idex_constants.py +8 -5
- imap_processing/idex/idex_l2b.py +554 -58
- imap_processing/idex/idex_l2c.py +30 -196
- imap_processing/lo/l0/lo_apid.py +1 -0
- imap_processing/lo/l0/lo_star_sensor.py +48 -0
- imap_processing/lo/l1a/lo_l1a.py +74 -30
- imap_processing/lo/packet_definitions/lo_xtce.xml +5359 -106
- imap_processing/mag/constants.py +1 -0
- imap_processing/mag/l0/decom_mag.py +9 -6
- imap_processing/mag/l0/mag_l0_data.py +46 -0
- imap_processing/mag/l1d/__init__.py +0 -0
- imap_processing/mag/l1d/mag_l1d.py +133 -0
- imap_processing/mag/l1d/mag_l1d_data.py +588 -0
- imap_processing/mag/l2/__init__.py +0 -0
- imap_processing/mag/l2/mag_l2.py +25 -20
- imap_processing/mag/l2/mag_l2_data.py +191 -130
- imap_processing/quality_flags.py +20 -2
- imap_processing/spice/geometry.py +25 -3
- imap_processing/spice/pointing_frame.py +1 -1
- imap_processing/spice/spin.py +4 -0
- imap_processing/spice/time.py +51 -0
- imap_processing/swapi/l1/swapi_l1.py +12 -2
- imap_processing/swapi/l2/swapi_l2.py +59 -14
- imap_processing/swapi/swapi_utils.py +1 -1
- imap_processing/swe/l1b/swe_l1b.py +11 -4
- imap_processing/swe/l2/swe_l2.py +111 -17
- imap_processing/ultra/constants.py +49 -1
- imap_processing/ultra/l0/decom_tools.py +28 -14
- imap_processing/ultra/l0/decom_ultra.py +225 -15
- imap_processing/ultra/l0/ultra_utils.py +281 -8
- imap_processing/ultra/l1a/ultra_l1a.py +77 -8
- imap_processing/ultra/l1b/cullingmask.py +3 -3
- imap_processing/ultra/l1b/de.py +53 -15
- imap_processing/ultra/l1b/extendedspin.py +26 -2
- imap_processing/ultra/l1b/lookup_utils.py +171 -50
- imap_processing/ultra/l1b/quality_flag_filters.py +14 -0
- imap_processing/ultra/l1b/ultra_l1b_culling.py +198 -5
- imap_processing/ultra/l1b/ultra_l1b_extended.py +304 -66
- imap_processing/ultra/l1c/helio_pset.py +54 -7
- imap_processing/ultra/l1c/spacecraft_pset.py +9 -1
- imap_processing/ultra/l1c/ultra_l1c.py +2 -0
- imap_processing/ultra/l1c/ultra_l1c_pset_bins.py +106 -109
- imap_processing/ultra/packet_definitions/ULTRA_SCI_COMBINED.xml +3 -3
- imap_processing/ultra/utils/ultra_l1_utils.py +13 -1
- imap_processing/utils.py +20 -42
- {imap_processing-0.16.2.dist-info → imap_processing-0.18.0.dist-info}/METADATA +2 -2
- {imap_processing-0.16.2.dist-info → imap_processing-0.18.0.dist-info}/RECORD +95 -103
- imap_processing/lo/l0/data_classes/star_sensor.py +0 -98
- imap_processing/lo/l0/utils/lo_base.py +0 -57
- imap_processing/ultra/lookup_tables/Angular_Profiles_FM45_LeftSlit.csv +0 -526
- imap_processing/ultra/lookup_tables/Angular_Profiles_FM45_RightSlit.csv +0 -526
- imap_processing/ultra/lookup_tables/Angular_Profiles_FM90_LeftSlit.csv +0 -526
- imap_processing/ultra/lookup_tables/Angular_Profiles_FM90_RightSlit.csv +0 -524
- imap_processing/ultra/lookup_tables/EgyNorm.mem.csv +0 -32769
- imap_processing/ultra/lookup_tables/FM45_Startup1_ULTRA_IMGPARAMS_20240719.csv +0 -2
- imap_processing/ultra/lookup_tables/FM90_Startup1_ULTRA_IMGPARAMS_20240719.csv +0 -2
- imap_processing/ultra/lookup_tables/dps_grid45_compressed.cdf +0 -0
- imap_processing/ultra/lookup_tables/ultra45_back-pos-luts.csv +0 -4097
- imap_processing/ultra/lookup_tables/ultra45_tdc_norm.csv +0 -2050
- imap_processing/ultra/lookup_tables/ultra90_back-pos-luts.csv +0 -4097
- imap_processing/ultra/lookup_tables/ultra90_tdc_norm.csv +0 -2050
- imap_processing/ultra/lookup_tables/yadjust.csv +0 -257
- {imap_processing-0.16.2.dist-info → imap_processing-0.18.0.dist-info}/LICENSE +0 -0
- {imap_processing-0.16.2.dist-info → imap_processing-0.18.0.dist-info}/WHEEL +0 -0
- {imap_processing-0.16.2.dist-info → imap_processing-0.18.0.dist-info}/entry_points.txt +0 -0
imap_processing/cli.py
CHANGED
@@ -68,6 +68,7 @@ from imap_processing.mag.constants import DataMode
 from imap_processing.mag.l1a.mag_l1a import mag_l1a
 from imap_processing.mag.l1b.mag_l1b import mag_l1b
 from imap_processing.mag.l1c.mag_l1c import mag_l1c
+from imap_processing.mag.l1d.mag_l1d import mag_l1d
 from imap_processing.mag.l2.mag_l2 import mag_l2
 from imap_processing.spacecraft import quaternions
 from imap_processing.spice import pointing_frame, repoint, spin
@@ -516,7 +517,7 @@ class ProcessInstrument(ABC):
         self,
         processed_data: list[xr.Dataset | Path],
         dependencies: ProcessingInputCollection,
-    ) ->
+    ) -> list[Path]:
         """
         Complete post-processing.

@@ -544,10 +545,17 @@ class ProcessInstrument(ABC):
             method.
         dependencies : ProcessingInputCollection
             Object containing dependencies to process.
+
+        Returns
+        -------
+        list[Path]
+            List of paths to CDF files produced.
         """
+        products: list[Path] = []
+
         if len(processed_data) == 0:
             logger.info("No products to write to CDF file.")
-            return
+            return products

         logger.info("Writing products to local storage")

@@ -567,7 +575,6 @@ class ProcessInstrument(ABC):
         # start_date.
         # If it is start_date, skip repointing in the output filename.

-        products = []
         for ds in processed_data:
             if isinstance(ds, xr.Dataset):
                 ds.attrs["Data_version"] = self.version[1:]  # Strip 'v' from version
@@ -581,6 +588,7 @@ class ProcessInstrument(ABC):
                 products.append(ds)

         self.upload_products(products)
+        return products

     @final
     def cleanup(self) -> None:
@@ -896,29 +904,28 @@ class Idex(ProcessInstrument):
             dependency = load_cdf(science_files[0])
             datasets = [idex_l2a(dependency)]
         elif self.data_level == "l2b":
-            if len(dependency_list)
+            if len(dependency_list) < 3 or len(dependency_list) > 4:
                 raise ValueError(
                     f"Unexpected dependencies found for IDEX L2B:"
-                    f"{dependency_list}. Expected
+                    f"{dependency_list}. Expected three or four dependencies."
                 )
             sci_files = dependencies.get_file_paths(
                 source="idex", descriptor="sci-1week"
             )
-
+            sci_dependencies = [load_cdf(f) for f in sci_files]
             hk_files = dependencies.get_file_paths(source="idex", descriptor="evt")
-
-
+            # Remove duplicate housekeeping files
+            hk_dependencies = [load_cdf(dep) for dep in list(set(hk_files))]
+            datasets = [idex_l2b(sci_dependencies, hk_dependencies)]
         elif self.data_level == "l2c":
             if len(dependency_list) != 1:
                 raise ValueError(
                     f"Unexpected dependencies found for IDEX L2C:"
                     f"{dependency_list}. Expected only one dependency."
                 )
-            sci_files = dependencies.get_file_paths(
-
-            )
-            dependency = load_cdf(sci_files[0])
-            datasets = idex_l2c(dependency)
+            sci_files = dependencies.get_file_paths(source="idex", descriptor="sci-1mo")
+            dependencies = [load_cdf(f) for f in sci_files]
+            datasets = [idex_l2c(dependencies)]
         return datasets

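Note that `list(set(hk_files))` in the new L2B path removes duplicate housekeeping paths but does not preserve their order. A minimal sketch of an order-preserving alternative, with made-up file names (illustrative only, not what the package does):

    from pathlib import Path

    hk_files = [Path("evt_a.cdf"), Path("evt_b.cdf"), Path("evt_a.cdf")]

    # dict keys are unique and keep first-seen order (Python 3.7+), unlike set()
    deduped = list(dict.fromkeys(hk_files))  # [evt_a.cdf, evt_b.cdf]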
@@ -1074,6 +1081,21 @@ class Mag(ProcessInstrument):
                     f"Invalid dependencies found for MAG L1C:"
                     f"{dependencies}. Expected one or two dependencies."
                 )
+        if self.data_level == "l1d":
+            science_files = dependencies.get_file_paths(source="mag", data_type="l1c")
+            science_files.extend(
+                dependencies.get_file_paths(source="mag", data_type="l1b")
+            )
+            input_data = [load_cdf(dep) for dep in science_files]
+            calibration = dependencies.get_processing_inputs(
+                descriptor="l1d-calibration"
+            )
+            combined_calibration = MagAncillaryCombiner(calibration[0], day_buffer)
+            datasets = mag_l1d(
+                input_data,
+                combined_calibration.combined_dataset,
+                current_day,
+            )

         if self.data_level == "l2":
             science_files = dependencies.get_file_paths(source="mag", data_type="l1b")
@@ -1122,6 +1144,13 @@ class Mag(ProcessInstrument):
                 mode=DataMode(descriptor_no_frame.upper()),
             )

+        for ds in datasets:
+            if "raw" not in ds.attrs["Logical_source"] and not np.all(
+                ds["epoch"].values[1:] > ds["epoch"].values[:-1]
+            ):
+                raise ValueError(
+                    "Timestamps for output file are not monotonically increasing."
+                )
         return datasets

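The new MAG check rejects any non-raw output whose epochs are not strictly increasing. A minimal sketch of the same comparison on toy values (numpy only, values are illustrative):

    import numpy as np

    # Toy epoch values in nanoseconds; the out-of-order 250 makes the check fail
    epoch = np.array([100, 200, 300, 250])

    # Strictly increasing: every element must exceed its predecessor
    is_monotonic = bool(np.all(epoch[1:] > epoch[:-1]))  # False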
imap_processing/codice/codice_l1a.py
CHANGED

@@ -54,8 +54,6 @@ class CoDICEL1aPipeline:

     Methods
     -------
-    calculate_epoch_values()
-        Calculate and return the values to be used for `epoch`.
     decompress_data(science_values)
         Perform decompression on the data.
     define_coordinates()
@@ -89,28 +87,6 @@ class CoDICEL1aPipeline:
         self.plan_step = plan_step
         self.view_id = view_id

-    def calculate_epoch_values(self) -> NDArray[int]:
-        """
-        Calculate and return the values to be used for `epoch`.
-
-        On CoDICE, the epoch values are derived from the `acq_start_seconds` and
-        `acq_start_subseconds` fields in the packet.
-
-        Note that the `acq_start_subseconds` field needs to be converted from
-        microseconds to seconds.
-
-        Returns
-        -------
-        epoch : NDArray[int]
-            List of epoch values.
-        """
-        epoch = met_to_ttj2000ns(
-            self.dataset["acq_start_seconds"]
-            + self.dataset["acq_start_subseconds"] / 1e6
-        )
-
-        return epoch
-
     def decompress_data(self, science_values: list[NDArray[str]] | list[str]) -> None:
         """
         Perform decompression on the data.
@@ -167,17 +143,30 @@ class CoDICEL1aPipeline:
         self.coords = {}

         coord_names = [
-            "
-            *self.config["
-            *[key + "_label" for key in self.config["output_dims"].keys()],
+            *self.config["dims"].keys(),
+            *[key + "_label" for key in self.config["dims"].keys()],
         ]

+        # Define epoch coordinates
+        epochs, epoch_delta_minus, epoch_delta_plus = calculate_epoch_values(
+            self.dataset.acq_start_seconds, self.dataset.acq_start_subseconds
+        )
+        for name, var in [
+            ("epoch", epochs),
+            ("epoch_delta_minus", epoch_delta_minus),
+            ("epoch_delta_plus", epoch_delta_plus),
+        ]:
+            coord = xr.DataArray(
+                var,
+                name=name,
+                dims=[name],
+                attrs=self.cdf_attrs.get_variable_attributes(name, check_schema=False),
+            )
+            self.coords[name] = coord
+
         # Define the values for the coordinates
         for name in coord_names:
-            if name
-                values = self.calculate_epoch_values()
-                dims = [name]
-            elif name in [
+            if name in [
                 "esa_step",
                 "inst_az",
                 "spin_sector",
@@ -185,7 +174,7 @@ class CoDICEL1aPipeline:
                 "spin_sector_index",
                 "ssd_index",
             ]:
-                values = np.arange(self.config["
+                values = np.arange(self.config["dims"][name])
                 dims = [name]
             elif name == "spin_sector_pairs_label":
                 values = np.array(
@@ -199,15 +188,22 @@ class CoDICEL1aPipeline:
                     ]
                 )
                 dims = [name]
+            elif name == "inst_az_label":
+                if self.config["dataset_name"] == "imap_codice_l1a_lo-nsw-angular":
+                    values = [str(x) for x in range(4, 23)]
+                elif self.config["dataset_name"] == "imap_codice_l1a_lo-sw-angular":
+                    values = ["1", "2", "3", "23", "24"]
+                else:
+                    values = np.arange(self.config["dims"]["inst_az"]).astype(str)
+                dims = ["inst_az"]
             elif name in [
                 "spin_sector_label",
                 "esa_step_label",
-                "inst_az_label",
                 "spin_sector_index_label",
                 "ssd_index_label",
             ]:
                 key = name.removesuffix("_label")
-                values = np.arange(self.config["
+                values = np.arange(self.config["dims"][key]).astype(str)
                 dims = [key]

             coord = xr.DataArray(
@@ -241,16 +237,16 @@ class CoDICEL1aPipeline:
         # Stack the data so that it is easier to reshape and iterate over
         all_data = np.stack(self.data)

-        # The dimension of all_data is something like (epoch,
-        #
+        # The dimension of all_data is something like (epoch, num_energy_steps,
+        # num_positions, num_spin_sectors, num_counters) (or may be slightly
         # different depending on the data product). In any case, iterate over
         # the num_counters dimension to isolate the data for each counter so
         # each counter's data can be placed in a separate CDF data variable.
         for counter, variable_name in zip(
-            range(all_data.shape[1]), self.config["variable_names"]
+            range(all_data.shape[-1]), self.config["variable_names"]
         ):
             # Extract the counter data
-            counter_data = all_data[
+            counter_data = all_data[..., counter]

             # Get the CDF attributes
             descriptor = self.config["dataset_name"].split("imap_codice_l1a_")[-1]
@@ -260,7 +256,7 @@ class CoDICEL1aPipeline:
             # For most products, the final CDF dimensions always has "epoch" as
             # the first dimension followed by the dimensions for the specific
             # data product
-            dims = ["epoch", *list(self.config["
+            dims = ["epoch", *list(self.config["dims"].keys())]

             # However, CoDICE-Hi products use specific energy bins for the
             # energy dimension
@@ -409,6 +405,12 @@ class CoDICEL1aPipeline:
                 dims = ["epoch"]
                 attrs = self.cdf_attrs.get_variable_attributes("spin_period")

+            # The k-factor is a constant that maps voltages to energies
+            elif variable_name == "k_factor":
+                variable_data = np.array([constants.K_FACTOR], dtype=np.float32)
+                dims = [""]
+                attrs = self.cdf_attrs.get_variable_attributes("k_factor")
+
             # Add variable to the dataset
             dataset[variable_name] = xr.DataArray(
                 variable_data,
@@ -600,52 +602,27 @@ class CoDICEL1aPipeline:

         These data need to be divided up by species or priorities (or
         what I am calling "counters" as a general term), and re-arranged into
-
-        and energies (depending on the data product).
+        multidimensional arrays representing dimensions such as time,
+        spin sectors, positions, and energies (depending on the data product).

         However, the existence and order of these dimensions can vary depending
-        on the specific data product, so we define this in the "
-
-        defines how the dimensions are written into the packet data, while
-        "output_dims" defines how the dimensions should be written to the final
-        CDF product.
+        on the specific data product, so we define this in the "dims" key of the
+        configuration dictionary.
         """
         # This will contain the reshaped data for all counters
         self.data = []

-        #
-        # the packet data. The number of counters is the
-
-
-
-
-                self.config["num_counters"],
-                *self.config["input_dims"].values(),
-            )
-        else:
-            reshape_dims = (
-                *self.config["input_dims"].values(),
-                self.config["num_counters"],
-            )
-
-        # Then, transpose the data based on how the dimensions should be written
-        # to the CDF file. Since this is specific to each data product, we need
-        # to determine this dynamically based on the "output_dims" config.
-        # Again, lo-counters-aggregated is treated slightly differently
-        input_keys = ["num_counters", *self.config["input_dims"].keys()]
-        output_keys = ["num_counters", *self.config["output_dims"].keys()]
-        if self.config["dataset_name"] != "imap_codice_l1a_lo-counters-aggregated":
-            transpose_axes = [input_keys.index(dim) for dim in output_keys]
-        else:
-            transpose_axes = [1, 2, 0]  # [esa_step, spin_sector_pairs, num_counters]
-
+        # Reshape the data based on how it is written to the data array of
+        # the packet data. The number of counters is the last dimension / axis.
+        reshape_dims = (
+            *self.config["dims"].values(),
+            self.config["num_counters"],
+        )
         for packet_data in self.raw_data:
             reshaped_packet_data = np.array(packet_data, dtype=np.uint32).reshape(
                 reshape_dims
             )
-
-
-            self.data.append(reshaped_cdf_data)
+            self.data.append(reshaped_packet_data)

         # No longer need to keep the raw data around
         del self.raw_data
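With the counter axis now always last, each counter can be isolated with an ellipsis slice instead of a per-product transpose. A toy sketch with made-up dimension sizes (not real CoDICE shapes):

    import numpy as np

    # Hypothetical product dims plus a trailing counter axis
    dims = {"esa_step": 3, "spin_sector": 4}
    num_counters = 2

    flat = np.arange(3 * 4 * 2, dtype=np.uint32)  # one packet's flat payload
    reshaped = flat.reshape(*dims.values(), num_counters)  # shape (3, 4, 2)

    # `...` spans all leading axes, so this works for any number of dims
    counter_0 = reshaped[..., 0]  # shape (3, 4)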
@@ -674,6 +651,54 @@ class CoDICEL1aPipeline:
         self.cdf_attrs.add_instrument_variable_attrs("codice", "l1a")


+def calculate_epoch_values(
+    acq_start_seconds: xr.DataArray, acq_start_subseconds: xr.DataArray
+) -> tuple[NDArray[int], NDArray[int], NDArray[int]]:
+    """
+    Calculate and return the values to be used for `epoch`.
+
+    On CoDICE, the epoch values are derived from the `acq_start_seconds` and
+    `acq_start_subseconds` fields in the packet.
+
+    Note that the `acq_start_subseconds` field needs to be converted from
+    microseconds to seconds.
+
+    Parameters
+    ----------
+    acq_start_seconds : xarray.DataArray
+        The acquisition times to calculate the epoch values from.
+    acq_start_subseconds : xarray.DataArray
+        The subseconds portion of the acquisition times.
+
+    Returns
+    -------
+    epoch : NDArray[int]
+        List of centered epoch values.
+    epoch_delta_minus : NDArray[int]
+        List of values that represent the length of time from acquisition
+        start to the center of the acquisition time bin.
+    epoch_delta_plus : NDArray[int]
+        List of values that represent the length of time from the center of
+        the acquisition time bin to the end of acquisition.
+    """
+    # First calculate an epoch value based on the acquisition start
+    acq_start = met_to_ttj2000ns(acq_start_seconds + acq_start_subseconds / 1e6)
+
+    # Apply correction to center the epoch bin
+    epoch = (acq_start[:-1] + acq_start[1:]) // 2
+    epoch_delta_minus = epoch - acq_start[:-1]
+    epoch_delta_plus = acq_start[1:] - epoch
+
+    # Since the centers and deltas are determined by averaging sequential bins,
+    # the last elements must be calculated differently. For this, we just use
+    # the last acquisition start and the previous deltas
+    epoch = np.concatenate([epoch, [acq_start[-1]]])
+    epoch_delta_minus = np.concatenate([epoch_delta_minus, [epoch_delta_minus[-1]]])
+    epoch_delta_plus = np.concatenate([epoch_delta_plus, [epoch_delta_plus[-1]]])
+
+    return epoch, epoch_delta_minus, epoch_delta_plus
+
+
 def group_ialirt_data(
     packets: xr.Dataset, data_field_range: range, prefix: str
 ) -> list[bytearray]:
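The centering math in the new `calculate_epoch_values` averages consecutive acquisition starts to get bin centers, then pads the final bin with the last start and the previous deltas. A worked sketch on hypothetical values already in nanoseconds (skipping the `met_to_ttj2000ns` conversion):

    import numpy as np

    acq_start = np.array([0, 10, 30, 60])  # hypothetical acquisition starts

    epoch = (acq_start[:-1] + acq_start[1:]) // 2  # [5, 20, 45]
    epoch_delta_minus = epoch - acq_start[:-1]     # [5, 10, 15]
    epoch_delta_plus = acq_start[1:] - epoch       # [5, 10, 15]

    # Final bin: reuse the last start and repeat the previous deltas
    epoch = np.concatenate([epoch, [acq_start[-1]]])  # [5, 20, 45, 60]
    epoch_delta_minus = np.concatenate([epoch_delta_minus, [epoch_delta_minus[-1]]])
    epoch_delta_plus = np.concatenate([epoch_delta_plus, [epoch_delta_plus[-1]]])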
@@ -777,6 +802,8 @@ def create_binned_dataset(
         dims=["epoch"],
         attrs=pipeline.cdf_attrs.get_variable_attributes("epoch", check_schema=False),
     )
+    # TODO: Figure out how to calculate epoch centers and deltas and store them
+    # in variables here
     dataset = xr.Dataset(
         coords={"epoch": coord},
         attrs=pipeline.cdf_attrs.get_global_attributes(pipeline.config["dataset_name"]),
@@ -869,7 +896,11 @@ def create_direct_event_dataset(apid: int, packets: xr.Dataset) -> xr.Dataset:
     )[0]
     acq_start_seconds = packets.acq_start_seconds[epoch_indices]
     acq_start_subseconds = packets.acq_start_subseconds[epoch_indices]
-
+
+    # Calculate epoch variables
+    epochs, epochs_delta_minus, epochs_delta_plus = calculate_epoch_values(
+        acq_start_seconds, acq_start_subseconds
+    )

     # Define coordinates
     epoch = xr.DataArray(
@@ -878,6 +909,20 @@ def create_direct_event_dataset(apid: int, packets: xr.Dataset) -> xr.Dataset:
         dims=["epoch"],
         attrs=cdf_attrs.get_variable_attributes("epoch", check_schema=False),
     )
+    epoch_delta_minus = xr.DataArray(
+        epochs_delta_minus,
+        name="epoch_delta_minus",
+        dims=["epoch_delta_minus"],
+        attrs=cdf_attrs.get_variable_attributes(
+            "epoch_delta_minus", check_schema=False
+        ),
+    )
+    epoch_delta_plus = xr.DataArray(
+        epochs_delta_plus,
+        name="epoch_delta_plus",
+        dims=["epoch_delta_plus"],
+        attrs=cdf_attrs.get_variable_attributes("epoch_delta_plus", check_schema=False),
+    )
     event_num = xr.DataArray(
         np.arange(10000),
         name="event_num",
@@ -893,12 +938,14 @@ def create_direct_event_dataset(apid: int, packets: xr.Dataset) -> xr.Dataset:

     # Create the dataset to hold the data variables
     if apid == CODICEAPID.COD_LO_PHA:
-        attrs = cdf_attrs.get_global_attributes("imap_codice_l1a_lo-
+        attrs = cdf_attrs.get_global_attributes("imap_codice_l1a_lo-direct-events")
     elif apid == CODICEAPID.COD_HI_PHA:
-        attrs = cdf_attrs.get_global_attributes("imap_codice_l1a_hi-
+        attrs = cdf_attrs.get_global_attributes("imap_codice_l1a_hi-direct-events")
     dataset = xr.Dataset(
         coords={
             "epoch": epoch,
+            "epoch_delta_minus": epoch_delta_minus,
+            "epoch_delta_plus": epoch_delta_plus,
             "event_num": event_num,
             "event_num_label": event_num_label,
         },
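The deltas become standalone coordinates with their own dimensions rather than attributes on `epoch`. A minimal xarray sketch of the resulting structure (toy values, no CDF attributes):

    import numpy as np
    import xarray as xr

    ds = xr.Dataset(
        coords={
            "epoch": ("epoch", np.array([5, 20, 45])),
            # Each delta carries its own dimension, mirroring the diff
            "epoch_delta_minus": ("epoch_delta_minus", np.array([5, 10, 15])),
            "epoch_delta_plus": ("epoch_delta_plus", np.array([5, 10, 15])),
        }
    )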
imap_processing/codice/codice_l1b.py
CHANGED

@@ -116,7 +116,7 @@ def process_codice_l1b(file_path: Path) -> xr.Dataset:
     descriptor = dataset_name.removeprefix("imap_codice_l1b_")

     # Direct event data products do not have a level L1B
-    if descriptor in ["lo-
+    if descriptor in ["lo-direct-events", "hi-direct-events"]:
         logger.warning("Encountered direct event data product. Skipping L1b processing")
         return None

imap_processing/codice/codice_l2.py
CHANGED

@@ -45,17 +45,8 @@ def process_codice_l2(file_path: Path) -> xr.Dataset:
     # TODO: Could clean this up by using imap-data-access methods?
     dataset_name = l1_dataset.attrs["Logical_source"]
     data_level = dataset_name.removeprefix("imap_codice_").split("_")[0]
-    descriptor = dataset_name.removeprefix(f"imap_codice_{data_level}_")
     dataset_name = dataset_name.replace(data_level, "l2")

-    # TODO: Temporary work-around to replace "PHA" naming convention with
-    # "direct events" This will eventually be changed at the L1 level and
-    # thus this will eventually be removed.
-    if descriptor == "lo-pha":
-        dataset_name = dataset_name.replace("lo-pha", "lo-direct-events")
-    elif descriptor == "hi-pha":
-        dataset_name = dataset_name.replace("hi-pha", "hi-direct-events")
-
     # Use the L1 data product as a starting point for L2
     l2_dataset = l1_dataset.copy()
