imap-processing 1.0.0__py3-none-any.whl → 1.0.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of imap-processing might be problematic.
- imap_processing/_version.py +2 -2
- imap_processing/cdf/config/imap_codice_global_cdf_attrs.yaml +13 -1
- imap_processing/cdf/config/imap_codice_l2-hi-omni_variable_attrs.yaml +635 -0
- imap_processing/cdf/config/imap_codice_l2-hi-sectored_variable_attrs.yaml +422 -0
- imap_processing/cdf/config/imap_enamaps_l2-common_variable_attrs.yaml +28 -21
- imap_processing/cdf/config/imap_enamaps_l2-healpix_variable_attrs.yaml +2 -0
- imap_processing/cdf/config/imap_enamaps_l2-rectangular_variable_attrs.yaml +12 -2
- imap_processing/cli.py +6 -11
- imap_processing/codice/codice_l2.py +640 -127
- imap_processing/codice/constants.py +61 -0
- imap_processing/ena_maps/ena_maps.py +111 -60
- imap_processing/ena_maps/utils/coordinates.py +5 -0
- imap_processing/ena_maps/utils/corrections.py +268 -0
- imap_processing/ena_maps/utils/map_utils.py +143 -42
- imap_processing/hi/hi_l2.py +3 -8
- imap_processing/ialirt/constants.py +7 -1
- imap_processing/ialirt/generate_coverage.py +1 -1
- imap_processing/ialirt/l0/process_codice.py +66 -0
- imap_processing/ialirt/utils/create_xarray.py +1 -0
- imap_processing/idex/idex_l2a.py +2 -2
- imap_processing/idex/idex_l2b.py +1 -1
- imap_processing/lo/l1c/lo_l1c.py +61 -3
- imap_processing/lo/l2/lo_l2.py +79 -11
- imap_processing/mag/l1a/mag_l1a.py +2 -2
- imap_processing/mag/l1a/mag_l1a_data.py +71 -13
- imap_processing/mag/l1c/interpolation_methods.py +34 -13
- imap_processing/mag/l1c/mag_l1c.py +117 -67
- imap_processing/mag/l1d/mag_l1d_data.py +3 -1
- imap_processing/spice/geometry.py +11 -9
- imap_processing/spice/pointing_frame.py +77 -50
- imap_processing/swapi/l1/swapi_l1.py +12 -4
- imap_processing/swe/utils/swe_constants.py +7 -7
- imap_processing/ultra/l1b/extendedspin.py +1 -1
- imap_processing/ultra/l1b/ultra_l1b_culling.py +2 -2
- imap_processing/ultra/l1b/ultra_l1b_extended.py +1 -1
- imap_processing/ultra/l1c/helio_pset.py +1 -1
- imap_processing/ultra/l1c/spacecraft_pset.py +2 -2
- imap_processing-1.0.1.dist-info/METADATA +121 -0
- {imap_processing-1.0.0.dist-info → imap_processing-1.0.1.dist-info}/RECORD +42 -40
- imap_processing-1.0.0.dist-info/METADATA +0 -120
- {imap_processing-1.0.0.dist-info → imap_processing-1.0.1.dist-info}/LICENSE +0 -0
- {imap_processing-1.0.0.dist-info → imap_processing-1.0.1.dist-info}/WHEEL +0 -0
- {imap_processing-1.0.0.dist-info → imap_processing-1.0.1.dist-info}/entry_points.txt +0 -0
imap_processing/codice/codice_l2.py

@@ -10,51 +10,627 @@ dataset = process_codice_l2(l1_filename)
 """

 import logging
-from pathlib import Path

 import numpy as np
+import pandas as pd
 import xarray as xr
+from imap_data_access import ProcessingInputCollection, ScienceFilePath

 from imap_processing.cdf.imap_cdf_manager import ImapCdfAttributes
 from imap_processing.cdf.utils import load_cdf
-from imap_processing.codice.constants import
+from imap_processing.codice.constants import (
+    HALF_SPIN_LUT,
+    HI_L2_ELEVATION_ANGLE,
+    HI_OMNI_VARIABLE_NAMES,
+    HI_SECTORED_VARIABLE_NAMES,
+    L2_GEOMETRIC_FACTOR,
+    L2_HI_NUMBER_OF_SSD,
+    L2_HI_SECTORED_ANGLE,
+    LO_NSW_SPECIES_VARIABLE_NAMES,
+    LO_SW_PICKUP_ION_SPECIES_VARIABLE_NAMES,
+    LO_SW_SPECIES_VARIABLE_NAMES,
+    NSW_POSITIONS,
+    PUI_POSITIONS,
+    SW_POSITIONS,
+)

 logger = logging.getLogger(__name__)
 logger.setLevel(logging.INFO)


-def process_codice_l2(file_path: Path) -> xr.Dataset:
+def get_geometric_factor_lut(dependencies: ProcessingInputCollection) -> dict:
     """
-
+    Get the geometric factor lookup table.

     Parameters
     ----------
-
-
+    dependencies : ProcessingInputCollection
+        The collection of processing input files.

     Returns
     -------
-
-
+    geometric_factor_lut : dict
+        A dict with a full and reduced mode array with shape (esa_steps, position).
+    """
+    geometric_factors = pd.read_csv(
+        dependencies.get_file_paths(descriptor="l2-lo-gfactor")[0]
+    )
+
+    # sort by esa step. They should already be sorted, but just in case
+    full = geometric_factors[geometric_factors["mode"] == "full"].sort_values(
+        by="esa_step"
+    )
+    reduced = geometric_factors[geometric_factors["mode"] == "reduced"].sort_values(
+        by="esa_step"
+    )
+
+    # Sort position columns to ensure the correct order
+    position_names_sorted = sorted(
+        [col for col in full if col.startswith("position")],
+        key=lambda x: int(x.split("_")[-1]),
+    )
+
+    return {
+        "full": full[position_names_sorted].to_numpy(),
+        "reduced": reduced[position_names_sorted].to_numpy(),
+    }
+
+
+def get_efficiency_lut(dependencies: ProcessingInputCollection) -> pd.DataFrame:
     """
-
+    Get the efficiency lookup table.
+
+    Parameters
+    ----------
+    dependencies : ProcessingInputCollection
+        The collection of processing input files.
+
+    Returns
+    -------
+    efficiency_lut : pandas.DataFrame
+        Contains the efficiency lookup table. Columns are:
+        species, product, esa_step, position_1, position_2, ..., position_24.
+    """
+    return pd.read_csv(dependencies.get_file_paths(descriptor="l2-lo-efficiency")[0])

-    # Open the l1 file
-    l1_dataset = load_cdf(file_path)

-
-
-
-    dataset_name = l1_dataset.attrs["Logical_source"]
-    data_level = dataset_name.removeprefix("imap_codice_").split("_")[0]
-    dataset_name = dataset_name.replace(data_level, "l2")
+def get_species_efficiency(species: str, efficiency: pd.DataFrame) -> np.ndarray:
+    """
+    Get the efficiency values for a given species.

-
-
+    Parameters
+    ----------
+    species : str
+        The species name.
+    efficiency : pandas.DataFrame
+        The efficiency lookup table.

-
+    Returns
+    -------
+    efficiency : np.ndarray
+        A 2D array of efficiencies with shape (epoch, esa_steps).
+    """
+    species_efficiency = efficiency[efficiency["species"] == species].sort_values(
+        by="esa_step"
+    )
+    # Sort position columns to ensure the correct order
+    position_names_sorted = sorted(
+        [col for col in species_efficiency if col.startswith("position")],
+        key=lambda x: int(x.split("_")[-1]),
+    )
+    # Shape: (esa_steps, positions)
+    return species_efficiency[position_names_sorted].to_numpy()
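The lookup helpers above share one pattern: read an ancillary CSV, sort by esa_step, then order the position_* columns numerically. A minimal, self-contained sketch of that column handling, using a made-up three-position frame in place of the real l2-lo-efficiency file (which carries position_1 through position_24; all values below are invented):

```python
import pandas as pd

# Toy stand-in for the l2-lo-efficiency CSV described in the docstring above:
# columns are species, product, esa_step, position_1 ... position_N.
efficiency = pd.DataFrame(
    {
        "species": ["hplus", "hplus", "heplus", "heplus"],
        "product": ["sw", "sw", "sw", "sw"],
        "esa_step": [1, 0, 1, 0],
        "position_2": [0.52, 0.51, 0.42, 0.41],
        "position_10": [0.55, 0.54, 0.45, 0.44],
        "position_1": [0.50, 0.49, 0.40, 0.39],
    }
)

# Same steps as get_species_efficiency: filter one species, sort by esa_step,
# then order the position columns by their numeric suffix (plain string sorting
# would put position_10 before position_2).
species_eff = efficiency[efficiency["species"] == "hplus"].sort_values(by="esa_step")
position_cols = sorted(
    [col for col in species_eff if col.startswith("position")],
    key=lambda c: int(c.split("_")[-1]),
)
lut = species_eff[position_cols].to_numpy()

print(position_cols)  # ['position_1', 'position_2', 'position_10']
print(lut.shape)      # (2, 3) -> (esa_steps, positions)
```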
+
+
+def compute_geometric_factors(
+    dataset: xr.Dataset, geometric_factor_lookup: dict
+) -> np.ndarray:
+    """
+    Calculate geometric factors needed for intensity calculations.
+
+    Geometric factors are determined by comparing the half-spin values per
+    esa_step in the HALF_SPIN_LUT to the rgfo_half_spin values in the provided
+    L2 dataset.
+
+    If the half-spin value is less than the corresponding rgfo_half_spin value,
+    the geometric factor is set to 0.75 (full mode); otherwise, it is set to 0.5
+    (reduced mode).
+
+    NOTE: Half spin values are associated with ESA steps which corresponds to the
+    index of the energy_per_charge dimension that is between 0 and 127.
+
+    Parameters
+    ----------
+    dataset : xarray.Dataset
+        The L2 dataset containing rgfo_half_spin data variable.
+    geometric_factor_lookup : dict
+        A dict with a full and reduced mode array with shape (esa_steps, position).
+
+    Returns
+    -------
+    geometric_factors : np.ndarray
+        A 3D array of geometric factors with shape (epoch, esa_steps, positions).
+    """
+    # Convert the HALF_SPIN_LUT to a reverse mapping of esa_step to half_spin
+    esa_step_to_half_spin_map = {
+        val: key for key, vals in HALF_SPIN_LUT.items() for val in vals
+    }
+
+    # Create a list of half_spin values corresponding to ESA steps (0 to 127)
+    half_spin_values = np.array(
+        [esa_step_to_half_spin_map[step] for step in range(128)]
+    )
+    # Expand dimensions to compare each rgfo_half_spin value against
+    # all half_spin_values
+    rgfo_half_spin = dataset.rgfo_half_spin.data[:, np.newaxis]  # Shape: (epoch, 1)
+    # Perform the comparison and calculate modes
+    # Modes will be true (reduced mode) anywhere half_spin >= rgfo_half_spin otherwise
+    # false (full mode)
+    modes = half_spin_values >= rgfo_half_spin
+
+    # Get the geometric factors based on the modes
+    gf = np.where(
+        modes[:, :, np.newaxis],  # Shape (epoch, esa_step, 1)
+        geometric_factor_lookup["reduced"],  # Shape (1, esa_step, 24) - reduced mode
+        geometric_factor_lookup["full"],  # Shape (1, esa_step, 24) - full mode
+    )  # Shape: (epoch, esa_step, positions)
+    return gf
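The mode selection in compute_geometric_factors is a single NumPy broadcast. A toy sketch with invented sizes and thresholds (2 epochs, 4 ESA steps, 3 positions instead of the real 128 steps and 24 positions; the real thresholds come from HALF_SPIN_LUT and the rgfo_half_spin telemetry):

```python
import numpy as np

# Invented half-spin value per ESA step (the real code builds this from HALF_SPIN_LUT).
half_spin_values = np.array([0, 1, 2, 3])        # shape (esa_step,)
# Invented rgfo_half_spin telemetry, one value per epoch.
rgfo_half_spin = np.array([[2], [4]])            # shape (epoch, 1)

# True -> reduced mode, False -> full mode, as in compute_geometric_factors.
modes = half_spin_values >= rgfo_half_spin       # shape (epoch, esa_step)

# Invented per-step, per-position geometric factor tables ("full" vs "reduced").
full = np.full((4, 3), 0.75)
reduced = np.full((4, 3), 0.5)

gf = np.where(modes[:, :, np.newaxis], reduced, full)
print(gf.shape)     # (2, 4, 3) -> (epoch, esa_step, position)
print(gf[0, :, 0])  # 0.75 0.75 0.5 0.5 -> steps 2 and 3 fall back to the reduced table
```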
+
+
+def process_lo_species_intensity(
+    dataset: xr.Dataset,
+    species_list: list,
+    geometric_factors: np.ndarray,
+    efficiency: pd.DataFrame,
+    positions: list,
+) -> xr.Dataset:
+    """
+    Process the lo-species L2 dataset to calculate species intensities.
+
+    Parameters
+    ----------
+    dataset : xarray.Dataset
+        The L2 dataset to process.
+    species_list : list
+        List of species variable names to calculate intensity.
+    geometric_factors : np.ndarray
+        The geometric factors array with shape (epoch, esa_steps).
+    efficiency : pandas.DataFrame
+        The efficiency lookup table.
+    positions : list
+        A list of position indices to select from the geometric factor and
+        efficiency lookup tables.
+
+    Returns
+    -------
+    xarray.Dataset
+        The updated L2 dataset with species intensities calculated.
+    """
+    # Select the relevant positions from the geometric factors
+    geometric_factors = geometric_factors[:, :, positions]
+    # take the mean geometric factor across positions
+    geometric_factors = np.nanmean(geometric_factors, axis=-1)
+    scaler = len(positions)
+    # Calculate the species intensities using the provided geometric factors and
+    # efficiency. Species_intensity = species_rate / (gm * eff * esa_step)
+    for species in species_list:
+        # Select the relevant positions for the species from the efficiency LUT
+        # Shape: (epoch, esa_steps, positions)
+        species_eff = get_species_efficiency(species, efficiency)[
+            np.newaxis, :, positions
+        ]
+        if species_eff.size == 0:
+            logger.warning("No efficiency data found for species {species}. Skipping.")
+            continue
+        # Take the mean efficiency across positions
+        species_eff = np.nanmean(species_eff, axis=-1)
+        denominator = (
+            scaler * geometric_factors * species_eff * dataset["energy_table"].data
+        )
+        if species not in dataset:
+            logger.warning(
+                f"Species {species} not found in dataset. Filling with NaNS."
+            )
+            dataset[species] = np.full(dataset["energy_table"].data.shape, np.nan)
+        else:
+            dataset[species] = dataset[species] / denominator[:, :, np.newaxis]
+
+    return dataset
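The division in process_lo_species_intensity follows the denominator built in the loop, scaler * geometric_factors * species_eff * energy_table. A shape-only sketch with invented sizes (2 epochs, 4 ESA steps, 3 selected positions), mirroring the position averaging and the trailing np.newaxis broadcast:

```python
import numpy as np

rng = np.random.default_rng(0)
n_epoch, n_step, n_pos = 2, 4, 3

# Per-epoch, per-step factors, already averaged over the selected positions
# as in process_lo_species_intensity (all values invented).
gf = rng.uniform(0.5, 0.75, size=(n_epoch, n_step))      # mean geometric factor
eff = rng.uniform(0.3, 0.6, size=(1, n_step))            # mean species efficiency
energy = rng.uniform(1.0, 10.0, size=(n_epoch, n_step))  # energy_table stand-in
scaler = n_pos                                           # len(positions)

denominator = scaler * gf * eff * energy                 # shape (epoch, esa_step)

# Species rates carry an extra trailing axis, so the denominator is broadcast
# with a trailing np.newaxis, exactly as in the code above.
species_rate = rng.poisson(50.0, size=(n_epoch, n_step, 6)).astype(float)
intensity = species_rate / denominator[:, :, np.newaxis]
print(intensity.shape)  # (2, 4, 6)
```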
+
+
+def process_hi_omni(dependencies: ProcessingInputCollection) -> xr.Dataset:
+    """
+    Process the hi-omni L1B dataset to calculate omni-directional intensities.
+
+    See section 11.1.3 of the CoDICE algorithm document for details.
+
+    The formula for omni-directional intensities is::
+
+        l1B species data / (
+            geometric_factor * number_of_ssd * efficiency * energy_passband
+        )
+
+    Geometric factor is constant for all species which is 0.013.
+    Number of SSD is constant for all species which is 12.
+    Efficiency is provided in a CSV file for each species and energy bin.
+    Energy passband is calculated from L1B variables energy_bin_minus + energy_bin_plus
+
+    Parameters
+    ----------
+    dependencies : ProcessingInputCollection
+        The collection of processing input files.
+
+    Returns
+    -------
+    xarray.Dataset
+        The updated L2 dataset with omni-directional intensities calculated.
+    """
+    l1b_file = dependencies.get_file_paths(descriptor="hi-omni")[0]
+    l1b_dataset = load_cdf(l1b_file)
+
+    # Read the efficiencies data from the CSV file
+    efficiencies_file = dependencies.get_file_paths(descriptor="l2-hi-omni-efficiency")[
+        0
+    ]
+    efficiencies_df = pd.read_csv(efficiencies_file)
+    # Omni product has 8 species and each species has different shape.
+    # Eg.
+    # h - (epoch, 15)
+    # c - (epoch, 18)
+    # uh - (epoch, 5)
+    # etc.
+    # Because of that, we need to loop over each species and calculate
+    # omni-directional intensities separately.
+    for species in HI_OMNI_VARIABLE_NAMES:
+        species_data = efficiencies_df[efficiencies_df["species"] == species]
+        # Read current species' effificiency
+        species_efficiencies = species_data["average_efficiency"].values[np.newaxis, :]
+        # Calculate energy passband from L1B data
+        energy_passbands = (
+            l1b_dataset[f"energy_{species}_plus"]
+            + l1b_dataset[f"energy_{species}_minus"]
+        ).values[np.newaxis, :]
+        # Calculate omni-directional intensities
+        omni_direction_intensities = l1b_dataset[species] / (
+            L2_GEOMETRIC_FACTOR
+            * L2_HI_NUMBER_OF_SSD
+            * species_efficiencies
+            * energy_passbands
+        )
+        # Store by replacing existing species data with omni-directional intensities
+        l1b_dataset[species].values = omni_direction_intensities
+
+    # TODO: this may go away once Joey and I fix L1B CDF
+    # Update global CDF attributes
     cdf_attrs = ImapCdfAttributes()
-
+    cdf_attrs.add_instrument_global_attrs("codice")
+    cdf_attrs.add_instrument_variable_attrs("codice", "l2-hi-omni")
+    l1b_dataset.attrs = cdf_attrs.get_global_attributes("imap_codice_l2_hi-omni")
+
+    # TODO: ask Joey to add attrs for epoch_delta_plus and epoch_delta_minus
+    # and update dimension to be 'epoch' in L1B data
+    for variable in l1b_dataset.data_vars:
+        if variable in ["epoch_delta_plus", "epoch_delta_minus", "data_quality"]:
+            l1b_dataset[variable].attrs = cdf_attrs.get_variable_attributes(
+                variable, check_schema=False
+            )
+        else:
+            l1b_dataset[variable].attrs = cdf_attrs.get_variable_attributes(
+                variable, check_schema=False
+            )
+
+    # Add these new coordinates
+    new_coords = {
+        "energy_h": l1b_dataset["energy_h"],
+        "energy_h_label": xr.DataArray(
+            l1b_dataset["energy_h"].values.astype(str),
+            dims=("energy_h",),
+            attrs=cdf_attrs.get_variable_attributes(
+                "energy_h_label", check_schema=False
+            ),
+        ),
+        "energy_he3": l1b_dataset["energy_he3"],
+        "energy_he3_label": xr.DataArray(
+            l1b_dataset["energy_he3"].values.astype(str),
+            dims=("energy_he3",),
+            attrs=cdf_attrs.get_variable_attributes(
+                "energy_he3_label", check_schema=False
+            ),
+        ),
+        "energy_he4": l1b_dataset["energy_he4"],
+        "energy_he4_label": xr.DataArray(
+            l1b_dataset["energy_he4"].values.astype(str),
+            dims=("energy_he4",),
+            attrs=cdf_attrs.get_variable_attributes(
+                "energy_he4_label", check_schema=False
+            ),
+        ),
+        "energy_c": l1b_dataset["energy_c"],
+        "energy_c_label": xr.DataArray(
+            l1b_dataset["energy_c"].values.astype(str),
+            dims=("energy_c",),
+            attrs=cdf_attrs.get_variable_attributes(
+                "energy_c_label", check_schema=False
+            ),
+        ),
+        "energy_o": l1b_dataset["energy_o"],
+        "energy_o_label": xr.DataArray(
+            l1b_dataset["energy_o"].values.astype(str),
+            dims=("energy_o",),
+            attrs=cdf_attrs.get_variable_attributes(
+                "energy_o_label", check_schema=False
+            ),
+        ),
+        "energy_ne_mg_si": l1b_dataset["energy_ne_mg_si"],
+        "energy_ne_mg_si_label": xr.DataArray(
+            l1b_dataset["energy_ne_mg_si"].values.astype(str),
+            dims=("energy_ne_mg_si",),
+            attrs=cdf_attrs.get_variable_attributes(
+                "energy_ne_mg_si_label", check_schema=False
+            ),
+        ),
+        "energy_fe": l1b_dataset["energy_fe"],
+        "energy_fe_label": xr.DataArray(
+            l1b_dataset["energy_fe"].values.astype(str),
+            dims=("energy_fe",),
+            attrs=cdf_attrs.get_variable_attributes(
+                "energy_fe_label", check_schema=False
+            ),
+        ),
+        "energy_uh": l1b_dataset["energy_uh"],
+        "energy_uh_label": xr.DataArray(
+            l1b_dataset["energy_uh"].values.astype(str),
+            dims=("energy_uh",),
+            attrs=cdf_attrs.get_variable_attributes(
+                "energy_uh_label", check_schema=False
+            ),
+        ),
+        "energy_junk": l1b_dataset["energy_junk"],
+        "energy_junk_label": xr.DataArray(
+            l1b_dataset["energy_junk"].values.astype(str),
+            dims=("energy_junk",),
+            attrs=cdf_attrs.get_variable_attributes(
+                "energy_junk_label", check_schema=False
+            ),
+        ),
+        "epoch": xr.DataArray(
+            l1b_dataset["epoch"].data,
+            dims=("epoch",),
+            attrs=cdf_attrs.get_variable_attributes("epoch", check_schema=False),
+        ),
+    }
+    l1b_dataset = l1b_dataset.assign_coords(new_coords)
+
+    return l1b_dataset
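To make the omni-directional formula concrete, a one-line numeric check. Only the 0.013 geometric factor and the 12 SSDs are the constants named in the docstring; the counts, efficiency, and passband below are invented:

```python
# counts / (geometric_factor * number_of_ssd * efficiency * energy_passband)
counts = 600.0
geometric_factor = 0.013   # L2_GEOMETRIC_FACTOR, per the docstring above
number_of_ssd = 12         # L2_HI_NUMBER_OF_SSD, per the docstring above
efficiency = 0.4           # invented; real values come from the efficiency CSV
energy_passband = 25.0     # invented; real value is energy_*_plus + energy_*_minus

intensity = counts / (geometric_factor * number_of_ssd * efficiency * energy_passband)
print(round(intensity, 2))  # 384.62
```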
+
+
+def process_hi_sectored(dependencies: ProcessingInputCollection) -> xr.Dataset:
+    """
+    Process the hi-omni L1B dataset to calculate omni-directional intensities.
+
+    See section 11.1.2 of the CoDICE algorithm document for details.
+
+    The formula for omni-directional intensities is::
+
+        l1b species data / (geometric_factor * efficiency * energy_passband)
+
+    Geometric factor is constant for all species and is 0.013.
+    Efficiency is provided in a CSV file for each species and energy bin and
+    position.
+    Energy passband is calculated from energy_bin_minus + energy_bin_plus
+
+    Parameters
+    ----------
+    dependencies : ProcessingInputCollection
+        The collection of processing input files.
+
+    Returns
+    -------
+    xarray.Dataset
+        The updated L2 dataset with omni-directional intensities calculated.
+    """
+    file_path = dependencies.get_file_paths(descriptor="hi-sectored")[0]
+    l1b_dataset = load_cdf(file_path)
+
+    # Update global CDF attributes
+    cdf_attrs = ImapCdfAttributes()
+    cdf_attrs.add_instrument_global_attrs("codice")
+    cdf_attrs.add_instrument_variable_attrs("codice", "l2-hi-sectored")
+
+    # Overwrite L1B variable attributes with L2 variable attributes
+    l2_dataset = xr.Dataset(
+        coords={
+            "spin_sector": l1b_dataset["spin_sector"],
+            "spin_sector_label": xr.DataArray(
+                l1b_dataset["spin_sector"].values.astype(str),
+                dims=("spin_sector",),
+                attrs=cdf_attrs.get_variable_attributes(
+                    "spin_sector_label", check_schema=False
+                ),
+            ),
+            "energy_h": l1b_dataset["energy_h"],
+            "energy_h_label": xr.DataArray(
+                l1b_dataset["energy_h"].values.astype(str),
+                dims=("energy_h",),
+                attrs=cdf_attrs.get_variable_attributes(
+                    "energy_h_label", check_schema=False
+                ),
+            ),
+            "energy_he3he4": l1b_dataset["energy_he3he4"],
+            "energy_he3he4_label": xr.DataArray(
+                l1b_dataset["energy_he3he4"].values.astype(str),
+                dims=("energy_he3he4",),
+                attrs=cdf_attrs.get_variable_attributes(
+                    "energy_he3he4_label", check_schema=False
+                ),
+            ),
+            "energy_cno": l1b_dataset["energy_cno"],
+            "energy_cno_label": xr.DataArray(
+                l1b_dataset["energy_cno"].values.astype(str),
+                dims=("energy_cno",),
+                attrs=cdf_attrs.get_variable_attributes(
+                    "energy_cno_label", check_schema=False
+                ),
+            ),
+            "energy_fe": l1b_dataset["energy_fe"],
+            "energy_fe_label": xr.DataArray(
+                l1b_dataset["energy_fe"].values.astype(str),
+                dims=("energy_fe",),
+                attrs=cdf_attrs.get_variable_attributes(
+                    "energy_fe_label", check_schema=False
+                ),
+            ),
+            "epoch": l1b_dataset["epoch"],
+            "elevation_angle": xr.DataArray(
+                HI_L2_ELEVATION_ANGLE,
+                dims=("elevation_angle",),
+                attrs=cdf_attrs.get_variable_attributes(
+                    "elevation_angle", check_schema=False
+                ),
+            ),
+            "elevation_angle_label": xr.DataArray(
+                HI_L2_ELEVATION_ANGLE.astype(str),
+                dims=("elevation_angle",),
+                attrs=cdf_attrs.get_variable_attributes(
+                    "elevation_angle_label", check_schema=False
+                ),
+            ),
+        },
+        attrs=cdf_attrs.get_global_attributes("imap_codice_l2_hi-sectored"),
+    )
+
+    efficiencies_file = dependencies.get_file_paths(
+        descriptor="l2-hi-sectored-efficiency"
+    )[0]
+
+    # Calculate sectored intensities
+    efficiencies_df = pd.read_csv(efficiencies_file)
+    # Similar to hi-omni, each species has different shape.
+    # Because of that, we need to loop over each species and calculate
+    # sectored intensities separately.
+    for species in HI_SECTORED_VARIABLE_NAMES:
+        # Efficiencies from dataframe maps to different dimension in L1B data.
+        # For example:
+        # l1b species 'h' has shape:
+        # (epoch, 8, 12, 12) -> (time, energy, spin_sector, inst_az)
+        # efficiencies 'h' has shape after reading from CSV:
+        # (8, 12) -> (energy, inst_az)
+        # NOTE: 12 here maps to last 12 in above l1b dimension.
+        # Because of this, it's easier to work with the data in xarray.
+        # Xarray automatically aligns dimensions and coordinates, making it easier
+        # to work with multi-dimensional data. Thus, we convert the efficiencies
+        # to xarray.DataArray with dimensions (energy, inst_az)
+        species_data = efficiencies_df[efficiencies_df["species"] == species].values
+        species_efficiencies = xr.DataArray(
+            species_data[:, 2:].astype(
+                float
+            ),  # Skip first two columns (species, energy_bin)
+            dims=(f"energy_{species}", "inst_az"),
+            coords=l1b_dataset[[f"energy_{species}", "inst_az"]],
+        )
+
+        # energy_passbands has shape:
+        # (8,) -> (energy)
+        energy_passbands = xr.DataArray(
+            l1b_dataset[f"energy_{species}_minus"]
+            + l1b_dataset[f"energy_{species}_plus"],
+            dims=(f"energy_{species}",),
+            coords=l2_dataset[[f"energy_{species}"]],
+            name="passband",
+        )
+
+        sectored_intensities = l1b_dataset[species] / (
+            L2_GEOMETRIC_FACTOR * species_efficiencies * energy_passbands
+        )
+
+        # Replace existing species data with omni-directional intensities
+        l2_dataset[species] = xr.DataArray(
+            sectored_intensities.data,
+            dims=("epoch", f"energy_{species}", "spin_sector", "elevation_angle"),
+            attrs=cdf_attrs.get_variable_attributes(species, check_schema=False),
+        )
+
+    # Calculate spin angle
+    # Formula:
+    # θ_(k,n) = (θ_(k,0)+30°* n) mod 360°
+    # where
+    # n is size of L2_HI_SECTORED_ANGLE, 0 to 11,
+    # k is size of inst_az from l1b, 0 to 11,
+    # Calculate spin angle by adding a base angle from L2_HI_SECTORED_ANGLE
+    # for each SSD index and then adding multiple of 30 degrees for each elevation.
+    # Then mod by 360 to keep it within 0-360 range.
+    elevation_angles = np.arange(len(l2_dataset["elevation_angle"].values)) * 30.0
+    spin_angles = (L2_HI_SECTORED_ANGLE[:, np.newaxis] + elevation_angles) % 360.0
+
+    # Add spin angle variable using the new elevation_angle dimension
+    l2_dataset["spin_angles"] = (("spin_sector", "elevation_angle"), spin_angles)
+    l2_dataset["spin_angles"].attrs = cdf_attrs.get_variable_attributes(
+        "spin_angles", check_schema=False
+    )
+
+    # Now carry over other variables from L1B to L2 dataset
+    for variable in l1b_dataset.data_vars:
+        if variable.startswith("epoch_") and variable != "epoch":
+            # get attrs with just that name
+            l2_dataset[variable] = xr.DataArray(
+                l1b_dataset[variable].data,
+                dims=("epoch",),
+                attrs=cdf_attrs.get_variable_attributes(variable, check_schema=False),
+            )
+        elif variable.startswith("energy_"):
+            l2_dataset[variable] = xr.DataArray(
+                l1b_dataset[variable].data,
+                dims=(f"energy_{variable.split('_')[1]}",),
+                attrs=cdf_attrs.get_variable_attributes(variable, check_schema=False),
+            )
+        elif variable.startswith("unc_"):
+            l2_dataset[variable] = xr.DataArray(
+                l1b_dataset[variable].data,
+                dims=(
+                    "epoch",
+                    f"energy_{variable.split('_')[1]}",
+                    "spin_sector",
+                    "elevation_angle",
+                ),
+                attrs=cdf_attrs.get_variable_attributes(variable),
+            )
+        elif variable == "data_quality":
+            l2_dataset[variable] = l1b_dataset[variable]
+            l2_dataset[variable].attrs.update(
+                cdf_attrs.get_variable_attributes(variable, check_schema=False)
+            )
+
+    l2_dataset["epoch"].attrs.update(
+        cdf_attrs.get_variable_attributes("epoch", check_schema=False)
+    )
+    return l2_dataset
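The spin-angle comment above, θ_(k,n) = (θ_(k,0) + 30° * n) mod 360°, reduces to one broadcast over the sector and elevation axes. A sketch with an invented base-angle table standing in for L2_HI_SECTORED_ANGLE (12 spin sectors, 12 elevations):

```python
import numpy as np

# Invented base angles theta_(k, 0), one per spin sector k; the real values live
# in L2_HI_SECTORED_ANGLE in imap_processing.codice.constants.
base_angles = np.arange(12) * 30.0 + 15.0   # shape (spin_sector,)

# n * 30 degrees for each elevation index n, as in process_hi_sectored.
elevation_offsets = np.arange(12) * 30.0    # shape (elevation_angle,)

spin_angles = (base_angles[:, np.newaxis] + elevation_offsets) % 360.0
print(spin_angles.shape)   # (12, 12) -> (spin_sector, elevation_angle)
print(spin_angles[0, :4])  # 15. 45. 75. 105.
```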
+
+
+def process_codice_l2(
+    descriptor: str, dependencies: ProcessingInputCollection
+) -> xr.Dataset:
+    """
+    Will process CoDICE l1 data to create l2 data products.
+
+    Parameters
+    ----------
+    descriptor : str
+        The descriptor for the CoDICE L1 file to process.
+    dependencies : ProcessingInputCollection
+        Collection of processing inputs such as ancillary data files.
+
+    Returns
+    -------
+    l2_dataset : xarray.Dataset
+        The``xarray`` dataset containing the science data and supporting metadata.
+    """
+    # This should get science files since ancillary or spice doesn't have data_type
+    # as data level.
+    file_path = dependencies.get_file_paths(descriptor=descriptor)[0]
+
+    # Now form product name from descriptor
+    descriptor = ScienceFilePath(file_path).descriptor
+    dataset_name = f"imap_codice_l2_{descriptor}"

     # TODO: update list of datasets that need geometric factors (if needed)
     # Compute geometric factors needed for intensity calculations
@@ -62,7 +638,46 @@ def process_codice_l2(file_path: Path) -> xr.Dataset:
         "imap_codice_l2_lo-sw-species",
         "imap_codice_l2_lo-nsw-species",
     ]:
-
+        l2_dataset = load_cdf(file_path).copy()
+
+        geometric_factor_lookup = get_geometric_factor_lut(dependencies)
+        efficiency_lookup = get_efficiency_lut(dependencies)
+        geometric_factors = compute_geometric_factors(
+            l2_dataset, geometric_factor_lookup
+        )
+        if dataset_name == "imap_codice_l2_lo-sw-species":
+            # Filter the efficiency lookup table for solar wind efficiencies
+            efficiencies = efficiency_lookup[efficiency_lookup["product"] == "sw"]
+            # Calculate the pickup ion sunward solar wind intensities using equation
+            # described in section 11.2.4 of algorithm document.
+            process_lo_species_intensity(
+                l2_dataset,
+                LO_SW_PICKUP_ION_SPECIES_VARIABLE_NAMES,
+                geometric_factors,
+                efficiencies,
+                PUI_POSITIONS,
+            )
+            # Calculate the sunward solar wind species intensities using equation
+            # described in section 11.2.4 of algorithm document.
+            process_lo_species_intensity(
+                l2_dataset,
+                LO_SW_SPECIES_VARIABLE_NAMES,
+                geometric_factors,
+                efficiencies,
+                SW_POSITIONS,
+            )
+        else:
+            # Filter the efficiency lookup table for non solar wind efficiencies
+            efficiencies = efficiency_lookup[efficiency_lookup["product"] == "nsw"]
+            # Calculate the non-sunward species intensities using equation
+            # described in section 11.2.4 of algorithm document.
+            process_lo_species_intensity(
+                l2_dataset,
+                LO_NSW_SPECIES_VARIABLE_NAMES,
+                geometric_factors,
+                efficiencies,
+                NSW_POSITIONS,
+            )

     if dataset_name in [
         "imap_codice_l2_hi-counters-singles",
@@ -91,13 +706,13 @@ def process_codice_l2(file_path: Path) -> xr.Dataset:
     elif dataset_name == "imap_codice_l2_hi-sectored":
         # Convert the sectored count rates using equation described in section
         # 11.1.3 of algorithm document.
-
+        l2_dataset = process_hi_sectored(dependencies)

     elif dataset_name == "imap_codice_l2_hi-omni":
         # Calculate the omni-directional intensity for each species using
         # equation described in section 11.1.4 of algorithm document
         # hopefully this can also apply to hi-ialirt
-
+        l2_dataset = process_hi_omni(dependencies)

     elif dataset_name == "imap_codice_l2_lo-direct-events":
         # Convert the following data variables to physical units using
@@ -122,24 +737,7 @@ def process_codice_l2(file_path: Path) -> xr.Dataset:
         # in section 11.2.3 of algorithm document.
         pass

-
-        # Calculate the sunward solar wind species intensities using equation
-        # described in section 11.2.4 of algorithm document.
-        # Calculate the pickup ion sunward solar wind intensities using equation
-        # described in section 11.2.4 of algorithm document.
-        # Hopefully this can also apply to lo-ialirt
-        # TODO: WIP - needs to be completed
-        l2_dataset = process_lo_sw_species(l2_dataset, geometric_factors)
-        pass
-
-    elif dataset_name == "imap_codice_l2_lo-nsw-species":
-        # Calculate the non-sunward solar wind species intensities using
-        # equation described in section 11.2.4 of algorithm document.
-        # Calculate the pickup ion non-sunward solar wind intensities using
-        # equation described in section 11.2.4 of algorithm document.
-        pass
-
-    logger.info(f"\nFinal data product:\n{l2_dataset}\n")
+    # logger.info(f"\nFinal data product:\n{l2_dataset}\n")

     return l2_dataset

@@ -191,88 +789,3 @@ def add_dataset_attributes(
         f"attribute manager."
     )
     return dataset
-
-
-def compute_geometric_factors(dataset: xr.Dataset) -> np.ndarray:
-    """
-    Calculate geometric factors needed for intensity calculations.
-
-    Geometric factors are determined by comparing the half-spin values per
-    esa_step in the HALF_SPIN_LUT to the rgfo_half_spin values in the provided
-    L2 dataset.
-
-    If the half-spin value is less than the corresponding rgfo_half_spin value,
-    the geometric factor is set to 0.75 (full mode); otherwise, it is set to 0.5
-    (reduced mode).
-
-    NOTE: Half spin values are associated with ESA steps which corresponds to the
-    index of the energy_per_charge dimension that is between 0 and 127.
-
-    Parameters
-    ----------
-    dataset : xarray.Dataset
-        The L2 dataset containing rgfo_half_spin data variable.
-
-    Returns
-    -------
-    geometric_factors : np.ndarray
-        A 2D array of geometric factors with shape (epoch, esa_steps).
-    """
-    # Convert the HALF_SPIN_LUT to a reverse mapping of esa_step to half_spin
-    esa_step_to_half_spin_map = {
-        val: key for key, vals in HALF_SPIN_LUT.items() for val in vals
-    }
-
-    # Create a list of half_spin values corresponding to ESA steps (0 to 127)
-    half_spin_values = np.array(
-        [esa_step_to_half_spin_map[step] for step in range(128)]
-    )
-
-    # Expand dimensions to compare each rgfo_half_spin value against
-    # all half_spin_values
-    rgfo_half_spin = dataset.rgfo_half_spin.data[:, np.newaxis]  # Shape: (epoch, 1)
-
-    # Perform the comparison and calculate geometric factors
-    geometric_factors = np.where(half_spin_values < rgfo_half_spin, 0.75, 0.5)
-
-    return geometric_factors
-
-
-def process_lo_sw_species(
-    dataset: xr.Dataset, geometric_factors: np.ndarray
-) -> xr.Dataset:
-    """
-    Process the lo-sw-species L2 dataset to calculate species intensities.
-
-    Parameters
-    ----------
-    dataset : xarray.Dataset
-        The L2 dataset to process.
-    geometric_factors : np.ndarray
-        The geometric factors array with shape (epoch, esa_steps).
-
-    Returns
-    -------
-    xarray.Dataset
-        The updated L2 dataset with species intensities calculated.
-    """
-    # TODO: WIP - implement intensity calculations
-    # valid_solar_wind_vars = [
-    # "hplus",
-    # "heplusplus",
-    # "cplus4",
-    # "cplus5",
-    # "cplus6",
-    # "oplus5",
-    # "oplus6",
-    # "oplus7",
-    # "oplus8",
-    # "ne",
-    # "mg",
-    # "si",
-    # "fe_loq",
-    # "fe_hiq",
-    # ]
-    # valid_pick_up_ion_vars = ["heplus", "cnoplus"]
-
-    return dataset
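For context, a minimal sketch of how the reworked entry point can be driven after this release. It assumes `dependencies` is a ProcessingInputCollection that already resolves the CoDICE L1 science file plus the ancillary CSVs referenced in the diff (l2-lo-gfactor, l2-lo-efficiency, and the hi-omni/hi-sectored efficiency tables); building that collection depends on imap_data_access configuration and is omitted here:

```python
from imap_data_access import ProcessingInputCollection

from imap_processing.codice.codice_l2 import process_codice_l2


def make_codice_l2(descriptor: str, dependencies: ProcessingInputCollection):
    # 1.0.0 took a single L1 file path (process_codice_l2(l1_filename));
    # 1.0.1 takes the product descriptor plus the whole input collection so the
    # lookup tables above can be resolved from the same collection.
    return process_codice_l2(descriptor, dependencies)


# l2_dataset = make_codice_l2("lo-sw-species", dependencies)
```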