imap-processing 0.15.0__py3-none-any.whl → 0.16.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of imap-processing has been flagged as possibly problematic by the registry.

Files changed (35)
  1. imap_processing/_version.py +2 -2
  2. imap_processing/cdf/config/imap_ialirt_l1_variable_attrs.yaml +113 -130
  3. imap_processing/cli.py +1 -4
  4. imap_processing/codice/codice_l1a.py +87 -62
  5. imap_processing/codice/codice_l2.py +0 -8
  6. imap_processing/codice/constants.py +16 -5
  7. imap_processing/hi/hi_l1a.py +447 -0
  8. imap_processing/hi/{l1b/hi_l1b.py → hi_l1b.py} +1 -1
  9. imap_processing/hi/{l1c/hi_l1c.py → hi_l1c.py} +21 -21
  10. imap_processing/hi/{l2/hi_l2.py → hi_l2.py} +13 -13
  11. imap_processing/hi/utils.py +6 -6
  12. imap_processing/hit/l1b/hit_l1b.py +30 -11
  13. imap_processing/ialirt/constants.py +38 -0
  14. imap_processing/ialirt/l0/parse_mag.py +1 -1
  15. imap_processing/ialirt/l0/process_codice.py +91 -0
  16. imap_processing/ialirt/l0/process_hit.py +12 -21
  17. imap_processing/ialirt/l0/process_swapi.py +172 -23
  18. imap_processing/ialirt/l0/process_swe.py +3 -10
  19. imap_processing/ialirt/utils/constants.py +16 -2
  20. imap_processing/ialirt/utils/create_xarray.py +59 -11
  21. imap_processing/ultra/utils/ultra_l1_utils.py +4 -2
  22. {imap_processing-0.15.0.dist-info → imap_processing-0.16.0.dist-info}/METADATA +1 -1
  23. {imap_processing-0.15.0.dist-info → imap_processing-0.16.0.dist-info}/RECORD +26 -32
  24. imap_processing/hi/l1a/__init__.py +0 -0
  25. imap_processing/hi/l1a/hi_l1a.py +0 -98
  26. imap_processing/hi/l1a/histogram.py +0 -152
  27. imap_processing/hi/l1a/science_direct_event.py +0 -214
  28. imap_processing/hi/l1b/__init__.py +0 -0
  29. imap_processing/hi/l1c/__init__.py +0 -0
  30. imap_processing/hi/l2/__init__.py +0 -0
  31. imap_processing/ialirt/l0/process_codicehi.py +0 -156
  32. imap_processing/ialirt/l0/process_codicelo.py +0 -41
  33. {imap_processing-0.15.0.dist-info → imap_processing-0.16.0.dist-info}/LICENSE +0 -0
  34. {imap_processing-0.15.0.dist-info → imap_processing-0.16.0.dist-info}/WHEEL +0 -0
  35. {imap_processing-0.15.0.dist-info → imap_processing-0.16.0.dist-info}/entry_points.txt +0 -0
imap_processing/hit/l1b/hit_l1b.py

@@ -366,7 +366,8 @@ def subset_data_for_sectored_counts(
     A set of sectored data starts with hydrogen and ends with iron and correspond to
     the mod 10 values 0-9. The livetime values from the previous 10 minutes are used
     to calculate the rates for each set since those counts are transmitted 10 minutes
-    after they were collected.
+    after they were collected. Therefore, only complete sets of sectored counts where
+    livetime from the previous 10 minutes are available are included in the output.
 
     Parameters
     ----------
@@ -378,7 +379,7 @@ def subset_data_for_sectored_counts(
     Returns
     -------
     tuple[xr.Dataset, xr.DataArray]
-        Subsetted L1A counts dataset and corresponding livetime values.
+        Dataset of complete sectored counts and corresponding livetime values.
     """
     # Identify 10-minute intervals of complete sectored counts.
     bin_size = 10
@@ -392,16 +393,34 @@ def subset_data_for_sectored_counts(
     start_indices = np.where(matches)[0]
 
     # Filter out start indices that are less than or equal to the bin size
-    # since the previous 10 minutes are needed
-    start_indices = start_indices[start_indices > bin_size]
-    data_slice = slice(start_indices[0], start_indices[-1] + bin_size)
-
-    # Subset data to include only complete sets of sectored counts
-    l1b_sectored_rates_dataset = l1a_counts_dataset.isel(epoch=data_slice)
+    # since the previous 10 minutes are needed for calculating rates
+    if start_indices.size == 0:
+        logger.error(
+            "No data to process - valid start indices not found for "
+            "complete sectored counts."
+        )
+        raise ValueError("No valid start indices found for complete sectored counts.")
+    else:
+        start_indices = start_indices[start_indices >= bin_size]
+
+    # Subset data for complete sets of sectored counts.
+    # Each set of sectored counts is 10 minutes long, so we take the indices
+    # starting from the start indices and extend to the bin size of 10.
+    # This creates a 1D array of indices that correspond to the complete
+    # sets of sectored counts which is used to filter the L1A dataset and
+    # create the L1B sectored rates dataset.
+    data_indices = np.concatenate(
+        [np.arange(idx, idx + bin_size) for idx in start_indices]
+    )
+    l1b_sectored_rates_dataset = l1a_counts_dataset.isel(epoch=data_indices)
 
-    # Subset livetime staggered from sectored counts by 10 minutes
-    livetime_slice = slice(start_indices[0] - bin_size, start_indices[-1])
-    livetime = livetime[livetime_slice]
+    # Subset livetime values corresponding to the previous 10 minutes
+    # for each start index. This ensures the livetime data aligns correctly
+    # with the sectored counts for rate calculations.
+    livetime_indices = np.concatenate(
+        [np.arange(idx - bin_size, idx) for idx in start_indices]
+    )
+    livetime = livetime.isel(epoch=livetime_indices)
 
     return l1b_sectored_rates_dataset, livetime
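Illustrative sketch (not part of the package diff, synthetic start indices only) of the index construction used above: each complete set keeps its own 10 epochs of counts, while the matching livetime window is shifted back by one full bin.

    import numpy as np

    bin_size = 10
    # Hypothetical start indices of two complete sectored-count sets
    start_indices = np.array([10, 30])

    # Epoch indices holding the counts of each complete set
    data_indices = np.concatenate(
        [np.arange(idx, idx + bin_size) for idx in start_indices]
    )
    # Livetime indices from the preceding 10 minutes of each set
    livetime_indices = np.concatenate(
        [np.arange(idx - bin_size, idx) for idx in start_indices]
    )

    print(data_indices)      # [10 11 ... 19 30 31 ... 39]
    print(livetime_indices)  # [ 0  1 ...  9 20 21 ... 29]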
 
imap_processing/ialirt/constants.py

@@ -0,0 +1,38 @@
+"""Module for constants and useful shared classes used in I-ALiRT processing."""
+
+from dataclasses import dataclass
+
+import numpy as np
+
+
+@dataclass(frozen=True)
+class IalirtSwapiConstants:
+    """
+    Constants for I-ALiRT SWAPI which can be used across different levels or classes.
+
+    Attributes
+    ----------
+    BOLTZ: float
+        Boltzmann constant [J/K]
+    AT_MASS: float
+        Atomic mass [kg]
+    PROT_MASS: float
+        Mass of proton [kg]
+    EFF_AREA: float
+        Instrument effective area [m^2]
+    AZ_FOV: float
+        Azimuthal width of the field of view for solar wind [radians]
+    FWHM_WIDTH: float
+        Full Width at Half Maximum of energy width [unitless]
+    SPEED_EW: float
+        Speed width of energy passband [unitless]
+    """
+
+    # Scientific constants used in optimization model
+    boltz = 1.380649e-23  # Boltzmann constant, J/K
+    at_mass = 1.6605390666e-27  # atomic mass, kg
+    prot_mass = 1.007276466621 * at_mass  # mass of proton, kg
+    eff_area = 3.3e-5 * 1e-4  # effective area, meters squared
+    az_fov = np.deg2rad(30)  # azimuthal width of the field of view, radians
+    fwhm_width = 0.085  # FWHM of energy width
+    speed_ew = 0.5 * fwhm_width  # speed width of energy passband
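Because the dataclass is frozen and the values are class attributes, they can be read directly from the class. A minimal usage sketch (the thermal-velocity expression mirrors the count-rate model later in this diff; the temperature value is an assumed, illustrative input):

    import numpy as np

    from imap_processing.ialirt.constants import IalirtSwapiConstants as Consts

    temp = 1.0e5  # assumed solar wind proton temperature, K (illustrative)
    # Thermal speed of solar wind protons, m/s
    thermal_velocity = np.sqrt(2 * Consts.boltz * temp / Consts.prot_mass)
    print(thermal_velocity, Consts.eff_area, Consts.az_fov)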
imap_processing/ialirt/l0/parse_mag.py

@@ -390,7 +390,7 @@ def process_packet(
        {
            "apid": 478,
            "met": int(met.values.min()),
-            "utc": met_to_utc(met.values.min()).split(".")[0],
+            "met_in_utc": met_to_utc(met.values.min()).split(".")[0],
            "ttj2000ns": int(met_to_ttj2000ns(met.values.min())),
            "mag_4s_b_gse": [Decimal("0.0") for _ in range(3)],
            "mag_4s_b_gsm": [Decimal("0.0") for _ in range(3)],
imap_processing/ialirt/l0/process_codice.py

@@ -0,0 +1,91 @@
+"""Functions to support I-ALiRT CoDICE processing."""
+
+import logging
+from decimal import Decimal
+from typing import Any
+
+import xarray as xr
+
+from imap_processing.codice import constants
+from imap_processing.ialirt.utils.time import calculate_time
+from imap_processing.spice.time import met_to_ttj2000ns, met_to_utc
+
+logger = logging.getLogger(__name__)
+
+FILLVAL_FLOAT32 = Decimal(str(-1.0e31))
+
+
+def process_codice(
+    dataset: xr.Dataset,
+) -> tuple[list[dict[str, Any]], list[dict[str, Any]]]:
+    """
+    Create final data products.
+
+    Parameters
+    ----------
+    dataset : xr.Dataset
+        Decommed L0 data.
+
+    Returns
+    -------
+    codice_data : list[dict]
+        Dictionary of final data product.
+
+    Notes
+    -----
+    This function is incomplete and will need to be updated to include the
+    necessary calculations and data products.
+    - Calculate rates (assume 4 minutes per group)
+    - Calculate L2 CoDICE pseudodensities (pg 37 of Algorithm Document)
+    - Calculate the public data products
+    """
+    # For I-ALiRT SIT, the test data being used has all zeros and thus no
+    # groups can be found, thus there is no data to process
+    # TODO: Once I-ALiRT test data is acquired that actually has data in it,
+    # this can be turned back on
+    # codicelo_data = create_ialirt_dataset(CODICEAPID.COD_LO_IAL, dataset)
+    # codicehi_data = create_ialirt_dataset(CODICEAPID.COD_HI_IAL, dataset)
+
+    # TODO: calculate rates
+    # This will be done in codice.codice_l1b
+
+    # TODO: calculate L2 CoDICE pseudodensities
+    # This will be done in codice.codice_l2
+
+    # TODO: calculate the public data products
+    # This will be done in this module
+
+    # Create mock dataset for I-ALiRT SIT
+    # TODO: Once I-ALiRT test data is acquired that actually has data in it,
+    # we should be able to properly populate the I-ALiRT data, but for
+    # now, just create lists of dicts with FILLVALs
+    cod_lo_data = []
+    cod_hi_data = []
+
+    for epoch in range(len(dataset.epoch.data)):
+        sc_sclk_sec = dataset.sc_sclk_sec.data[epoch]
+        sc_sclk_sub_sec = dataset.sc_sclk_sub_sec.data[epoch]
+        met = calculate_time(sc_sclk_sec, sc_sclk_sub_sec, 256)
+        utc = met_to_utc(met).split(".")[0]
+        ttj2000ns = int(met_to_ttj2000ns(met))
+
+        epoch_data = {
+            "apid": int(dataset.pkt_apid[epoch].data),
+            "met": met,
+            "met_to_utc": utc,
+            "ttj2000ns": ttj2000ns,
+        }
+
+        # Add in CoDICE-Lo specific data
+        cod_lo_epoch_data = epoch_data.copy()
+        for field in constants.CODICE_LO_IAL_DATA_FIELDS:
+            cod_lo_epoch_data[f"codicelo_{field}"] = FILLVAL_FLOAT32
+        cod_lo_data.append(cod_lo_epoch_data)
+
+        # Add in CoDICE-Hi specific data
+        cod_hi_epoch_data = epoch_data.copy()
+        for field in constants.CODICE_HI_IAL_DATA_FIELDS:
+            cod_hi_epoch_data[f"codicehi_{field}"] = FILLVAL_FLOAT32
+        cod_hi_data.append(cod_hi_epoch_data)
+
+    return cod_lo_data, cod_hi_data
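Until real I-ALiRT test data is available, the function only emits placeholder records. A hypothetical CoDICE-Lo entry (illustrative placeholder values; field names assume CODICE_LO_IAL_DATA_FIELDS matches the codicelo_* keys listed later in this diff) would look roughly like:

    from decimal import Decimal

    FILLVAL_FLOAT32 = Decimal(str(-1.0e31))

    # Hypothetical cod_lo_data entry; every codicelo_* field is a fill value.
    example_record = {
        "apid": 0,            # placeholder APID
        "met": 0,             # placeholder mission elapsed time, s
        "met_to_utc": "",     # placeholder UTC string
        "ttj2000ns": 0,       # placeholder TT J2000 time, ns
        "codicelo_c_over_o_abundance": FILLVAL_FLOAT32,
        "codicelo_mg_over_o_abundance": FILLVAL_FLOAT32,
        # ... remaining codicelo_* fields, also FILLVAL_FLOAT32
    }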
imap_processing/ialirt/l0/process_hit.py

@@ -1,7 +1,6 @@
 """Functions to support HIT processing."""
 
 import logging
-from decimal import Decimal
 
 import numpy as np
 import xarray as xr
@@ -170,27 +169,19 @@ def process_hit(xarray_data: xr.Dataset) -> list[dict]:
            {
                "apid": 478,
                "met": int(met),
-                "utc": met_to_utc(met).split(".")[0],
+                "met_in_utc": met_to_utc(met).split(".")[0],
                "ttj2000ns": int(met_to_ttj2000ns(met)),
-                "hit_e_a_side_low_en": Decimal(
-                    str(l1["IALRT_RATE_1"] + l1["IALRT_RATE_2"])
-                ),
-                "hit_e_a_side_med_en": Decimal(
-                    str(l1["IALRT_RATE_5"] + l1["IALRT_RATE_6"])
-                ),
-                "hit_e_a_side_high_en": Decimal(str(l1["IALRT_RATE_7"])),
-                "hit_e_b_side_low_en": Decimal(
-                    str(l1["IALRT_RATE_11"] + l1["IALRT_RATE_12"])
-                ),
-                "hit_e_b_side_med_en": Decimal(
-                    str(l1["IALRT_RATE_15"] + l1["IALRT_RATE_16"])
-                ),
-                "hit_e_b_side_high_en": Decimal(str(l1["IALRT_RATE_17"])),
-                "hit_h_omni_med_en": Decimal(str(l1["H_12_15"] + l1["H_15_70"])),
-                "hit_h_a_side_high_en": Decimal(str(l1["IALRT_RATE_8"])),
-                "hit_h_b_side_high_en": Decimal(str(l1["IALRT_RATE_18"])),
-                "hit_he_omni_low_en": Decimal(str(l1["HE4_06_08"])),
-                "hit_he_omni_high_en": Decimal(str(l1["HE4_15_70"])),
+                "hit_e_a_side_low_en": int(l1["IALRT_RATE_1"] + l1["IALRT_RATE_2"]),
+                "hit_e_a_side_med_en": int(l1["IALRT_RATE_5"] + l1["IALRT_RATE_6"]),
+                "hit_e_a_side_high_en": int(l1["IALRT_RATE_7"]),
+                "hit_e_b_side_low_en": int(l1["IALRT_RATE_11"] + l1["IALRT_RATE_12"]),
+                "hit_e_b_side_med_en": int(l1["IALRT_RATE_15"] + l1["IALRT_RATE_16"]),
+                "hit_e_b_side_high_en": int(l1["IALRT_RATE_17"]),
+                "hit_h_omni_med_en": int(l1["H_12_15"] + l1["H_15_70"]),
+                "hit_h_a_side_high_en": int(l1["IALRT_RATE_8"]),
+                "hit_h_b_side_high_en": int(l1["IALRT_RATE_18"]),
+                "hit_he_omni_low_en": int(l1["HE4_06_08"]),
+                "hit_he_omni_high_en": int(l1["HE4_15_70"]),
            }
        )
 
imap_processing/ialirt/l0/process_swapi.py

@@ -1,20 +1,144 @@
 """Functions to support I-ALiRT SWAPI processing."""
 
 import logging
+from decimal import Decimal
+from typing import Optional
 
 import numpy as np
+import pandas as pd
 import xarray as xr
-from xarray import DataArray
+from scipy.optimize import curve_fit
+from scipy.special import erf
 
+from imap_processing import imap_module_directory
+from imap_processing.ialirt.constants import IalirtSwapiConstants as Consts
 from imap_processing.ialirt.utils.grouping import find_groups
-
-# from imap_processing.swapi.l1.swapi_l1 import process_sweep_data
-# from imap_processing.swapi.l2.swapi_l2 import TIME_PER_BIN
+from imap_processing.ialirt.utils.time import calculate_time
+from imap_processing.spice.time import met_to_ttj2000ns, met_to_utc
+from imap_processing.swapi.l1.swapi_l1 import process_sweep_data
+from imap_processing.swapi.l2.swapi_l2 import TIME_PER_BIN
 
 logger = logging.getLogger(__name__)
 
 
-def process_swapi_ialirt(unpacked_data: xr.Dataset) -> dict[str, DataArray]:
+def count_rate(
+    energy_pass: float, speed: float, density: float, temp: float
+) -> float | np.ndarray:
+    """
+    Compute SWAPI count rate for provided energy passband, speed, density and temp.
+
+    This model for coincidence count rate was developed by the SWAPI instrument
+    science team, detailed on page 52 of the IMAP SWAPI Instrument Algorithms Document.
+
+    Parameters
+    ----------
+    energy_pass : float
+        Energy passband [eV].
+    speed : float
+        Bulk solar wind speed [km/s].
+    density : float
+        Proton density [cm^-3].
+    temp : float
+        Temperature [K].
+
+    Returns
+    -------
+    count_rate : float | np.ndarray
+        Particle coincidence count rate.
+    """
+    # thermal velocity of solar wind ions
+    thermal_velocity = np.sqrt(2 * Consts.boltz * temp / Consts.prot_mass)
+    beta = 1 / (thermal_velocity**2)
+    # convert energy to Joules
+    center_speed = np.sqrt(2 * energy_pass * 1.60218e-19 / Consts.prot_mass)
+    speed = speed * 1000  # convert km/s to m/s
+    density = density * 1e6  # convert 1/cm**3 to 1/m**3
+
+    return (
+        (density * Consts.eff_area * (beta / np.pi) ** (3 / 2))
+        * (np.exp(-beta * (center_speed**2 + speed**2 - 2 * center_speed * speed)))
+        * np.sqrt(np.pi / (beta * speed * center_speed))
+        * erf(np.sqrt(beta * speed * center_speed) * (Consts.az_fov / 2))
+        * (
+            center_speed**4
+            * Consts.speed_ew
+            * np.arcsin(thermal_velocity / center_speed)
+        )
+    )
+
+
+def optimize_pseudo_parameters(
+    count_rates: np.ndarray,
+    count_rate_error: np.ndarray,
+    energy_passbands: Optional[np.ndarray] = None,
+) -> (dict)[str, list[float]]:
+    """
+    Find the pseudo speed (u), density (n) and temperature (T) of solar wind particles.
+
+    Fit a curve to calculated count rate values as a function of energy passband.
+
+    Parameters
+    ----------
+    count_rates : np.ndarray
+        Particle coincidence count rates.
+    count_rate_error : np.ndarray
+        Standard deviation of the coincidence count rates parameter.
+    energy_passbands : np.ndarray, default None
+        Energy passbands, passed in only for testing purposes.
+
+    Returns
+    -------
+    solution_dict : dict
+        Dictionary containing the optimized speed, density, and temperature values for
+        each sweep included in the input count_rates array.
+    """
+    if not energy_passbands:
+        # Read in energy passbands
+        energy_data = pd.read_csv(
+            f"{imap_module_directory}/tests/swapi/lut/imap_swapi_esa-unit"
+            f"-conversion_20250211_v000.csv"
+        )
+        energy_passbands = (
+            energy_data["Energy"][0:63]
+            .replace(",", "", regex=True)
+            .to_numpy()
+            .astype(float)
+        )
+
+    # Initial guess pulled from page 52 of the IMAP SWAPI Instrument Algorithms Document
+    initial_param_guess = np.array([550, 5.27, 1e5])
+    solution_dict = {  # type: ignore
+        "pseudo_speed": [],
+        "pseudo_density": [],
+        "pseudo_temperature": [],
+    }
+
+    for sweep in np.arange(count_rates.shape[0]):
+        current_sweep_count_rates = count_rates[sweep, :]
+        current_sweep_count_rate_errors = count_rate_error[sweep, :]
+        # Find the max count rate, and use the 6 points surrounding it (inclusive)
+        max_index = np.argmax(current_sweep_count_rates)
+        sol = curve_fit(
+            f=count_rate,
+            xdata=energy_passbands.take(
+                range(max_index - 3, max_index + 3), mode="wrap"
+            ),
+            ydata=current_sweep_count_rates.take(
+                range(max_index - 3, max_index + 3), mode="wrap"
+            ),
+            sigma=current_sweep_count_rate_errors.take(
+                range(max_index - 3, max_index + 3), mode="wrap"
+            ),
+            p0=initial_param_guess,
+        )
+        solution_dict["pseudo_speed"].append(sol[0][0])
+        solution_dict["pseudo_density"].append(sol[0][1])
+        solution_dict["pseudo_temperature"].append(sol[0][2])
+
+    return solution_dict
+
+
+def process_swapi_ialirt(unpacked_data: xr.Dataset) -> list[dict]:
     """
     Extract I-ALiRT variables and calculate coincidence count rate.
 
@@ -32,7 +156,21 @@ def process_swapi_ialirt(unpacked_data: xr.Dataset) -> dict[str, DataArray]:
 
     sci_dataset = unpacked_data.sortby("epoch", ascending=True)
 
-    grouped_dataset = find_groups(sci_dataset, (0, 11), "swapi_seq_number", "swapi_acq")
+    met = calculate_time(
+        sci_dataset["sc_sclk_sec"], sci_dataset["sc_sclk_sub_sec"], 256
+    )
+
+    # Add required parameters.
+    sci_dataset["met"] = met
+    met_values = []
+
+    grouped_dataset = find_groups(sci_dataset, (0, 11), "swapi_seq_number", "met")
+
+    if grouped_dataset.group.size == 0:
+        logger.warning(
+            "There was an issue with the SWAPI grouping process, returning empty data."
+        )
+        return [{}]
 
     for group in np.unique(grouped_dataset["group"]):
        # Sequence values for the group should be 0-11 with no duplicates.
@@ -40,6 +178,10 @@ def process_swapi_ialirt(unpacked_data: xr.Dataset) -> dict[str, DataArray]:
            (grouped_dataset["group"] == group)
        ]
 
+        met_values.append(
+            int(grouped_dataset["met"][(grouped_dataset["group"] == group).values][0])
+        )
+
        # Ensure no duplicates and all values from 0 to 11 are present
        if not np.array_equal(seq_values.astype(int), np.arange(12)):
            logger.info(
@@ -48,22 +190,29 @@ def process_swapi_ialirt(unpacked_data: xr.Dataset) -> dict[str, DataArray]:
            )
            continue
 
-    total_packets = len(grouped_dataset["swapi_seq_number"].data)
-
-    # It takes 12 sequence data to make one full SWAPI sweep
-    total_sequence = 12
-    total_full_sweeps = total_packets // total_sequence
-
-    met_values = grouped_dataset["swapi_shcoarse"].data.reshape(total_full_sweeps, 12)[
-        :, 0
-    ]
-
-    # raw_coin_count = process_sweep_data(grouped_dataset, "coin_cnt")
-    # raw_coin_rate = raw_coin_count / TIME_PER_BIN
-
-    swapi_data = {
-        "met": met_values
-        # more variables to go here
-    }
+    raw_coin_count = process_sweep_data(grouped_dataset, "swapi_coin_cnt")
+    raw_coin_rate = raw_coin_count / TIME_PER_BIN
+    count_rate_error = np.sqrt(raw_coin_count) / TIME_PER_BIN
+
+    solution = optimize_pseudo_parameters(raw_coin_rate, count_rate_error)
+
+    swapi_data = []
+
+    for entry in np.arange(0, len(solution["pseudo_speed"])):
+        swapi_data.append(
+            {
+                "apid": 478,
+                "met": met_values[entry],
+                "met_in_utc": met_to_utc(met_values[entry]).split(".")[0],
+                "ttj2000ns": int(met_to_ttj2000ns(met_values[entry])),
+                "swapi_pseudo_proton_speed": Decimal(solution["pseudo_speed"][entry]),
+                "swapi_pseudo_proton_density": Decimal(
+                    solution["pseudo_density"][entry]
+                ),
+                "swapi_pseudo_proton_temperature": Decimal(
+                    solution["pseudo_temperature"][entry]
+                ),
+            }
+        )
 
     return swapi_data
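The count_rate() model added in this file is the forward model that curve_fit inverts for each sweep. A minimal sketch of evaluating it with assumed, illustrative (non-flight) solar wind parameters:

    import numpy as np

    from imap_processing.ialirt.l0.process_swapi import count_rate

    # Assumed nominal solar wind parameters (illustrative only)
    speed = 450.0    # bulk speed, km/s
    density = 5.0    # proton density, cm^-3
    temp = 1.0e5     # temperature, K

    # A few hypothetical energy passbands, eV
    energy_passbands = np.array([800.0, 1000.0, 1200.0])

    # Modeled coincidence count rate at each passband; these are the values
    # optimize_pseudo_parameters() fits around the measured peak of each sweep.
    modeled_rates = count_rate(energy_passbands, speed, density, temp)
    print(modeled_rates)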
imap_processing/ialirt/l0/process_swe.py

@@ -1,7 +1,6 @@
 """Functions to support I-ALiRT SWE processing."""
 
 import logging
-from decimal import Decimal
 
 import numpy as np
 import pandas as pd
@@ -547,16 +546,10 @@ def process_swe(accumulated_data: xr.Dataset, in_flight_cal_files: list) -> list
        {
            "apid": 478,
            "met": int(grouped["met"].min()),
-            "utc": met_to_utc(grouped["met"].min()).split(".")[0],
+            "met_in_utc": met_to_utc(grouped["met"].min()).split(".")[0],
            "ttj2000ns": int(met_to_ttj2000ns(grouped["met"].min())),
-            **{
-                f"swe_normalized_counts_half_1_esa_{i}": Decimal(str(val))
-                for i, val in enumerate(summed_first)
-            },
-            **{
-                f"swe_normalized_counts_half_2_esa_{i}": Decimal(str(val))
-                for i, val in enumerate(summed_second)
-            },
+            "swe_normalized_counts_half_1_esa": [int(val) for val in summed_first],
+            "swe_normalized_counts_half_2_esa": [int(val) for val in summed_second],
            "swe_counterstreaming_electrons": max(bde_first_half, bde_second_half),
        }
    )
imap_processing/ialirt/utils/constants.py

@@ -1,6 +1,20 @@
 """Keys for I-ALiRT data products."""
 
 IALIRT_KEYS = [
+    # H intensities in 15 energy ranges and binned into 4 azimuths and 4 spin angle bins
+    "codicehi_h",
+    # C/O abundance ratio
+    "codicelo_c_over_o_abundance",
+    # Mg/O abundance ratio
+    "codicelo_mg_over_o_abundance",
+    # Fe/O abundance ratio
+    "codicelo_fe_over_o_abundance",
+    # C+6/C+5 charge state ratio
+    "codicelo_c_plus_6_over_c_plus_5_ratio",
+    # O+7/O+6 charge state ratio
+    "codicelo_o_plus_7_over_o_plus_6_ratio",
+    # Fe low/Fe high charge state ratio
+    "codicelo_fe_low_over_fe_high_ratio",
     # Low energy (~300 keV) electrons (A-side)
     "hit_e_a_side_low_en",
     # Medium energy (~3 MeV) electrons (A-side)
@@ -40,9 +54,9 @@ IALIRT_KEYS = [
     # Pseudo temperature of solar wind protons in plasma frame
     "swapi_pseudo_proton_temperature",
     # SWE Normalized Counts - Half Cycle 1
-    *[f"swe_normalized_counts_half_1_esa_{i}" for i in range(8)],
+    "swe_normalized_counts_half_1",
     # SWE Normalized Counts - Half Cycle 2
-    *[f"swe_normalized_counts_half_2_esa_{i}" for i in range(8)],
+    "swe_normalized_counts_half_2",
     # SWE Counterstreaming flag
     "swe_counterstreaming_electrons",
 ]
imap_processing/ialirt/utils/create_xarray.py

@@ -7,7 +7,7 @@ from imap_processing.cdf.imap_cdf_manager import ImapCdfAttributes
 from imap_processing.ialirt.utils.constants import IALIRT_KEYS
 
 
-def create_xarray_from_records(records: list[dict]) -> xr.Dataset:
+def create_xarray_from_records(records: list[dict]) -> xr.Dataset:  # noqa: PLR0912
     """
     Create dataset from a list of records.
 
@@ -39,16 +39,51 @@ def create_xarray_from_records(records: list[dict]) -> xr.Dataset:
        data=ttj2000ns_values,
        name="epoch",
        dims=["epoch"],
-        attrs=cdf_manager.get_variable_attributes("epoch"),
+        attrs=cdf_manager.get_variable_attributes("epoch", check_schema=False),
    )
    component = xr.DataArray(
        ["x", "y", "z"],
        name="component",
        dims=["component"],
-        attrs=cdf_manager.get_variable_attributes("component"),
+        attrs=cdf_manager.get_variable_attributes("component", check_schema=False),
    )
 
-    coords = {"epoch": epoch, "component": component}
+    esa_step = xr.DataArray(
+        data=np.arange(8, dtype=np.uint8),
+        name="esa_step",
+        dims=["esa_step"],
+        attrs=cdf_manager.get_variable_attributes("esa_step", check_schema=False),
+    )
+
+    energy_ranges = xr.DataArray(
+        data=np.arange(15, dtype=np.uint8),
+        name="energy_ranges",
+        dims=["energy_ranges"],
+        attrs=cdf_manager.get_variable_attributes("energy_ranges", check_schema=False),
+    )
+
+    azimuth = xr.DataArray(
+        data=np.arange(4, dtype=np.uint8),
+        name="azimuth",
+        dims=["azimuth"],
+        attrs=cdf_manager.get_variable_attributes("azimuth", check_schema=False),
+    )
+
+    spin_angle_bin = xr.DataArray(
+        data=np.arange(4, dtype=np.uint8),
+        name="spin_angle_bin",
+        dims=["spin_angle_bin"],
+        attrs=cdf_manager.get_variable_attributes("spin_angle_bin", check_schema=False),
+    )
+
+    coords = {
+        "epoch": epoch,
+        "component": component,
+        "esa_step": esa_step,
+        "energy_ranges": energy_ranges,
+        "azimuth": azimuth,
+        "spin_angle_bin": spin_angle_bin,
+    }
    dataset = xr.Dataset(
        coords=coords,
        attrs=cdf_manager.get_global_attributes("imap_ialirt_l1_realtime"),
@@ -56,17 +91,25 @@ def create_xarray_from_records(records: list[dict]) -> xr.Dataset:
 
    # Create empty dataset for each key.
    for key in instrument_keys:
-        attrs = cdf_manager.get_variable_attributes(key)
+        attrs = cdf_manager.get_variable_attributes(key, check_schema=False)
        fillval = attrs.get("FILLVAL")
        if key.startswith("mag"):
            data = np.full((n, 3), fillval, dtype=np.float32)
            dims = ["epoch", "component"]
            dataset[key] = xr.DataArray(data, dims=dims, attrs=attrs)
+        elif key.startswith("codicehi"):
+            data = np.full((n, 15, 4, 4), fillval, dtype=np.float32)
+            dims = ["epoch", "energy", "azimuth", "spin_angle_bin"]
+            dataset[key] = xr.DataArray(data, dims=dims, attrs=attrs)
        elif key == "swe_counterstreaming_electrons":
            data = np.full(n, fillval, dtype=np.uint8)
            dims = ["epoch"]
            dataset[key] = xr.DataArray(data, dims=dims, attrs=attrs)
-        elif key.startswith(("hit", "swe")):
+        elif key.startswith("swe"):
+            data = np.full((n, 8), fillval, dtype=np.uint32)
+            dims = ["epoch", "esa_step"]
+            dataset[key] = xr.DataArray(data, dims=dims, attrs=attrs)
+        elif key.startswith("hit"):
            data = np.full(n, fillval, dtype=np.uint32)
            dims = ["epoch"]
            dataset[key] = xr.DataArray(data, dims=dims, attrs=attrs)
@@ -77,11 +120,16 @@ def create_xarray_from_records(records: list[dict]) -> xr.Dataset:
 
    # Populate the dataset variables
    for i, record in enumerate(records):
-        for key in record.keys():
-            val = record[key]
-            if key.startswith("mag"):
-                dataset[key].data[i] = [direction for direction in val]
-            elif key in instrument_keys:
+        for key, val in record.items():
+            if key in ["apid", "met", "met_in_utc", "ttj2000ns"]:
+                continue
+            elif key.startswith("mag"):
+                dataset[key].data[i, :] = val
+            elif key.startswith("swe_normalized_counts"):
+                dataset[key].data[i, :] = val
+            elif key.startswith("codicehi"):
+                dataset[key].data[i, :, :, :] = val
+            else:
                dataset[key].data[i] = val
 
    return dataset