imap-processing 0.14.0-py3-none-any.whl → 0.16.0-py3-none-any.whl

This diff shows the contents of publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.


Files changed (81)
  1. imap_processing/_version.py +2 -2
  2. imap_processing/cdf/config/imap_codice_global_cdf_attrs.yaml +60 -35
  3. imap_processing/cdf/config/imap_codice_l1a_variable_attrs.yaml +765 -287
  4. imap_processing/cdf/config/imap_codice_l1b_variable_attrs.yaml +1577 -288
  5. imap_processing/cdf/config/imap_codice_l2_variable_attrs.yaml +1004 -0
  6. imap_processing/cdf/config/imap_enamaps_l2-common_variable_attrs.yaml +28 -0
  7. imap_processing/cdf/config/imap_enamaps_l2-healpix_variable_attrs.yaml +1 -1
  8. imap_processing/cdf/config/imap_enamaps_l2-rectangular_variable_attrs.yaml +18 -0
  9. imap_processing/cdf/config/imap_glows_l2_variable_attrs.yaml +39 -3
  10. imap_processing/cdf/config/imap_ialirt_global_cdf_attrs.yaml +18 -0
  11. imap_processing/cdf/config/imap_ialirt_l1_variable_attrs.yaml +353 -0
  12. imap_processing/cdf/config/imap_idex_l1a_variable_attrs.yaml +7 -0
  13. imap_processing/cdf/config/imap_idex_l1b_variable_attrs.yaml +11 -0
  14. imap_processing/cdf/config/imap_idex_l2a_variable_attrs.yaml +4 -0
  15. imap_processing/cdf/config/imap_idex_l2c_variable_attrs.yaml +7 -3
  16. imap_processing/cdf/config/imap_lo_global_cdf_attrs.yaml +6 -0
  17. imap_processing/cdf/config/imap_mag_l2_variable_attrs.yaml +114 -0
  18. imap_processing/cdf/config/imap_swe_global_cdf_attrs.yaml +11 -5
  19. imap_processing/cdf/config/imap_swe_l1b_variable_attrs.yaml +23 -1
  20. imap_processing/cdf/config/imap_ultra_l1b_variable_attrs.yaml +4 -0
  21. imap_processing/cdf/config/imap_ultra_l1c_variable_attrs.yaml +2 -2
  22. imap_processing/cli.py +145 -80
  23. imap_processing/codice/codice_l1a.py +140 -84
  24. imap_processing/codice/codice_l1b.py +91 -18
  25. imap_processing/codice/codice_l2.py +81 -0
  26. imap_processing/codice/constants.py +68 -0
  27. imap_processing/ena_maps/ena_maps.py +43 -1
  28. imap_processing/glows/l2/glows_l2_data.py +3 -6
  29. imap_processing/hi/hi_l1a.py +447 -0
  30. imap_processing/hi/{l1b/hi_l1b.py → hi_l1b.py} +1 -1
  31. imap_processing/hi/{l1c/hi_l1c.py → hi_l1c.py} +21 -21
  32. imap_processing/hi/{l2/hi_l2.py → hi_l2.py} +13 -13
  33. imap_processing/hi/utils.py +6 -6
  34. imap_processing/hit/l1b/hit_l1b.py +30 -11
  35. imap_processing/ialirt/constants.py +38 -0
  36. imap_processing/ialirt/l0/parse_mag.py +1 -1
  37. imap_processing/ialirt/l0/process_codice.py +91 -0
  38. imap_processing/ialirt/l0/process_hit.py +12 -21
  39. imap_processing/ialirt/l0/process_swapi.py +172 -23
  40. imap_processing/ialirt/l0/process_swe.py +3 -10
  41. imap_processing/ialirt/utils/constants.py +62 -0
  42. imap_processing/ialirt/utils/create_xarray.py +135 -0
  43. imap_processing/idex/idex_l2c.py +9 -9
  44. imap_processing/lo/l1b/lo_l1b.py +6 -1
  45. imap_processing/lo/l1c/lo_l1c.py +22 -13
  46. imap_processing/lo/l2/lo_l2.py +213 -0
  47. imap_processing/mag/l1c/mag_l1c.py +8 -1
  48. imap_processing/mag/l2/mag_l2.py +6 -2
  49. imap_processing/mag/l2/mag_l2_data.py +7 -5
  50. imap_processing/swe/l1a/swe_l1a.py +6 -6
  51. imap_processing/swe/l1b/swe_l1b.py +70 -11
  52. imap_processing/ultra/l0/decom_ultra.py +1 -1
  53. imap_processing/ultra/l0/ultra_utils.py +0 -4
  54. imap_processing/ultra/l1b/badtimes.py +7 -3
  55. imap_processing/ultra/l1b/cullingmask.py +7 -2
  56. imap_processing/ultra/l1b/de.py +26 -12
  57. imap_processing/ultra/l1b/lookup_utils.py +8 -7
  58. imap_processing/ultra/l1b/ultra_l1b.py +59 -48
  59. imap_processing/ultra/l1b/ultra_l1b_culling.py +50 -18
  60. imap_processing/ultra/l1b/ultra_l1b_extended.py +4 -4
  61. imap_processing/ultra/l1c/helio_pset.py +53 -0
  62. imap_processing/ultra/l1c/spacecraft_pset.py +20 -12
  63. imap_processing/ultra/l1c/ultra_l1c.py +49 -26
  64. imap_processing/ultra/l1c/ultra_l1c_pset_bins.py +40 -2
  65. imap_processing/ultra/l2/ultra_l2.py +47 -2
  66. imap_processing/ultra/lookup_tables/Angular_Profiles_FM90_RightSlit.csv +524 -526
  67. imap_processing/ultra/utils/ultra_l1_utils.py +51 -10
  68. {imap_processing-0.14.0.dist-info → imap_processing-0.16.0.dist-info}/METADATA +2 -2
  69. {imap_processing-0.14.0.dist-info → imap_processing-0.16.0.dist-info}/RECORD +72 -69
  70. imap_processing/hi/l1a/__init__.py +0 -0
  71. imap_processing/hi/l1a/hi_l1a.py +0 -98
  72. imap_processing/hi/l1a/histogram.py +0 -152
  73. imap_processing/hi/l1a/science_direct_event.py +0 -214
  74. imap_processing/hi/l1b/__init__.py +0 -0
  75. imap_processing/hi/l1c/__init__.py +0 -0
  76. imap_processing/hi/l2/__init__.py +0 -0
  77. imap_processing/ialirt/l0/process_codicehi.py +0 -156
  78. imap_processing/ialirt/l0/process_codicelo.py +0 -41
  79. {imap_processing-0.14.0.dist-info → imap_processing-0.16.0.dist-info}/LICENSE +0 -0
  80. {imap_processing-0.14.0.dist-info → imap_processing-0.16.0.dist-info}/WHEEL +0 -0
  81. {imap_processing-0.14.0.dist-info → imap_processing-0.16.0.dist-info}/entry_points.txt +0 -0
imap_processing/hi/{l2/hi_l2.py → hi_l2.py}
@@ -27,11 +27,11 @@ def hi_l2(
 
     Parameters
     ----------
-    psets : list of str or Path
+    psets : list of str or pathlib.Path
         List of input PSETs to make a map from.
-    geometric_factors_path : str or Path
+    geometric_factors_path : str or pathlib.Path
        Where to get the geometric factors from.
-    esa_energies_path : str or Path
+    esa_energies_path : str or pathlib.Path
        Where to get the energies from.
     descriptor : str
        Output filename descriptor. Contains full configuration for the options
@@ -39,7 +39,7 @@ def hi_l2(
 
     Returns
     -------
-    l2_dataset : list[xr.Dataset]
+    l2_dataset : list[xarray.Dataset]
        Level 2 IMAP-Hi dataset ready to be written to a CDF file.
     """
     # TODO: parse descriptor to determine map configuration
@@ -77,19 +77,19 @@ def generate_hi_map(
 
     Parameters
     ----------
-    psets : list of str or Path
+    psets : list of str or pathlib.Path
        List of input PSETs to make a map from.
-    geometric_factors_path : str or Path
+    geometric_factors_path : str or pathlib.Path
        Where to get the geometric factors from.
-    esa_energies_path : str or Path
+    esa_energies_path : str or pathlib.Path
        Where to get the energies from.
-    cg_corrected : bool, optional
+    cg_corrected : bool, Optional
        Whether to apply Compton-Getting correction to the energies. Defaults to
        False.
-    direction : str, optional
+    direction : str, Optional
        Apply filtering to PSET data include ram or anti-ram or full spin data.
        Defaults to "full".
-    map_spacing : int, optional
+    map_spacing : int, Optional
        Pixel spacing, in degrees, of the output map in degrees. Defaults to 4.
 
     Returns
@@ -209,11 +209,11 @@ def calculate_ena_intensity(
 
     Parameters
     ----------
-    map_ds : xr.Dataset
+    map_ds : xarray.Dataset
        Map dataset that has ena_signal_rate fields calculated.
-    geometric_factors_path : str or Path
+    geometric_factors_path : str or pathlib.Path
        Where to get the geometric factors from.
-    esa_energies_path : str or Path
+    esa_energies_path : str or pathlib.Path
        Where to get the energies from.
 
     Returns
imap_processing/hi/utils.py
@@ -116,15 +116,15 @@ def full_dataarray(
        Variable name.
     attrs : dict
        CDF variable attributes. Usually retrieved from ImapCdfAttributes.
-    coords : dict, optional
+    coords : dict, Optional
        Coordinate variables for the Dataset. This function will extract the
        sizes of each dimension defined by the attributes dictionary to determine
        the size of the DataArray to be created.
-    shape : int or tuple, optional
+    shape : int or tuple, Optional
        Shape of ndarray data array to instantiate in the xarray.DataArray. If
        shape is provided, the DataArray created will have this shape regardless
        of whether coordinates are provided or not.
-    fill_value : optional, float
+    fill_value : Optional, float
        Override the fill value that the DataArray will be filled with. If not
        supplied, the "FILLVAL" value from `attrs` will be used.
 
@@ -171,15 +171,15 @@ def create_dataset_variables(
     ----------
     variable_names : list[str]
        List of variable names to create.
-    variable_shape : int or sequence of int, optional
+    variable_shape : int or Sequence of int, Optional
        Shape of the new variables data ndarray. If not provided the shape will
        attempt to be derived from the coords dictionary.
-    coords : dict, optional
+    coords : dict, Optional
        Coordinate variables for the Dataset. If `variable_shape` is not provided
        the dataset variables created will use this dictionary along with variable
        attributes from the CdfAttributeManager to determine the shapes of the
        dataset variables created.
-    fill_value : optional, number
+    fill_value : Optional, float
        Value to fill the new variables data arrays with. If not supplied,
        the fill value is pulled from the CDF variable attributes "FILLVAL"
        attribute.
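
The full_dataarray docstring above is easy to picture with a toy version. The sketch below is hypothetical, not the package's implementation; it only mirrors the documented behavior, namely that the shape falls back to the coordinate sizes and the fill value falls back to the attributes' "FILLVAL" entry:

    import numpy as np
    import xarray as xr

    def full_dataarray_sketch(name, attrs, coords=None, shape=None, fill_value=None):
        # Shape falls back to the sizes of the supplied coordinates.
        if shape is None:
            shape = tuple(len(v) for v in coords.values())
        # Fill value falls back to the attributes' "FILLVAL" entry.
        if fill_value is None:
            fill_value = attrs["FILLVAL"]
        data = np.full(shape, fill_value, dtype=np.float32)
        return xr.DataArray(
            data,
            dims=tuple(coords) if coords else None,
            coords=coords,
            name=name,
            attrs=attrs,
        )

    counts = full_dataarray_sketch(
        "counts", attrs={"FILLVAL": -1.0e31}, coords={"epoch": np.arange(5)}
    )
    print(counts.values)  # five elements, all -1e31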
imap_processing/hit/l1b/hit_l1b.py
@@ -366,7 +366,8 @@ def subset_data_for_sectored_counts(
     A set of sectored data starts with hydrogen and ends with iron and correspond to
     the mod 10 values 0-9. The livetime values from the previous 10 minutes are used
     to calculate the rates for each set since those counts are transmitted 10 minutes
-    after they were collected.
+    after they were collected. Therefore, only complete sets of sectored counts where
+    livetime from the previous 10 minutes are available are included in the output.
 
     Parameters
     ----------
@@ -378,7 +379,7 @@ def subset_data_for_sectored_counts(
     Returns
     -------
     tuple[xr.Dataset, xr.DataArray]
-        Subsetted L1A counts dataset and corresponding livetime values.
+        Dataset of complete sectored counts and corresponding livetime values.
     """
     # Identify 10-minute intervals of complete sectored counts.
     bin_size = 10
@@ -392,16 +393,34 @@ def subset_data_for_sectored_counts(
     start_indices = np.where(matches)[0]
 
     # Filter out start indices that are less than or equal to the bin size
-    # since the previous 10 minutes are needed
-    start_indices = start_indices[start_indices > bin_size]
-    data_slice = slice(start_indices[0], start_indices[-1] + bin_size)
-
-    # Subset data to include only complete sets of sectored counts
-    l1b_sectored_rates_dataset = l1a_counts_dataset.isel(epoch=data_slice)
+    # since the previous 10 minutes are needed for calculating rates
+    if start_indices.size == 0:
+        logger.error(
+            "No data to process - valid start indices not found for "
+            "complete sectored counts."
+        )
+        raise ValueError("No valid start indices found for complete sectored counts.")
+    else:
+        start_indices = start_indices[start_indices >= bin_size]
+
+    # Subset data for complete sets of sectored counts.
+    # Each set of sectored counts is 10 minutes long, so we take the indices
+    # starting from the start indices and extend to the bin size of 10.
+    # This creates a 1D array of indices that correspond to the complete
+    # sets of sectored counts which is used to filter the L1A dataset and
+    # create the L1B sectored rates dataset.
+    data_indices = np.concatenate(
+        [np.arange(idx, idx + bin_size) for idx in start_indices]
+    )
+    l1b_sectored_rates_dataset = l1a_counts_dataset.isel(epoch=data_indices)
 
-    # Subset livetime staggered from sectored counts by 10 minutes
-    livetime_slice = slice(start_indices[0] - bin_size, start_indices[-1])
-    livetime = livetime[livetime_slice]
+    # Subset livetime values corresponding to the previous 10 minutes
+    # for each start index. This ensures the livetime data aligns correctly
+    # with the sectored counts for rate calculations.
+    livetime_indices = np.concatenate(
+        [np.arange(idx - bin_size, idx) for idx in start_indices]
+    )
+    livetime = livetime.isel(epoch=livetime_indices)
 
     return l1b_sectored_rates_dataset, livetime
 
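To make the new index construction concrete, here is a small standalone rerun of the same NumPy pattern with made-up start indices, showing how the epoch indices and the livetime indices come out staggered by one 10-minute bin:

    import numpy as np

    bin_size = 10
    start_indices = np.array([10, 40])  # hypothetical starts of complete mod-10 sets

    # Epochs belonging to each complete 10-minute set of sectored counts.
    data_indices = np.concatenate(
        [np.arange(i, i + bin_size) for i in start_indices]
    )  # -> [10..19, 40..49]

    # Livetime comes from the 10 minutes *before* each set, since sectored
    # counts are transmitted 10 minutes after they were collected.
    livetime_indices = np.concatenate(
        [np.arange(i - bin_size, i) for i in start_indices]
    )  # -> [0..9, 30..39]

    print(data_indices)
    print(livetime_indices)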
imap_processing/ialirt/constants.py (new file)
@@ -0,0 +1,38 @@
+"""Module for constants and useful shared classes used in I-ALiRT processing."""
+
+from dataclasses import dataclass
+
+import numpy as np
+
+
+@dataclass(frozen=True)
+class IalirtSwapiConstants:
+    """
+    Constants for I-ALiRT SWAPI which can be used across different levels or classes.
+
+    Attributes
+    ----------
+    BOLTZ: float
+        Boltzmann constant [J/K]
+    AT_MASS: float
+        Atomic mass [kg]
+    PROT_MASS: float
+        Mass of proton [kg]
+    EFF_AREA: float
+        Instrument effective area [m^2]
+    AZ_FOV: float
+        Azimuthal width of the field of view for solar wind [radians]
+    FWHM_WIDTH: float
+        Full Width at Half Maximum of energy width [unitless]
+    SPEED_EW: float
+        Speed width of energy passband [unitless]
+    """
+
+    # Scientific constants used in optimization model
+    boltz = 1.380649e-23  # Boltzmann constant, J/K
+    at_mass = 1.6605390666e-27  # atomic mass, kg
+    prot_mass = 1.007276466621 * at_mass  # mass of proton, kg
+    eff_area = 3.3e-5 * 1e-4  # effective area, meters squared
+    az_fov = np.deg2rad(30)  # azimuthal width of the field of view, radians
+    fwhm_width = 0.085  # FWHM of energy width
+    speed_ew = 0.5 * fwhm_width  # speed width of energy passband
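
As a quick standalone check of these constants (a worked example, not package code): the count-rate model introduced in process_swapi.py below computes the proton thermal speed as v_th = sqrt(2 * boltz * temp / prot_mass), which for a typical 1e5 K solar wind proton temperature gives roughly 40.6 km/s:

    import numpy as np

    from imap_processing.ialirt.constants import IalirtSwapiConstants as Consts

    # Proton thermal speed v_th = sqrt(2 * k_B * T / m_p) at T = 1e5 K.
    temp = 1e5  # K
    v_th = np.sqrt(2 * Consts.boltz * temp / Consts.prot_mass)
    print(f"{v_th / 1000:.1f} km/s")  # ~40.6 km/s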
imap_processing/ialirt/l0/parse_mag.py
@@ -390,7 +390,7 @@ def process_packet(
         {
             "apid": 478,
             "met": int(met.values.min()),
-            "utc": met_to_utc(met.values.min()).split(".")[0],
+            "met_in_utc": met_to_utc(met.values.min()).split(".")[0],
             "ttj2000ns": int(met_to_ttj2000ns(met.values.min())),
             "mag_4s_b_gse": [Decimal("0.0") for _ in range(3)],
             "mag_4s_b_gsm": [Decimal("0.0") for _ in range(3)],
imap_processing/ialirt/l0/process_codice.py (new file)
@@ -0,0 +1,91 @@
+"""Functions to support I-ALiRT CoDICE processing."""
+
+import logging
+from decimal import Decimal
+from typing import Any
+
+import xarray as xr
+
+from imap_processing.codice import constants
+from imap_processing.ialirt.utils.time import calculate_time
+from imap_processing.spice.time import met_to_ttj2000ns, met_to_utc
+
+logger = logging.getLogger(__name__)
+
+FILLVAL_FLOAT32 = Decimal(str(-1.0e31))
+
+
+def process_codice(
+    dataset: xr.Dataset,
+) -> tuple[list[dict[str, Any]], list[dict[str, Any]]]:
+    """
+    Create final data products.
+
+    Parameters
+    ----------
+    dataset : xr.Dataset
+        Decommed L0 data.
+
+    Returns
+    -------
+    codice_data : list[dict]
+        Dictionary of final data product.
+
+    Notes
+    -----
+    This function is incomplete and will need to be updated to include the
+    necessary calculations and data products.
+    - Calculate rates (assume 4 minutes per group)
+    - Calculate L2 CoDICE pseudodensities (pg 37 of Algorithm Document)
+    - Calculate the public data products
+    """
+    # For I-ALiRT SIT, the test data being used has all zeros and thus no
+    # groups can be found, thus there is no data to process
+    # TODO: Once I-ALiRT test data is acquired that actually has data in it,
+    # this can be turned back on
+    # codicelo_data = create_ialirt_dataset(CODICEAPID.COD_LO_IAL, dataset)
+    # codicehi_data = create_ialirt_dataset(CODICEAPID.COD_HI_IAL, dataset)
+
+    # TODO: calculate rates
+    # This will be done in codice.codice_l1b
+
+    # TODO: calculate L2 CoDICE pseudodensities
+    # This will be done in codice.codice_l2
+
+    # TODO: calculate the public data products
+    # This will be done in this module
+
+    # Create mock dataset for I-ALiRT SIT
+    # TODO: Once I-ALiRT test data is acquired that actually has data in it,
+    # we should be able to properly populate the I-ALiRT data, but for
+    # now, just create lists of dicts with FILLVALs
+    cod_lo_data = []
+    cod_hi_data = []
+
+    for epoch in range(len(dataset.epoch.data)):
+        sc_sclk_sec = dataset.sc_sclk_sec.data[epoch]
+        sc_sclk_sub_sec = dataset.sc_sclk_sub_sec.data[epoch]
+        met = calculate_time(sc_sclk_sec, sc_sclk_sub_sec, 256)
+        utc = met_to_utc(met).split(".")[0]
+        ttj2000ns = int(met_to_ttj2000ns(met))
+
+        epoch_data = {
+            "apid": int(dataset.pkt_apid[epoch].data),
+            "met": met,
+            "met_to_utc": utc,
+            "ttj2000ns": ttj2000ns,
+        }
+
+        # Add in CoDICE-Lo specific data
+        cod_lo_epoch_data = epoch_data.copy()
+        for field in constants.CODICE_LO_IAL_DATA_FIELDS:
+            cod_lo_epoch_data[f"codicelo_{field}"] = FILLVAL_FLOAT32
+        cod_lo_data.append(cod_lo_epoch_data)
+
+        # Add in CoDICE-Hi specific data
+        cod_hi_epoch_data = epoch_data.copy()
+        for field in constants.CODICE_HI_IAL_DATA_FIELDS:
+            cod_hi_epoch_data[f"codicehi_{field}"] = FILLVAL_FLOAT32
+        cod_hi_data.append(cod_hi_epoch_data)
+
+    return cod_lo_data, cod_hi_data
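
calculate_time itself is not shown in this diff, but its call sites here and in process_swapi.py pass coarse spacecraft-clock seconds, subseconds, and 256, which suggests the subseconds are 1/256-second ticks. A hypothetical sketch of that conversion (the real helper lives in imap_processing.ialirt.utils.time):

    def calculate_time_sketch(sclk_sec: int, sclk_sub_sec: int, resolution: int = 256) -> float:
        """Hypothetical stand-in for calculate_time: combine coarse seconds
        and fractional clock ticks into mission elapsed time in seconds."""
        return sclk_sec + sclk_sub_sec / resolution

    print(calculate_time_sketch(553333337, 128))  # 553333337.5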
imap_processing/ialirt/l0/process_hit.py
@@ -1,7 +1,6 @@
 """Functions to support HIT processing."""
 
 import logging
-from decimal import Decimal
 
 import numpy as np
 import xarray as xr
@@ -170,27 +169,19 @@ def process_hit(xarray_data: xr.Dataset) -> list[dict]:
         {
             "apid": 478,
             "met": int(met),
-            "utc": met_to_utc(met).split(".")[0],
+            "met_in_utc": met_to_utc(met).split(".")[0],
             "ttj2000ns": int(met_to_ttj2000ns(met)),
-            "hit_e_a_side_low_en": Decimal(
-                str(l1["IALRT_RATE_1"] + l1["IALRT_RATE_2"])
-            ),
-            "hit_e_a_side_med_en": Decimal(
-                str(l1["IALRT_RATE_5"] + l1["IALRT_RATE_6"])
-            ),
-            "hit_e_a_side_high_en": Decimal(str(l1["IALRT_RATE_7"])),
-            "hit_e_b_side_low_en": Decimal(
-                str(l1["IALRT_RATE_11"] + l1["IALRT_RATE_12"])
-            ),
-            "hit_e_b_side_med_en": Decimal(
-                str(l1["IALRT_RATE_15"] + l1["IALRT_RATE_16"])
-            ),
-            "hit_e_b_side_high_en": Decimal(str(l1["IALRT_RATE_17"])),
-            "hit_h_omni_med_en": Decimal(str(l1["H_12_15"] + l1["H_15_70"])),
-            "hit_h_a_side_high_en": Decimal(str(l1["IALRT_RATE_8"])),
-            "hit_h_b_side_high_en": Decimal(str(l1["IALRT_RATE_18"])),
-            "hit_he_omni_low_en": Decimal(str(l1["HE4_06_08"])),
-            "hit_he_omni_high_en": Decimal(str(l1["HE4_15_70"])),
+            "hit_e_a_side_low_en": int(l1["IALRT_RATE_1"] + l1["IALRT_RATE_2"]),
+            "hit_e_a_side_med_en": int(l1["IALRT_RATE_5"] + l1["IALRT_RATE_6"]),
+            "hit_e_a_side_high_en": int(l1["IALRT_RATE_7"]),
+            "hit_e_b_side_low_en": int(l1["IALRT_RATE_11"] + l1["IALRT_RATE_12"]),
+            "hit_e_b_side_med_en": int(l1["IALRT_RATE_15"] + l1["IALRT_RATE_16"]),
+            "hit_e_b_side_high_en": int(l1["IALRT_RATE_17"]),
+            "hit_h_omni_med_en": int(l1["H_12_15"] + l1["H_15_70"]),
+            "hit_h_a_side_high_en": int(l1["IALRT_RATE_8"]),
+            "hit_h_b_side_high_en": int(l1["IALRT_RATE_18"]),
+            "hit_he_omni_low_en": int(l1["HE4_06_08"]),
+            "hit_he_omni_high_en": int(l1["HE4_15_70"]),
         }
     )
 
imap_processing/ialirt/l0/process_swapi.py
@@ -1,20 +1,144 @@
 """Functions to support I-ALiRT SWAPI processing."""
 
 import logging
+from decimal import Decimal
+from typing import Optional
 
 import numpy as np
+import pandas as pd
 import xarray as xr
-from xarray import DataArray
+from scipy.optimize import curve_fit
+from scipy.special import erf
 
+from imap_processing import imap_module_directory
+from imap_processing.ialirt.constants import IalirtSwapiConstants as Consts
 from imap_processing.ialirt.utils.grouping import find_groups
-
-# from imap_processing.swapi.l1.swapi_l1 import process_sweep_data
-# from imap_processing.swapi.l2.swapi_l2 import TIME_PER_BIN
+from imap_processing.ialirt.utils.time import calculate_time
+from imap_processing.spice.time import met_to_ttj2000ns, met_to_utc
+from imap_processing.swapi.l1.swapi_l1 import process_sweep_data
+from imap_processing.swapi.l2.swapi_l2 import TIME_PER_BIN
 
 logger = logging.getLogger(__name__)
 
 
-def process_swapi_ialirt(unpacked_data: xr.Dataset) -> dict[str, DataArray]:
+def count_rate(
+    energy_pass: float, speed: float, density: float, temp: float
+) -> float | np.ndarray:
+    """
+    Compute SWAPI count rate for provided energy passband, speed, density and temp.
+
+    This model for coincidence count rate was developed by the SWAPI instrument
+    science team, detailed on page 52 of the IMAP SWAPI Instrument Algorithms Document.
+
+    Parameters
+    ----------
+    energy_pass : float
+        Energy passband [eV].
+    speed : float
+        Bulk solar wind speed [km/s].
+    density : float
+        Proton density [cm^-3].
+    temp : float
+        Temperature [K].
+
+    Returns
+    -------
+    count_rate : float | np.ndarray
+        Particle coincidence count rate.
+    """
+    # thermal velocity of solar wind ions
+    thermal_velocity = np.sqrt(2 * Consts.boltz * temp / Consts.prot_mass)
+    beta = 1 / (thermal_velocity**2)
+    # convert energy to Joules
+    center_speed = np.sqrt(2 * energy_pass * 1.60218e-19 / Consts.prot_mass)
+    speed = speed * 1000  # convert km/s to m/s
+    density = density * 1e6  # convert 1/cm**3 to 1/m**3
+
+    return (
+        (density * Consts.eff_area * (beta / np.pi) ** (3 / 2))
+        * (np.exp(-beta * (center_speed**2 + speed**2 - 2 * center_speed * speed)))
+        * np.sqrt(np.pi / (beta * speed * center_speed))
+        * erf(np.sqrt(beta * speed * center_speed) * (Consts.az_fov / 2))
+        * (
+            center_speed**4
+            * Consts.speed_ew
+            * np.arcsin(thermal_velocity / center_speed)
+        )
+    )
+
+
+def optimize_pseudo_parameters(
+    count_rates: np.ndarray,
+    count_rate_error: np.ndarray,
+    energy_passbands: Optional[np.ndarray] = None,
+) -> dict[str, list[float]]:
+    """
+    Find the pseudo speed (u), density (n) and temperature (T) of solar wind particles.
+
+    Fit a curve to calculated count rate values as a function of energy passband.
+
+    Parameters
+    ----------
+    count_rates : np.ndarray
+        Particle coincidence count rates.
+    count_rate_error : np.ndarray
+        Standard deviation of the coincidence count rates parameter.
+    energy_passbands : np.ndarray, default None
+        Energy passbands, passed in only for testing purposes.
+
+    Returns
+    -------
+    solution_dict : dict
+        Dictionary containing the optimized speed, density, and temperature values for
+        each sweep included in the input count_rates array.
+    """
+    if not energy_passbands:
+        # Read in energy passbands
+        energy_data = pd.read_csv(
+            f"{imap_module_directory}/tests/swapi/lut/imap_swapi_esa-unit"
+            f"-conversion_20250211_v000.csv"
+        )
+        energy_passbands = (
+            energy_data["Energy"][0:63]
+            .replace(",", "", regex=True)
+            .to_numpy()
+            .astype(float)
+        )
+
+    # Initial guess pulled from page 52 of the IMAP SWAPI Instrument Algorithms Document
+    initial_param_guess = np.array([550, 5.27, 1e5])
+    solution_dict = {  # type: ignore
+        "pseudo_speed": [],
+        "pseudo_density": [],
+        "pseudo_temperature": [],
+    }
+
+    for sweep in np.arange(count_rates.shape[0]):
+        current_sweep_count_rates = count_rates[sweep, :]
+        current_sweep_count_rate_errors = count_rate_error[sweep, :]
+        # Find the max count rate, and use the 6 points surrounding it (inclusive)
+        max_index = np.argmax(current_sweep_count_rates)
+        sol = curve_fit(
+            f=count_rate,
+            xdata=energy_passbands.take(
+                range(max_index - 3, max_index + 3), mode="wrap"
+            ),
+            ydata=current_sweep_count_rates.take(
+                range(max_index - 3, max_index + 3), mode="wrap"
+            ),
+            sigma=current_sweep_count_rate_errors.take(
+                range(max_index - 3, max_index + 3), mode="wrap"
+            ),
+            p0=initial_param_guess,
+        )
+        solution_dict["pseudo_speed"].append(sol[0][0])
+        solution_dict["pseudo_density"].append(sol[0][1])
+        solution_dict["pseudo_temperature"].append(sol[0][2])
+
+    return solution_dict
+
+
+def process_swapi_ialirt(unpacked_data: xr.Dataset) -> list[dict]:
     """
     Extract I-ALiRT variables and calculate coincidence count rate.
 
@@ -32,7 +156,21 @@ def process_swapi_ialirt(unpacked_data: xr.Dataset) -> dict[str, DataArray]:
 
     sci_dataset = unpacked_data.sortby("epoch", ascending=True)
 
-    grouped_dataset = find_groups(sci_dataset, (0, 11), "swapi_seq_number", "swapi_acq")
+    met = calculate_time(
+        sci_dataset["sc_sclk_sec"], sci_dataset["sc_sclk_sub_sec"], 256
+    )
+
+    # Add required parameters.
+    sci_dataset["met"] = met
+    met_values = []
+
+    grouped_dataset = find_groups(sci_dataset, (0, 11), "swapi_seq_number", "met")
+
+    if grouped_dataset.group.size == 0:
+        logger.warning(
+            "There was an issue with the SWAPI grouping process, returning empty data."
+        )
+        return [{}]
 
     for group in np.unique(grouped_dataset["group"]):
         # Sequence values for the group should be 0-11 with no duplicates.
@@ -40,6 +178,10 @@ def process_swapi_ialirt(unpacked_data: xr.Dataset) -> dict[str, DataArray]:
            (grouped_dataset["group"] == group)
        ]
 
+        met_values.append(
+            int(grouped_dataset["met"][(grouped_dataset["group"] == group).values][0])
+        )
+
        # Ensure no duplicates and all values from 0 to 11 are present
        if not np.array_equal(seq_values.astype(int), np.arange(12)):
            logger.info(
@@ -48,22 +190,29 @@ def process_swapi_ialirt(unpacked_data: xr.Dataset) -> dict[str, DataArray]:
            )
            continue
 
-    total_packets = len(grouped_dataset["swapi_seq_number"].data)
-
-    # It takes 12 sequence data to make one full SWAPI sweep
-    total_sequence = 12
-    total_full_sweeps = total_packets // total_sequence
-
-    met_values = grouped_dataset["swapi_shcoarse"].data.reshape(total_full_sweeps, 12)[
-        :, 0
-    ]
-
-    # raw_coin_count = process_sweep_data(grouped_dataset, "coin_cnt")
-    # raw_coin_rate = raw_coin_count / TIME_PER_BIN
-
-    swapi_data = {
-        "met": met_values
-        # more variables to go here
-    }
+    raw_coin_count = process_sweep_data(grouped_dataset, "swapi_coin_cnt")
+    raw_coin_rate = raw_coin_count / TIME_PER_BIN
+    count_rate_error = np.sqrt(raw_coin_count) / TIME_PER_BIN
+
+    solution = optimize_pseudo_parameters(raw_coin_rate, count_rate_error)
+
+    swapi_data = []
+
+    for entry in np.arange(0, len(solution["pseudo_speed"])):
+        swapi_data.append(
+            {
+                "apid": 478,
+                "met": met_values[entry],
+                "met_in_utc": met_to_utc(met_values[entry]).split(".")[0],
+                "ttj2000ns": int(met_to_ttj2000ns(met_values[entry])),
+                "swapi_pseudo_proton_speed": Decimal(solution["pseudo_speed"][entry]),
+                "swapi_pseudo_proton_density": Decimal(
+                    solution["pseudo_density"][entry]
+                ),
+                "swapi_pseudo_proton_temperature": Decimal(
+                    solution["pseudo_temperature"][entry]
+                ),
+            }
+        )
 
     return swapi_data
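
The count_rate_error fed into the fit above is the usual Poisson uncertainty: for N raw counts in a bin, the rate error is sqrt(N) divided by the accumulation time per bin. A worked example with an illustrative time constant (the real TIME_PER_BIN comes from imap_processing.swapi.l2.swapi_l2):

    import numpy as np

    time_per_bin = 0.167  # seconds; illustrative only, not the package's constant
    raw_coin_count = np.array([100, 400, 900])

    raw_coin_rate = raw_coin_count / time_per_bin               # counts/s
    count_rate_error = np.sqrt(raw_coin_count) / time_per_bin   # Poisson sigma

    print(raw_coin_rate)     # approx [ 598.8 2395.2 5389.2]
    print(count_rate_error)  # approx [ 59.9 119.8 179.6]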
imap_processing/ialirt/l0/process_swe.py
@@ -1,7 +1,6 @@
 """Functions to support I-ALiRT SWE processing."""
 
 import logging
-from decimal import Decimal
 
 import numpy as np
 import pandas as pd
@@ -547,16 +546,10 @@ def process_swe(accumulated_data: xr.Dataset, in_flight_cal_files: list) -> list
         {
             "apid": 478,
             "met": int(grouped["met"].min()),
-            "utc": met_to_utc(grouped["met"].min()).split(".")[0],
+            "met_in_utc": met_to_utc(grouped["met"].min()).split(".")[0],
             "ttj2000ns": int(met_to_ttj2000ns(grouped["met"].min())),
-            **{
-                f"swe_normalized_counts_quarter_1_esa_{i}": Decimal(str(val))
-                for i, val in enumerate(summed_first)
-            },
-            **{
-                f"swe_normalized_counts_quarter_2_esa_{i}": Decimal(str(val))
-                for i, val in enumerate(summed_second)
-            },
+            "swe_normalized_counts_half_1_esa": [int(val) for val in summed_first],
+            "swe_normalized_counts_half_2_esa": [int(val) for val in summed_second],
            "swe_counterstreaming_electrons": max(bde_first_half, bde_second_half),
         }
     )