imap-processing 0.17.0__py3-none-any.whl → 0.18.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of imap-processing might be problematic. Click here for more details.

Files changed (89) hide show
  1. imap_processing/_version.py +2 -2
  2. imap_processing/ccsds/excel_to_xtce.py +12 -0
  3. imap_processing/cdf/config/imap_codice_global_cdf_attrs.yaml +6 -6
  4. imap_processing/cdf/config/imap_codice_l1a_variable_attrs.yaml +11 -0
  5. imap_processing/cdf/config/imap_codice_l1b_variable_attrs.yaml +11 -0
  6. imap_processing/cdf/config/imap_codice_l2_variable_attrs.yaml +24 -0
  7. imap_processing/cdf/config/imap_hit_l1a_variable_attrs.yaml +163 -100
  8. imap_processing/cdf/config/imap_hit_l2_variable_attrs.yaml +4 -4
  9. imap_processing/cdf/config/imap_ialirt_l1_variable_attrs.yaml +97 -54
  10. imap_processing/cdf/config/imap_idex_l2b_variable_attrs.yaml +119 -36
  11. imap_processing/cdf/config/imap_idex_l2c_variable_attrs.yaml +16 -90
  12. imap_processing/cdf/config/imap_lo_global_cdf_attrs.yaml +30 -0
  13. imap_processing/cdf/config/imap_mag_global_cdf_attrs.yaml +15 -1
  14. imap_processing/cdf/config/imap_ultra_global_cdf_attrs.yaml +60 -0
  15. imap_processing/cdf/config/imap_ultra_l1b_variable_attrs.yaml +91 -11
  16. imap_processing/cli.py +28 -5
  17. imap_processing/codice/codice_l1a.py +36 -48
  18. imap_processing/codice/codice_l1b.py +1 -1
  19. imap_processing/codice/codice_l2.py +0 -9
  20. imap_processing/codice/constants.py +481 -498
  21. imap_processing/hit/l0/decom_hit.py +2 -2
  22. imap_processing/hit/l1a/hit_l1a.py +64 -24
  23. imap_processing/hit/l1b/constants.py +5 -0
  24. imap_processing/hit/l1b/hit_l1b.py +18 -16
  25. imap_processing/hit/l2/constants.py +1 -1
  26. imap_processing/hit/l2/hit_l2.py +4 -5
  27. imap_processing/ialirt/constants.py +21 -0
  28. imap_processing/ialirt/generate_coverage.py +188 -0
  29. imap_processing/ialirt/l0/parse_mag.py +62 -5
  30. imap_processing/ialirt/l0/process_swapi.py +1 -1
  31. imap_processing/ialirt/l0/process_swe.py +23 -7
  32. imap_processing/ialirt/utils/constants.py +22 -16
  33. imap_processing/ialirt/utils/create_xarray.py +42 -19
  34. imap_processing/idex/idex_constants.py +1 -5
  35. imap_processing/idex/idex_l2b.py +246 -67
  36. imap_processing/idex/idex_l2c.py +30 -196
  37. imap_processing/lo/l0/lo_apid.py +1 -0
  38. imap_processing/lo/l1a/lo_l1a.py +44 -0
  39. imap_processing/lo/packet_definitions/lo_xtce.xml +5359 -106
  40. imap_processing/mag/constants.py +1 -0
  41. imap_processing/mag/l1d/__init__.py +0 -0
  42. imap_processing/mag/l1d/mag_l1d.py +133 -0
  43. imap_processing/mag/l1d/mag_l1d_data.py +588 -0
  44. imap_processing/mag/l2/__init__.py +0 -0
  45. imap_processing/mag/l2/mag_l2.py +25 -20
  46. imap_processing/mag/l2/mag_l2_data.py +191 -130
  47. imap_processing/quality_flags.py +20 -2
  48. imap_processing/spice/geometry.py +25 -3
  49. imap_processing/spice/pointing_frame.py +1 -1
  50. imap_processing/spice/spin.py +4 -0
  51. imap_processing/spice/time.py +51 -0
  52. imap_processing/swapi/l2/swapi_l2.py +52 -8
  53. imap_processing/swapi/swapi_utils.py +1 -1
  54. imap_processing/swe/l1b/swe_l1b.py +2 -4
  55. imap_processing/ultra/constants.py +49 -1
  56. imap_processing/ultra/l0/decom_tools.py +15 -8
  57. imap_processing/ultra/l0/decom_ultra.py +35 -11
  58. imap_processing/ultra/l0/ultra_utils.py +97 -5
  59. imap_processing/ultra/l1a/ultra_l1a.py +25 -4
  60. imap_processing/ultra/l1b/cullingmask.py +3 -3
  61. imap_processing/ultra/l1b/de.py +53 -15
  62. imap_processing/ultra/l1b/extendedspin.py +26 -2
  63. imap_processing/ultra/l1b/lookup_utils.py +171 -50
  64. imap_processing/ultra/l1b/quality_flag_filters.py +14 -0
  65. imap_processing/ultra/l1b/ultra_l1b_culling.py +198 -5
  66. imap_processing/ultra/l1b/ultra_l1b_extended.py +304 -66
  67. imap_processing/ultra/l1c/helio_pset.py +54 -7
  68. imap_processing/ultra/l1c/spacecraft_pset.py +9 -1
  69. imap_processing/ultra/l1c/ultra_l1c.py +2 -0
  70. imap_processing/ultra/l1c/ultra_l1c_pset_bins.py +106 -109
  71. imap_processing/ultra/utils/ultra_l1_utils.py +13 -1
  72. {imap_processing-0.17.0.dist-info → imap_processing-0.18.0.dist-info}/METADATA +2 -2
  73. {imap_processing-0.17.0.dist-info → imap_processing-0.18.0.dist-info}/RECORD +76 -83
  74. imap_processing/ultra/lookup_tables/Angular_Profiles_FM45_LeftSlit.csv +0 -526
  75. imap_processing/ultra/lookup_tables/Angular_Profiles_FM45_RightSlit.csv +0 -526
  76. imap_processing/ultra/lookup_tables/Angular_Profiles_FM90_LeftSlit.csv +0 -526
  77. imap_processing/ultra/lookup_tables/Angular_Profiles_FM90_RightSlit.csv +0 -524
  78. imap_processing/ultra/lookup_tables/EgyNorm.mem.csv +0 -32769
  79. imap_processing/ultra/lookup_tables/FM45_Startup1_ULTRA_IMGPARAMS_20240719.csv +0 -2
  80. imap_processing/ultra/lookup_tables/FM90_Startup1_ULTRA_IMGPARAMS_20240719.csv +0 -2
  81. imap_processing/ultra/lookup_tables/dps_grid45_compressed.cdf +0 -0
  82. imap_processing/ultra/lookup_tables/ultra45_back-pos-luts.csv +0 -4097
  83. imap_processing/ultra/lookup_tables/ultra45_tdc_norm.csv +0 -2050
  84. imap_processing/ultra/lookup_tables/ultra90_back-pos-luts.csv +0 -4097
  85. imap_processing/ultra/lookup_tables/ultra90_tdc_norm.csv +0 -2050
  86. imap_processing/ultra/lookup_tables/yadjust.csv +0 -257
  87. {imap_processing-0.17.0.dist-info → imap_processing-0.18.0.dist-info}/LICENSE +0 -0
  88. {imap_processing-0.17.0.dist-info → imap_processing-0.18.0.dist-info}/WHEEL +0 -0
  89. {imap_processing-0.17.0.dist-info → imap_processing-0.18.0.dist-info}/entry_points.txt +0 -0
@@ -5,11 +5,16 @@ import xarray as xr
5
5
 
6
6
  from imap_processing.ultra.l1b.ultra_l1b_culling import (
7
7
  flag_attitude,
8
- flag_spin,
8
+ flag_hk,
9
+ flag_imap_instruments,
10
+ flag_rates,
9
11
  get_energy_histogram,
12
+ get_pulses_per_spin,
10
13
  )
11
14
  from imap_processing.ultra.utils.ultra_l1_utils import create_dataset
12
15
 
16
+ FILLVAL_UINT16 = 65535
17
+
13
18
 
14
19
  def calculate_extendedspin(
15
20
  dict_datasets: dict[str, xr.Dataset],
@@ -34,10 +39,11 @@ def calculate_extendedspin(
34
39
  Dataset containing the data.
35
40
  """
36
41
  aux_dataset = dict_datasets[f"imap_ultra_l1a_{instrument_id}sensor-aux"]
42
+ rates_dataset = dict_datasets[f"imap_ultra_l1a_{instrument_id}sensor-rates"]
37
43
  de_dataset = dict_datasets[f"imap_ultra_l1b_{instrument_id}sensor-de"]
38
44
 
39
45
  extendedspin_dict = {}
40
- rates_qf, spin, energy_midpoints, n_sigma_per_energy = flag_spin(
46
+ rates_qf, spin, energy_midpoints, n_sigma_per_energy = flag_rates(
41
47
  de_dataset["spin"].values,
42
48
  de_dataset["energy"].values,
43
49
  )
@@ -47,12 +53,19 @@ def calculate_extendedspin(
47
53
  attitude_qf, spin_rates, spin_period, spin_starttime = flag_attitude(
48
54
  de_dataset["spin"].values, aux_dataset
49
55
  )
56
+ # TODO: We will add to this later
57
+ hk_qf = flag_hk(de_dataset["spin"].values)
58
+ inst_qf = flag_imap_instruments(de_dataset["spin"].values)
59
+
50
60
  # Get the first epoch for each spin.
51
61
  mask = xr.DataArray(np.isin(de_dataset["spin"], spin), dims="epoch")
52
62
  filtered_dataset = de_dataset.where(mask, drop=True)
53
63
  _, first_indices = np.unique(filtered_dataset["spin"].values, return_index=True)
54
64
  first_epochs = filtered_dataset["epoch"].values[first_indices]
55
65
 
66
+ # Get the number of pulses per spin.
67
+ start_per_spin, stop_per_spin, coin_per_spin = get_pulses_per_spin(rates_dataset)
68
+
56
69
  # These will be the coordinates.
57
70
  extendedspin_dict["epoch"] = first_epochs
58
71
  extendedspin_dict["spin_number"] = spin
@@ -63,8 +76,19 @@ def calculate_extendedspin(
63
76
  extendedspin_dict["spin_start_time"] = spin_starttime
64
77
  extendedspin_dict["spin_period"] = spin_period
65
78
  extendedspin_dict["spin_rate"] = spin_rates
79
+ extendedspin_dict["start_pulses_per_spin"] = start_per_spin
80
+ extendedspin_dict["stop_pulses_per_spin"] = stop_per_spin
81
+ extendedspin_dict["coin_pulses_per_spin"] = coin_per_spin
82
+ # TODO: this will be used to track rejected events in each
83
+ # spin based on quality flags in de l1b data.
84
+ extendedspin_dict["rejected_events_per_spin"] = np.full_like(
85
+ spin, FILLVAL_UINT16, dtype=np.uint16
86
+ )
87
+
66
88
  extendedspin_dict["quality_attitude"] = attitude_qf
67
89
  extendedspin_dict["quality_ena_rates"] = rates_qf
90
+ extendedspin_dict["quality_hk"] = hk_qf
91
+ extendedspin_dict["quality_instruments"] = inst_qf
68
92
 
69
93
  extendedspin_dataset = create_dataset(extendedspin_dict, name, "l1b")
70
94
 
@@ -4,43 +4,12 @@ import numpy as np
4
4
  import numpy.typing as npt
5
5
  import pandas as pd
6
6
  import xarray as xr
7
+ from numpy.typing import NDArray
7
8
 
8
- from imap_processing import imap_module_directory
9
-
10
- BASE_PATH = imap_module_directory / "ultra" / "lookup_tables"
11
-
12
- _YADJUST_DF = pd.read_csv(BASE_PATH / "yadjust.csv").set_index("dYLUT")
13
- _TDC_NORM_DF_ULTRA45 = pd.read_csv(
14
- BASE_PATH / "ultra45_tdc_norm.csv", header=1, index_col="Index"
15
- )
16
- _TDC_NORM_DF_ULTRA90 = pd.read_csv(
17
- BASE_PATH / "ultra90_tdc_norm.csv", header=1, index_col="Index"
18
- )
19
- _BACK_POS_DF_ULTRA45 = pd.read_csv(
20
- BASE_PATH / "ultra45_back-pos-luts.csv", index_col="Index_offset"
21
- )
22
- _BACK_POS_DF_ULTRA90 = pd.read_csv(
23
- BASE_PATH / "ultra90_back-pos-luts.csv", index_col="Index_offset"
24
- )
25
- _ENERGY_NORM_DF = pd.read_csv(BASE_PATH / "EgyNorm.mem.csv")
26
- _IMAGE_PARAMS_DF = {
27
- "ultra45": pd.read_csv(BASE_PATH / "FM45_Startup1_ULTRA_IMGPARAMS_20240719.csv"),
28
- "ultra90": pd.read_csv(BASE_PATH / "FM90_Startup1_ULTRA_IMGPARAMS_20240719.csv"),
29
- }
30
-
31
- _FWHM_TABLES = {
32
- ("left", "ultra45"): pd.read_csv(BASE_PATH / "Angular_Profiles_FM45_LeftSlit.csv"),
33
- ("right", "ultra45"): pd.read_csv(
34
- BASE_PATH / "Angular_Profiles_FM45_RightSlit.csv"
35
- ),
36
- ("left", "ultra90"): pd.read_csv(BASE_PATH / "Angular_Profiles_FM90_LeftSlit.csv"),
37
- ("right", "ultra90"): pd.read_csv(
38
- BASE_PATH / "Angular_Profiles_FM90_RightSlit.csv"
39
- ),
40
- }
41
-
42
-
43
- def get_y_adjust(dy_lut: np.ndarray) -> npt.NDArray:
9
+ from imap_processing.quality_flags import ImapDEUltraFlags
10
+
11
+
12
+ def get_y_adjust(dy_lut: np.ndarray, ancillary_files: dict) -> npt.NDArray:
44
13
  """
45
14
  Adjust the front yf position based on the particle's trajectory.
46
15
 
@@ -52,16 +21,21 @@ def get_y_adjust(dy_lut: np.ndarray) -> npt.NDArray:
52
21
  ----------
53
22
  dy_lut : np.ndarray
54
23
  Change in y direction used for the lookup table (mm).
24
+ ancillary_files : dict[Path]
25
+ Ancillary files containing the lookup tables.
55
26
 
56
27
  Returns
57
28
  -------
58
29
  yadj : np.ndarray
59
30
  Y adjustment (mm).
60
31
  """
61
- return _YADJUST_DF["dYAdj"].iloc[dy_lut].values
32
+ yadjust_df = pd.read_csv(ancillary_files["l1b-yadjust-lookup"]).set_index("dYLUT")
33
+ return yadjust_df["dYAdj"].iloc[dy_lut].values
62
34
 
63
35
 
64
- def get_norm(dn: xr.DataArray, key: str, file_label: str) -> npt.NDArray:
36
+ def get_norm(
37
+ dn: xr.DataArray, key: str, file_label: str, ancillary_files: dict
38
+ ) -> npt.NDArray:
65
39
  """
66
40
  Correct mismatches between the stop Time to Digital Converters (TDCs).
67
41
 
@@ -82,6 +56,8 @@ def get_norm(dn: xr.DataArray, key: str, file_label: str) -> npt.NDArray:
82
56
  BtSpNNorm, BtSpSNorm, BtSpENorm, or BtSpWNorm.
83
57
  file_label : str
84
58
  Instrument (ultra45 or ultra90).
59
+ ancillary_files : dict[Path]
60
+ Ancillary files containing the lookup tables.
85
61
 
86
62
  Returns
87
63
  -------
@@ -89,16 +65,22 @@ def get_norm(dn: xr.DataArray, key: str, file_label: str) -> npt.NDArray:
89
65
  Normalized DNs.
90
66
  """
91
67
  if file_label == "ultra45":
92
- tdc_norm_df = _TDC_NORM_DF_ULTRA45
68
+ tdc_norm_df = pd.read_csv(
69
+ ancillary_files["l1b-45sensor-tdc-norm-lookup"], header=1, index_col="Index"
70
+ )
93
71
  else:
94
- tdc_norm_df = _TDC_NORM_DF_ULTRA90
72
+ tdc_norm_df = pd.read_csv(
73
+ ancillary_files["l1b-90sensor-tdc-norm-lookup"], header=1, index_col="Index"
74
+ )
95
75
 
96
76
  dn_norm = tdc_norm_df[key].iloc[dn].values
97
77
 
98
78
  return dn_norm
99
79
 
100
80
 
101
- def get_back_position(back_index: np.ndarray, key: str, file_label: str) -> npt.NDArray:
81
+ def get_back_position(
82
+ back_index: np.ndarray, key: str, file_label: str, ancillary_files: dict
83
+ ) -> npt.NDArray:
102
84
  """
103
85
  Convert normalized TDC values using lookup tables.
104
86
 
@@ -117,6 +99,8 @@ def get_back_position(back_index: np.ndarray, key: str, file_label: str) -> npt.
117
99
  XBkTp, YBkTp, XBkBt, or YBkBt.
118
100
  file_label : str
119
101
  Instrument (ultra45 or ultra90).
102
+ ancillary_files : dict[Path]
103
+ Ancillary files containing the lookup tables.
120
104
 
121
105
  Returns
122
106
  -------
@@ -124,14 +108,20 @@ def get_back_position(back_index: np.ndarray, key: str, file_label: str) -> npt.
124
108
  Converted DNs to Units of hundredths of a millimeter.
125
109
  """
126
110
  if file_label == "ultra45":
127
- back_pos_df = _BACK_POS_DF_ULTRA45
111
+ back_pos_df = pd.read_csv(
112
+ ancillary_files["l1b-45sensor-back-pos-lookup"], index_col="Index_offset"
113
+ )
128
114
  else:
129
- back_pos_df = _BACK_POS_DF_ULTRA90
115
+ back_pos_df = pd.read_csv(
116
+ ancillary_files["l1b-90sensor-back-pos-lookup"], index_col="Index_offset"
117
+ )
130
118
 
131
119
  return back_pos_df[key].values[back_index]
132
120
 
133
121
 
134
- def get_energy_norm(ssd: np.ndarray, composite_energy: np.ndarray) -> npt.NDArray:
122
+ def get_energy_norm(
123
+ ssd: np.ndarray, composite_energy: np.ndarray, ancillary_files: dict
124
+ ) -> npt.NDArray:
135
125
  """
136
126
  Normalize composite energy per SSD using a lookup table.
137
127
 
@@ -146,6 +136,8 @@ def get_energy_norm(ssd: np.ndarray, composite_energy: np.ndarray) -> npt.NDArra
146
136
  Acts as index 1.
147
137
  composite_energy : np.ndarray
148
138
  Acts as index 2.
139
+ ancillary_files : dict[Path]
140
+ Ancillary files containing the lookup tables.
149
141
 
150
142
  Returns
151
143
  -------
@@ -153,11 +145,11 @@ def get_energy_norm(ssd: np.ndarray, composite_energy: np.ndarray) -> npt.NDArra
153
145
  Normalized composite energy.
154
146
  """
155
147
  row_number = ssd * 4096 + composite_energy
148
+ norm_lookup = pd.read_csv(ancillary_files["l1b-egynorm-lookup"])
149
+ return norm_lookup["NormEnergy"].iloc[row_number]
156
150
 
157
- return _ENERGY_NORM_DF["NormEnergy"].iloc[row_number]
158
151
 
159
-
160
- def get_image_params(image: str, sensor: str) -> np.float64:
152
+ def get_image_params(image: str, sensor: str, ancillary_files: dict) -> np.float64:
161
153
  """
162
154
  Lookup table for image parameters.
163
155
 
@@ -171,18 +163,26 @@ def get_image_params(image: str, sensor: str) -> np.float64:
171
163
  The column name to lookup in the CSV file, e.g., 'XFTLTOFF' or 'XFTRTOFF'.
172
164
  sensor : str
173
165
  Sensor name: "ultra45" or "ultra90".
166
+ ancillary_files : dict[Path]
167
+ Ancillary files containing the lookup tables.
174
168
 
175
169
  Returns
176
170
  -------
177
171
  value : np.float64
178
172
  Image parameter value from the CSV file.
179
173
  """
180
- lookup_table = _IMAGE_PARAMS_DF[sensor]
174
+ if sensor == "ultra45":
175
+ lookup_table = pd.read_csv(ancillary_files["l1b-45sensor-imgparams-lookup"])
176
+ else:
177
+ lookup_table = pd.read_csv(ancillary_files["l1b-90sensor-imgparams-lookup"])
178
+
181
179
  value: np.float64 = lookup_table[image].values[0]
182
180
  return value
183
181
 
184
182
 
185
- def get_angular_profiles(start_type: str, sensor: str) -> pd.DataFrame:
183
+ def get_angular_profiles(
184
+ start_type: str, sensor: str, ancillary_files: dict
185
+ ) -> pd.DataFrame:
186
186
  """
187
187
  Lookup table for FWHM for theta and phi.
188
188
 
@@ -195,13 +195,16 @@ def get_angular_profiles(start_type: str, sensor: str) -> pd.DataFrame:
195
195
  Start Type: Left, Right.
196
196
  sensor : str
197
197
  Sensor name: "ultra45" or "ultra90".
198
+ ancillary_files : dict[Path]
199
+ Ancillary files.
198
200
 
199
201
  Returns
200
202
  -------
201
203
  lookup_table : DataFrame
202
204
  Angular profile lookup table for a given start_type and sensor.
203
205
  """
204
- lookup_table = _FWHM_TABLES[(start_type.lower(), sensor)]
206
+ lut_descriptor = f"l1b-{sensor[-2:]}sensor-{start_type.lower()}slit-lookup"
207
+ lookup_table = pd.read_csv(ancillary_files[lut_descriptor])
205
208
 
206
209
  return lookup_table
207
210
 
@@ -227,3 +230,121 @@ def get_energy_efficiencies(ancillary_files: dict) -> pd.DataFrame:
227
230
  lookup_table = pd.read_csv(ancillary_files["l1b-45sensor-logistic-interpolation"])
228
231
 
229
232
  return lookup_table
233
+
234
+
235
+ def get_geometric_factor(
236
+ ancillary_files: dict,
237
+ filename: str,
238
+ phi: NDArray,
239
+ theta: NDArray,
240
+ quality_flag: NDArray,
241
+ ) -> tuple[NDArray, NDArray]:
242
+ """
243
+ Lookup table for geometric factor using nearest neighbor.
244
+
245
+ Parameters
246
+ ----------
247
+ ancillary_files : dict[Path]
248
+ Ancillary files.
249
+ filename : str
250
+ Name of the file in ancillary_files to use.
251
+ phi : NDArray
252
+ Azimuth angles in degrees.
253
+ theta : NDArray
254
+ Elevation angles in degrees.
255
+ quality_flag : NDArray
256
+ Quality flag to set when geometric factor is zero.
257
+
258
+ Returns
259
+ -------
260
+ geometric_factor : NDArray
261
+ Geometric factor.
262
+ """
263
+ gf_table = pd.read_csv(
264
+ ancillary_files[filename], header=None, skiprows=6, nrows=301
265
+ ).to_numpy(dtype=float)
266
+ theta_table = pd.read_csv(
267
+ ancillary_files[filename], header=None, skiprows=308, nrows=301
268
+ ).to_numpy(dtype=float)
269
+ phi_table = pd.read_csv(
270
+ ancillary_files[filename], header=None, skiprows=610, nrows=301
271
+ ).to_numpy(dtype=float)
272
+
273
+ # Assume uniform grids: extract 1D arrays from first row/col
274
+ theta_vals = theta_table[0, :] # columns represent theta
275
+ phi_vals = phi_table[:, 0] # rows represent phi
276
+
277
+ # Find nearest index in table for each input value
278
+ phi_idx = np.abs(phi_vals[:, None] - phi).argmin(axis=0)
279
+ theta_idx = np.abs(theta_vals[:, None] - theta).argmin(axis=0)
280
+
281
+ # Fetch geometric factor values at nearest (phi, theta) pairs
282
+ geometric_factor = gf_table[phi_idx, theta_idx]
283
+
284
+ phi_rad = np.deg2rad(phi)
285
+ numerator = 5.0 * np.cos(phi_rad)
286
+ denominator = 1 + 2.80 * np.cos(phi_rad)
287
+ # Equation 19 in the Ultra Algorithm Document.
288
+ theta_nom = np.arctan(numerator / denominator)
289
+ theta_nom = np.rad2deg(theta_nom)
290
+
291
+ outside_fov = np.abs(theta) > theta_nom
292
+ quality_flag[outside_fov] |= ImapDEUltraFlags.FOV.value
293
+
294
+ return geometric_factor
295
+
296
+
297
+ def get_ph_corrected(
298
+ sensor: str,
299
+ location: str,
300
+ ancillary_files: dict,
301
+ xlut: NDArray,
302
+ ylut: NDArray,
303
+ quality_flag: NDArray,
304
+ ) -> tuple[NDArray, NDArray]:
305
+ """
306
+ PH correction for stop anodes, top and bottom.
307
+
308
+ Further description is available starting on
309
+ page 207 of the Ultra Flight Software Document.
310
+
311
+ Parameters
312
+ ----------
313
+ sensor : str
314
+ Sensor name: "ultra45" or "ultra90".
315
+ location : str
316
+ Location: "tp" or "bt".
317
+ ancillary_files : dict[Path]
318
+ Ancillary files.
319
+ xlut : NDArray
320
+ X lookup index for PH correction.
321
+ ylut : NDArray
322
+ Y lookup index for PH correction.
323
+ quality_flag : NDArray
324
+ Quality flag to set when there is an outlier.
325
+
326
+ Returns
327
+ -------
328
+ ph_correction : NDArray
329
+ Correction for pulse height.
330
+ quality_flag : NDArray
331
+ Quality flag updated with PH correction flags.
332
+ """
333
+ ph_correct = pd.read_csv(
334
+ ancillary_files[f"l1b-{sensor[-2:]}sensor-sp{location}phcorr"], header=None
335
+ )
336
+ ph_correct_array = ph_correct.to_numpy()
337
+
338
+ max_x, max_y = ph_correct_array.shape[0] - 1, ph_correct_array.shape[1] - 1
339
+
340
+ # Clamp indices to nearest valid value
341
+ xlut_clamped = np.clip(xlut.astype(int), 0, max_x)
342
+ ylut_clamped = np.clip(ylut.astype(int), 0, max_y)
343
+
344
+ # Flag where clamping occurred
345
+ flagged_mask = (xlut != xlut_clamped) | (ylut != ylut_clamped)
346
+ quality_flag[flagged_mask] |= ImapDEUltraFlags.PHCORR.value
347
+
348
+ ph_correction = ph_correct_array[xlut_clamped, ylut_clamped]
349
+
350
+ return ph_correction, quality_flag
@@ -0,0 +1,14 @@
1
+ """Contains list of QFs to use for filtering."""
2
+
3
+ from imap_processing.quality_flags import (
4
+ FlagNameMixin,
5
+ ImapRatesUltraFlags,
6
+ )
7
+
8
+ QUALITY_FLAG_FILTERS: dict[str, list[FlagNameMixin]] = {
9
+ "quality_attitude": [],
10
+ "quality_ena_rates": [
11
+ ImapRatesUltraFlags.FIRSTSPIN,
12
+ ImapRatesUltraFlags.LASTSPIN,
13
+ ],
14
+ }
@@ -7,7 +7,12 @@ import pandas as pd
7
7
  import xarray as xr
8
8
  from numpy.typing import NDArray
9
9
 
10
- from imap_processing.quality_flags import ImapAttitudeUltraFlags, ImapRatesUltraFlags
10
+ from imap_processing.quality_flags import (
11
+ ImapAttitudeUltraFlags,
12
+ ImapHkUltraFlags,
13
+ ImapInstrumentUltraFlags,
14
+ ImapRatesUltraFlags,
15
+ )
11
16
  from imap_processing.spice.spin import get_spin_data
12
17
  from imap_processing.ultra.constants import UltraConstants
13
18
 
@@ -106,6 +111,10 @@ def flag_attitude(
106
111
 
107
112
  spin_period = spin_df.loc[spin_df.spin_number.isin(spins), "spin_period_sec"]
108
113
  spin_starttime = spin_df.loc[spin_df.spin_number.isin(spins), "spin_start_met"]
114
+ spin_phase_valid = spin_df.loc[spin_df.spin_number.isin(spins), "spin_phase_valid"]
115
+ spin_period_valid = spin_df.loc[
116
+ spin_df.spin_number.isin(spins), "spin_period_valid"
117
+ ]
109
118
  spin_rates = 60 / spin_period # 60 seconds in a minute
110
119
  bad_spin_rate_indices = (spin_rates < UltraConstants.CULLING_RPM_MIN) | (
111
120
  spin_rates > UltraConstants.CULLING_RPM_MAX
@@ -118,9 +127,59 @@ def flag_attitude(
118
127
  mismatch_indices = compare_aux_univ_spin_table(aux_dataset, spins, spin_df)
119
128
  quality_flags[mismatch_indices] |= ImapAttitudeUltraFlags.AUXMISMATCH.value
120
129
 
130
+ # Spin phase validity flag
131
+ phase_invalid_indices = spin_phase_valid == 0
132
+ quality_flags[phase_invalid_indices] |= ImapAttitudeUltraFlags.SPINPHASE.value
133
+
134
+ # Spin period validity flag
135
+ period_invalid_indices = ~spin_period_valid
136
+ quality_flags[period_invalid_indices] |= ImapAttitudeUltraFlags.SPINPERIOD.value
137
+
121
138
  return quality_flags, spin_rates, spin_period, spin_starttime
122
139
 
123
140
 
141
+ def flag_hk(spin_number: NDArray) -> NDArray:
142
+ """
143
+ Flag data based on hk.
144
+
145
+ Parameters
146
+ ----------
147
+ spin_number : NDArray
148
+ Spin number at each direct event.
149
+
150
+ Returns
151
+ -------
152
+ quality_flags : NDArray
153
 + Quality flags.
154
+ """
155
+ spins = np.unique(spin_number) # Get unique spins
156
+ quality_flags = np.full(spins.shape, ImapHkUltraFlags.NONE.value, dtype=np.uint16)
157
+
158
+ return quality_flags
159
+
160
+
161
+ def flag_imap_instruments(spin_number: NDArray) -> NDArray:
162
+ """
163
+ Flag data based on other IMAP instruments.
164
+
165
+ Parameters
166
+ ----------
167
+ spin_number : NDArray
168
+ Spin number at each direct event.
169
+
170
+ Returns
171
+ -------
172
+ quality_flags : NDArray
173
 + Quality flags.
174
+ """
175
+ spins = np.unique(spin_number) # Get unique spins
176
+ quality_flags = np.full(
177
+ spins.shape, ImapInstrumentUltraFlags.NONE.value, dtype=np.uint16
178
+ )
179
+
180
+ return quality_flags
181
+
182
+
124
183
  def get_n_sigma(count_rates: NDArray, mean_duration: float, sigma: int = 6) -> NDArray:
125
184
  """
126
185
  Calculate the threshold for the HIGHRATES flag.
@@ -140,7 +199,8 @@ def get_n_sigma(count_rates: NDArray, mean_duration: float, sigma: int = 6) -> N
140
199
  threshold : NDArray
141
200
  Threshold for applying HIGHRATES flag.
142
201
  """
143
- sigma_per_energy = np.std(count_rates, axis=1)
202
+ # Take the Sample Standard Deviation.
203
+ sigma_per_energy = np.std(count_rates, axis=1, ddof=1)
144
204
  n_sigma_per_energy = sigma * sigma_per_energy
145
205
  mean_per_energy = np.mean(count_rates, axis=1)
146
206
  # Must have a HIGHRATES threshold of at least 3 counts per spin.
@@ -149,7 +209,7 @@ def get_n_sigma(count_rates: NDArray, mean_duration: float, sigma: int = 6) -> N
149
209
  return threshold
150
210
 
151
211
 
152
- def flag_spin(
212
+ def flag_rates(
153
213
  spin_number: NDArray, energy: NDArray, sigma: int = 6
154
214
  ) -> tuple[NDArray, NDArray, NDArray, NDArray]:
155
215
  """
@@ -182,8 +242,6 @@ def flag_spin(
182
242
  count_rates.shape, ImapRatesUltraFlags.NONE.value, dtype=np.uint16
183
243
  )
184
244
 
185
- # Zero counts/spin/energy level
186
- quality_flags[counts == 0] |= ImapRatesUltraFlags.ZEROCOUNTS.value
187
245
  threshold = get_n_sigma(count_rates, duration, sigma=sigma)
188
246
 
189
247
  bin_edges = np.array(UltraConstants.CULLING_ENERGY_BIN_EDGES)
@@ -194,6 +252,10 @@ def flag_spin(
194
252
  indices_n_sigma = count_rates > threshold[:, np.newaxis]
195
253
  quality_flags[indices_n_sigma] |= ImapRatesUltraFlags.HIGHRATES.value
196
254
 
255
+ # Flags the first and last spin
256
+ quality_flags[:, 0] |= ImapRatesUltraFlags.FIRSTSPIN.value
257
+ quality_flags[:, -1] |= ImapRatesUltraFlags.LASTSPIN.value
258
+
197
259
  return quality_flags, spin, energy_midpoints, threshold
198
260
 
199
261
 
@@ -256,3 +318,134 @@ def compare_aux_univ_spin_table(
256
318
  mismatch_indices[missing_spin_mask] = True
257
319
 
258
320
  return mismatch_indices
321
+
322
+
323
+ # TODO: Make this a common util since it is being used for the de and rates packets.
324
+ def get_spin_and_duration(met: NDArray, spin: NDArray) -> tuple[NDArray, NDArray]:
325
+ """
326
+ Get the spin number and duration.
327
+
328
+ Parameters
329
+ ----------
330
+ met : NDArray
331
+ Mission elapsed time.
332
+ spin : NDArray
333
+ Spin number 0-255.
334
+
335
+ Returns
336
+ -------
337
+ assigned_spin_number : NDArray
338
+ Spin number for packet data product.
339
+ """
340
+ # Packet data.
341
+ # Since the spin number in the direct events packet
342
+ # is only 8 bits it goes from 0-255.
343
+ # Within a pointing that means we will always have duplicate spin numbers.
344
+ # In other words, different spins will be represented by the same spin number.
345
+ # Just to make certain that we won't accidentally combine
346
+ # multiple spins we need to sort by time here.
347
+ sort_idx = np.argsort(met)
348
+ packet_met_sorted = met[sort_idx]
349
+ packet_spin_sorted = spin[sort_idx]
350
+ # Here we are finding the start and end indices of each spin in the sorted array.
351
+ is_new_spin = np.concatenate(
352
+ [[True], packet_spin_sorted.values[1:] != packet_spin_sorted.values[:-1]]
353
+ )
354
+ spin_start_indices = np.where(is_new_spin)[0]
355
+ spin_end_indices = np.append(spin_start_indices[1:], len(packet_met_sorted))
356
+
357
+ # Universal Spin Table.
358
+ spin_df = get_spin_data()
359
+ # Retrieve the met values of the start of the spin.
360
+ spin_start_mets = spin_df["spin_start_met"].values
361
+ # Retrieve the corresponding spin numbers.
362
+ spin_numbers = spin_df["spin_number"].values
363
+ spin_period_sec = spin_df["spin_period_sec"].values
364
+ assigned_spin_number_sorted = np.empty(packet_spin_sorted.shape, dtype=np.uint32)
365
+ assigned_spin_duration_sorted = np.empty(packet_spin_sorted.shape, dtype=np.float32)
366
+ # These last 8 bits are the same as the spin number in the DE packet.
367
+ # So this will give us choices of which spins are
368
+ # available to assign to the packet data.
369
+ possible_spins = spin_numbers & 0xFF
370
+
371
+ # Assign each group based on time.
372
+ for start, end in zip(spin_start_indices, spin_end_indices):
373
+ # Now that we have the possible spins from the Universal Spin Table,
374
+ # we match the times of those spins to the nearest times in the DE data.
375
+ possible_times = spin_start_mets[
376
+ possible_spins == packet_spin_sorted.values[start]
377
+ ]
378
+ # Get nearest time for matching spins.
379
+ nearest_idx = np.abs(possible_times - packet_met_sorted.values[start]).argmin()
380
+ nearest_value = possible_times[nearest_idx]
381
+ assigned_spin_number_sorted[start:end] = spin_numbers[
382
+ spin_start_mets == nearest_value
383
+ ]
384
+ assigned_spin_duration_sorted[start:end] = spin_period_sec[
385
+ spin_start_mets == nearest_value
386
+ ]
387
+
388
+ # Undo the sort to match original order.
389
+ assigned_spin_number = np.empty_like(assigned_spin_number_sorted)
390
+ assigned_spin_number[sort_idx] = assigned_spin_number_sorted
391
+ assigned_duration = np.empty_like(assigned_spin_duration_sorted)
392
+ assigned_duration[sort_idx] = assigned_spin_duration_sorted
393
+
394
+ return assigned_spin_number, assigned_duration
395
+
396
+
397
+ def get_pulses_per_spin(rates: xr.Dataset) -> tuple[NDArray, NDArray, NDArray]:
398
+ """
399
+ Get the total number of pulses per spin.
400
+
401
+ Parameters
402
+ ----------
403
+ rates : xr.Dataset
404
+ Rates dataset.
405
+
406
+ Returns
407
+ -------
408
+ start_per_spin : NDArray
409
+ Total start pulses per spin.
410
+ stop_per_spin : NDArray
411
+ Total stop pulses per spin.
412
+ coin_per_spin : NDArray
413
+ Total coincidence pulses per spin.
414
+ """
415
+ spin_number, duration = get_spin_and_duration(rates["shcoarse"], rates["spin"])
416
+
417
+ # Top coin pulses
418
+ top_coin_pulses = np.stack(
419
+ [v for k, v in rates.items() if k.startswith("coin_t")], axis=1
420
+ )
421
+ max_top_coin_pulse = np.max(top_coin_pulses, axis=1)
422
+
423
+ # Bottom coin pulses
424
+ bottom_coin_pulses = np.stack(
425
+ [v for k, v in rates.items() if k.startswith("coin_b")], axis=1
426
+ )
427
+ max_bottom_coin_pulse = np.max(bottom_coin_pulses, axis=1)
428
+
429
+ # Top stop pulses
430
+ top_stop_pulses = np.stack(
431
+ [v for k, v in rates.items() if k.startswith("stop_t")], axis=1
432
+ )
433
+ max_top_stop_pulse = np.max(top_stop_pulses, axis=1)
434
+
435
+ # Bottom stop pulses
436
+ bottom_stop_pulses = np.stack(
437
+ [v for k, v in rates.items() if k.startswith("stop_b")], axis=1
438
+ )
439
+ max_bottom_stop_pulse = np.max(bottom_stop_pulses, axis=1)
440
+
441
+ stop_pulses = max_top_stop_pulse + max_bottom_stop_pulse
442
+ start_pulses = rates["start_rf"] + rates["start_lf"]
443
+ coin_pulses = max_top_coin_pulse + max_bottom_coin_pulse
444
+
445
+ unique_spins, spin_idx = np.unique(spin_number, return_inverse=True)
446
+
447
+ start_per_spin = np.bincount(spin_idx, weights=start_pulses)
448
+ stop_per_spin = np.bincount(spin_idx, weights=stop_pulses)
449
+ coin_per_spin = np.bincount(spin_idx, weights=coin_pulses)
450
+
451
+ return start_per_spin, stop_per_spin, coin_per_spin