imap-processing 0.16.2__py3-none-any.whl → 0.18.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

Files changed (110)
  1. imap_processing/_version.py +2 -2
  2. imap_processing/ccsds/excel_to_xtce.py +12 -0
  3. imap_processing/cdf/config/imap_codice_global_cdf_attrs.yaml +6 -6
  4. imap_processing/cdf/config/imap_codice_l1a_variable_attrs.yaml +35 -0
  5. imap_processing/cdf/config/imap_codice_l1b_variable_attrs.yaml +35 -0
  6. imap_processing/cdf/config/imap_codice_l2_variable_attrs.yaml +24 -0
  7. imap_processing/cdf/config/imap_hi_variable_attrs.yaml +8 -8
  8. imap_processing/cdf/config/imap_hit_global_cdf_attrs.yaml +1 -1
  9. imap_processing/cdf/config/imap_hit_l1a_variable_attrs.yaml +163 -100
  10. imap_processing/cdf/config/imap_hit_l2_variable_attrs.yaml +398 -415
  11. imap_processing/cdf/config/imap_ialirt_l1_variable_attrs.yaml +97 -54
  12. imap_processing/cdf/config/imap_idex_global_cdf_attrs.yaml +9 -9
  13. imap_processing/cdf/config/imap_idex_l2b_variable_attrs.yaml +233 -57
  14. imap_processing/cdf/config/imap_idex_l2c_variable_attrs.yaml +16 -90
  15. imap_processing/cdf/config/imap_lo_global_cdf_attrs.yaml +30 -0
  16. imap_processing/cdf/config/imap_mag_global_cdf_attrs.yaml +15 -1
  17. imap_processing/cdf/config/imap_swapi_variable_attrs.yaml +19 -0
  18. imap_processing/cdf/config/imap_swe_l1b_variable_attrs.yaml +20 -0
  19. imap_processing/cdf/config/imap_swe_l2_variable_attrs.yaml +39 -0
  20. imap_processing/cdf/config/imap_ultra_global_cdf_attrs.yaml +168 -0
  21. imap_processing/cdf/config/imap_ultra_l1a_variable_attrs.yaml +103 -2
  22. imap_processing/cdf/config/imap_ultra_l1b_variable_attrs.yaml +91 -11
  23. imap_processing/cdf/utils.py +7 -1
  24. imap_processing/cli.py +42 -13
  25. imap_processing/codice/codice_l1a.py +125 -78
  26. imap_processing/codice/codice_l1b.py +1 -1
  27. imap_processing/codice/codice_l2.py +0 -9
  28. imap_processing/codice/constants.py +481 -498
  29. imap_processing/hi/hi_l1a.py +4 -4
  30. imap_processing/hi/hi_l1b.py +2 -2
  31. imap_processing/hi/packet_definitions/TLM_HI_COMBINED_SCI.xml +218 -38
  32. imap_processing/hit/hit_utils.py +2 -2
  33. imap_processing/hit/l0/decom_hit.py +4 -3
  34. imap_processing/hit/l1a/hit_l1a.py +64 -24
  35. imap_processing/hit/l1b/constants.py +5 -0
  36. imap_processing/hit/l1b/hit_l1b.py +18 -16
  37. imap_processing/hit/l2/constants.py +1 -1
  38. imap_processing/hit/l2/hit_l2.py +4 -4
  39. imap_processing/ialirt/constants.py +21 -0
  40. imap_processing/ialirt/generate_coverage.py +188 -0
  41. imap_processing/ialirt/l0/parse_mag.py +62 -5
  42. imap_processing/ialirt/l0/process_swapi.py +1 -1
  43. imap_processing/ialirt/l0/process_swe.py +23 -7
  44. imap_processing/ialirt/utils/constants.py +22 -16
  45. imap_processing/ialirt/utils/create_xarray.py +42 -19
  46. imap_processing/idex/idex_constants.py +8 -5
  47. imap_processing/idex/idex_l2b.py +554 -58
  48. imap_processing/idex/idex_l2c.py +30 -196
  49. imap_processing/lo/l0/lo_apid.py +1 -0
  50. imap_processing/lo/l0/lo_star_sensor.py +48 -0
  51. imap_processing/lo/l1a/lo_l1a.py +74 -30
  52. imap_processing/lo/packet_definitions/lo_xtce.xml +5359 -106
  53. imap_processing/mag/constants.py +1 -0
  54. imap_processing/mag/l0/decom_mag.py +9 -6
  55. imap_processing/mag/l0/mag_l0_data.py +46 -0
  56. imap_processing/mag/l1d/__init__.py +0 -0
  57. imap_processing/mag/l1d/mag_l1d.py +133 -0
  58. imap_processing/mag/l1d/mag_l1d_data.py +588 -0
  59. imap_processing/mag/l2/__init__.py +0 -0
  60. imap_processing/mag/l2/mag_l2.py +25 -20
  61. imap_processing/mag/l2/mag_l2_data.py +191 -130
  62. imap_processing/quality_flags.py +20 -2
  63. imap_processing/spice/geometry.py +25 -3
  64. imap_processing/spice/pointing_frame.py +1 -1
  65. imap_processing/spice/spin.py +4 -0
  66. imap_processing/spice/time.py +51 -0
  67. imap_processing/swapi/l1/swapi_l1.py +12 -2
  68. imap_processing/swapi/l2/swapi_l2.py +59 -14
  69. imap_processing/swapi/swapi_utils.py +1 -1
  70. imap_processing/swe/l1b/swe_l1b.py +11 -4
  71. imap_processing/swe/l2/swe_l2.py +111 -17
  72. imap_processing/ultra/constants.py +49 -1
  73. imap_processing/ultra/l0/decom_tools.py +28 -14
  74. imap_processing/ultra/l0/decom_ultra.py +225 -15
  75. imap_processing/ultra/l0/ultra_utils.py +281 -8
  76. imap_processing/ultra/l1a/ultra_l1a.py +77 -8
  77. imap_processing/ultra/l1b/cullingmask.py +3 -3
  78. imap_processing/ultra/l1b/de.py +53 -15
  79. imap_processing/ultra/l1b/extendedspin.py +26 -2
  80. imap_processing/ultra/l1b/lookup_utils.py +171 -50
  81. imap_processing/ultra/l1b/quality_flag_filters.py +14 -0
  82. imap_processing/ultra/l1b/ultra_l1b_culling.py +198 -5
  83. imap_processing/ultra/l1b/ultra_l1b_extended.py +304 -66
  84. imap_processing/ultra/l1c/helio_pset.py +54 -7
  85. imap_processing/ultra/l1c/spacecraft_pset.py +9 -1
  86. imap_processing/ultra/l1c/ultra_l1c.py +2 -0
  87. imap_processing/ultra/l1c/ultra_l1c_pset_bins.py +106 -109
  88. imap_processing/ultra/packet_definitions/ULTRA_SCI_COMBINED.xml +3 -3
  89. imap_processing/ultra/utils/ultra_l1_utils.py +13 -1
  90. imap_processing/utils.py +20 -42
  91. {imap_processing-0.16.2.dist-info → imap_processing-0.18.0.dist-info}/METADATA +2 -2
  92. {imap_processing-0.16.2.dist-info → imap_processing-0.18.0.dist-info}/RECORD +95 -103
  93. imap_processing/lo/l0/data_classes/star_sensor.py +0 -98
  94. imap_processing/lo/l0/utils/lo_base.py +0 -57
  95. imap_processing/ultra/lookup_tables/Angular_Profiles_FM45_LeftSlit.csv +0 -526
  96. imap_processing/ultra/lookup_tables/Angular_Profiles_FM45_RightSlit.csv +0 -526
  97. imap_processing/ultra/lookup_tables/Angular_Profiles_FM90_LeftSlit.csv +0 -526
  98. imap_processing/ultra/lookup_tables/Angular_Profiles_FM90_RightSlit.csv +0 -524
  99. imap_processing/ultra/lookup_tables/EgyNorm.mem.csv +0 -32769
  100. imap_processing/ultra/lookup_tables/FM45_Startup1_ULTRA_IMGPARAMS_20240719.csv +0 -2
  101. imap_processing/ultra/lookup_tables/FM90_Startup1_ULTRA_IMGPARAMS_20240719.csv +0 -2
  102. imap_processing/ultra/lookup_tables/dps_grid45_compressed.cdf +0 -0
  103. imap_processing/ultra/lookup_tables/ultra45_back-pos-luts.csv +0 -4097
  104. imap_processing/ultra/lookup_tables/ultra45_tdc_norm.csv +0 -2050
  105. imap_processing/ultra/lookup_tables/ultra90_back-pos-luts.csv +0 -4097
  106. imap_processing/ultra/lookup_tables/ultra90_tdc_norm.csv +0 -2050
  107. imap_processing/ultra/lookup_tables/yadjust.csv +0 -257
  108. {imap_processing-0.16.2.dist-info → imap_processing-0.18.0.dist-info}/LICENSE +0 -0
  109. {imap_processing-0.16.2.dist-info → imap_processing-0.18.0.dist-info}/WHEEL +0 -0
  110. {imap_processing-0.16.2.dist-info → imap_processing-0.18.0.dist-info}/entry_points.txt +0 -0
@@ -16,27 +16,73 @@ TIME_PER_BIN = 0.167 # seconds
 
 
 def solve_full_sweep_energy(
-    esa_lvl5_data: np.ndarray, esa_table_df: pd.DataFrame, lut_notes_df: pd.DataFrame
+    esa_lvl5_data: np.ndarray,
+    sweep_table: np.ndarray,
+    esa_table_df: pd.DataFrame,
+    lut_notes_df: pd.DataFrame,
+    data_time: npt.NDArray[np.datetime64],
 ) -> npt.NDArray:
     """
     Calculate the energy of each full sweep data.
 
+    Get the fixed energy values for steps 0-62 using the
+    esa_table_df information. It's important to ensure
+    that the correct fixed energy values are selected for
+    the specified time, as the sweep table can contain
+    different values depending on the operational phase
+    (e.g., I+T, pre-launch, post-launch). There may be
+    more fixed energy added in the future. TODO: add
+    document section once SWAPI document is updated.
+
+    Now, find the last 9 fine energy values using steps
+    noted in the section x in the algorithm document.
+
     Parameters
     ----------
     esa_lvl5_data : numpy.ndarray
         The L1 data input.
+    sweep_table : numpy.ndarray
+        Sweep table information.
     esa_table_df : pandas.DataFrame
         The ESA unit conversion table that contains first 63 energies.
     lut_notes_df : pandas.DataFrame
         The LUT notes table that contains the last 9 fine energies.
+    data_time : numpy.ndarray
+        The collection time of the data.
 
     Returns
     -------
     energy : numpy.ndarray
         The energy of each full sweep data.
     """
-    # Read 0 - 62 energy steps' fixed energy value
-    fixed_energy_values = esa_table_df["Energy"].values[:63]
+    # Convert timestamp from string to datetime
+    # and to the same format as data_time
+    esa_table_df["timestamp"] = pd.to_datetime(
+        esa_table_df["timestamp"], format="%m/%d/%Y %H:%M"
+    )
+    esa_table_df["timestamp"] = esa_table_df["timestamp"].to_numpy(
+        dtype="datetime64[ns]"
+    )
+
+    first_63_energies = []
+
+    for time, sweep_id in zip(data_time, sweep_table):
+        # Find the sweep's ESA data for the given time and sweep_id
+        subset = esa_table_df[
+            (esa_table_df["timestamp"] <= time) & (esa_table_df["Sweep #"] == sweep_id)
+        ]
+        if subset.empty:
+            first_63_energies.append(np.full(63, np.nan, dtype=np.float64))
+            continue
+
+        # Subset data can contain multiple 72 energy values with last 9 fine energies
+        # with 'Solve' value. We need to sort by time and ESA step to maintain correct
+        # order. Then take the last group of 72 steps values and select first 63
+        # values only.
+        subset = subset.sort_values(["timestamp", "ESA Step #"])
+        grouped = subset["Energy"].values.reshape(-1, 72)
+        first_63 = grouped[-1, :63]
+        first_63_energies.append(first_63)
 
     # Find last 9 fine energy values of all sweeps data
     # -------------------------------------------------
@@ -96,13 +142,9 @@ def solve_full_sweep_energy(
     # order it should be in:
     # [64, 65, 66, 67, 68, 69, 70, 71, 72]
     energy_values = np.flip(energy_values, axis=1)
-    # Expand to match the number of rows in energy_values
-    first_63_values = np.tile(
-        fixed_energy_values, (energy_values.shape[0], 1)
-    )  # (epoch, 63)
 
     # Append the first_63_values in front of energy_values
-    sweeps_energy_value = np.hstack((first_63_values, energy_values))
+    sweeps_energy_value = np.hstack([first_63_energies, energy_values])
 
     return sweeps_energy_value
 
@@ -149,15 +191,16 @@ def swapi_l2(
     # Copy over only certain variables from L1 to L2 dataset
     l1_data_keys = [
         "epoch",
+        "esa_lvl5",
         "esa_step",
         "esa_step_label",
-        "swp_l1a_flags",
-        "sweep_table",
-        "plan_id",
-        "lut_choice",
-        "fpga_type",
         "fpga_rev",
-        "esa_lvl5",
+        "fpga_type",
+        "lut_choice",
+        "plan_id",
+        "sci_start_time",
+        "sweep_table",
+        "swp_l1a_flags",
     ]
     l2_dataset = l1_dataset[l1_data_keys]
 
@@ -168,8 +211,10 @@ def swapi_l2(
     esa_lvl5_hex = np.vectorize(lambda x: format(x, "X"))(l1_dataset["esa_lvl5"].values)
     esa_energy = solve_full_sweep_energy(
         esa_lvl5_hex,
+        l1_dataset["sweep_table"].data,
         esa_table_df=esa_table_df,
         lut_notes_df=lut_notes_df,
+        data_time=np.array(l1_dataset["epoch"].data, dtype="datetime64[ns]"),
     )
 
     l2_dataset["swp_esa_energy"] = xr.DataArray(
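The hunks above appear to come from imap_processing/swapi/l2/swapi_l2.py (entry 68 in the file list): the first 63 energy steps are no longer taken as a single fixed column but are selected per packet, keyed on the packet time and the sweep table ID. The sketch below reproduces that selection logic on an invented two-upload ESA table; only the column names ("timestamp", "Sweep #", "ESA Step #", "Energy") are taken from the diff, every value is illustrative.

import numpy as np
import pandas as pd

# Hypothetical ESA table: two uploads of sweep #0, 72 steps each. Only the most
# recent upload at or before the packet time should be used.
n_steps = 72
base_energies = np.linspace(100.0, 19000.0, n_steps)
esa_table_df = pd.DataFrame(
    {
        "timestamp": ["01/01/2025 00:00"] * n_steps + ["06/01/2025 00:00"] * n_steps,
        "Sweep #": [0] * (2 * n_steps),
        "ESA Step #": list(range(n_steps)) * 2,
        "Energy": list(base_energies) + list(base_energies * 1.01),
    }
)
esa_table_df["timestamp"] = pd.to_datetime(
    esa_table_df["timestamp"], format="%m/%d/%Y %H:%M"
)

data_time = np.array(["2025-07-01T00:00"], dtype="datetime64[ns]")
sweep_table = np.array([0])

first_63_energies = []
for time, sweep_id in zip(data_time, sweep_table):
    # Keep only uploads at or before this packet's time, for this sweep ID.
    subset = esa_table_df[
        (esa_table_df["timestamp"] <= time) & (esa_table_df["Sweep #"] == sweep_id)
    ]
    if subset.empty:
        first_63_energies.append(np.full(63, np.nan))
        continue
    # Sort, group into blocks of 72 steps, take the latest upload, keep steps 0-62.
    subset = subset.sort_values(["timestamp", "ESA Step #"])
    grouped = subset["Energy"].values.reshape(-1, 72)
    first_63_energies.append(grouped[-1, :63])

print(np.asarray(first_63_energies).shape)  # (1, 63) -> one row per packet epoch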
@@ -51,7 +51,7 @@ def read_swapi_lut_table(file_path: Path) -> pd.DataFrame:
         .astype(str)
         .str.replace(",", "", regex=False)
         .replace("Solve", -1)
-        .astype(np.int64)
+        .astype(np.float64)
     )
 
     return df
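The one-line change above appears to be in imap_processing/swapi/swapi_utils.py (entry 69): the cleaned LUT column is now cast to float64 rather than int64. A plausible reason, sketched below with made-up values, is that the column mixes real-valued energies with the "Solve" placeholder, and an integer cast would reject the fractional entries.

import numpy as np
import pandas as pd

# Hypothetical LUT column: numeric strings (with thousands separators) plus the
# "Solve" placeholder used for the last nine fine-energy steps.
raw = pd.Series(["1,234.5", "987.6", "Solve"])

cleaned = (
    raw.astype(str)
    .str.replace(",", "", regex=False)
    .replace("Solve", -1)
    .astype(np.float64)  # float64 keeps fractional energies; int64 would raise here
)
print(cleaned.tolist())  # [1234.5, 987.6, -1.0]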
@@ -761,12 +761,14 @@ def swe_l1b_science(dependencies: ProcessingInputCollection) -> xr.Dataset:
 
     count_rate = convert_counts_to_rate(inflight_applied_count, acq_duration)
 
+    # Statistical uncertainty is sqrt(decompressed counts)
+    # TODO: Update this if SWE like to include deadtime correciton.
+    counts_stat_uncert = np.sqrt(populated_data["science_data"])
+
     # Store ESA energies of full cycle for L2 purposes.
     esa_energies = get_esa_energy_pattern(esa_lut_files[0])
-    # Repeat energies to be in the same shape as the science data
-    esa_energies = np.repeat(esa_energies, total_packets // 4).reshape(
-        -1, swe_constants.N_ESA_STEPS, swe_constants.N_ANGLE_SECTORS
-    )
+    # Repeat the (24, 30) energy pattern n_cycles times along a new first axis
+    esa_energies = np.repeat(esa_energies[np.newaxis, :, :], total_packets // 4, axis=0)
     # Convert voltage to electron energy in eV by apply conversion factor
     esa_energies = esa_energies * swe_constants.ENERGY_CONVERSION_FACTOR
     # ------------------------------------------------------------------
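This hunk appears to be in imap_processing/swe/l1b/swe_l1b.py (entry 70). It carries a Poisson-style statistical uncertainty, sqrt(decompressed counts), into the L1B product, and it tiles the (24, 30) ESA energy pattern once per cycle along a new leading axis instead of flattening and reshaping it. The shapes below follow the constants named in the diff (N_ESA_STEPS x N_ANGLE_SECTORS = 24 x 30, 7 CEMs); the counts themselves are random placeholders.

import numpy as np

n_esa_steps, n_angle_sectors, n_cems = 24, 30, 7
n_cycles = 3  # stand-in for total_packets // 4

# Dummy (24, 30) energy pattern and dummy decompressed counts.
esa_energy_pattern = np.arange(n_esa_steps * n_angle_sectors, dtype=float).reshape(
    n_esa_steps, n_angle_sectors
)
counts = np.random.default_rng(0).poisson(
    50.0, size=(n_cycles, n_esa_steps, n_angle_sectors, n_cems)
)

# Statistical uncertainty as in the new code: square root of the counts.
counts_stat_uncert = np.sqrt(counts)

# New tiling: add a leading cycle axis and repeat the whole (24, 30) block per
# cycle, so every cycle sees the identical energy pattern.
esa_energies = np.repeat(esa_energy_pattern[np.newaxis, :, :], n_cycles, axis=0)
print(esa_energies.shape)                                 # (3, 24, 30)
print(np.array_equal(esa_energies[0], esa_energies[-1]))  # True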
@@ -894,6 +896,11 @@ def swe_l1b_science(dependencies: ProcessingInputCollection) -> xr.Dataset:
         dims=["epoch", "esa_step", "spin_sector", "cem_id"],
         attrs=cdf_attrs.get_variable_attributes("science_data"),
     )
+    science_dataset["counts_stat_uncert"] = xr.DataArray(
+        counts_stat_uncert,
+        dims=["epoch", "esa_step", "spin_sector", "cem_id"],
+        attrs=cdf_attrs.get_variable_attributes("counts_stat_uncert"),
+    )
     science_dataset["acquisition_time"] = xr.DataArray(
         acq_time,
         dims=["epoch", "esa_step", "spin_sector"],
@@ -14,9 +14,11 @@ from imap_processing.spice.spin import get_instrument_spin_phase, get_spin_angle
 from imap_processing.swe.utils import swe_constants
 
 
-def calculate_phase_space_density(l1b_dataset: xr.Dataset) -> npt.NDArray:
+def calculate_phase_space_density(
+    data: np.ndarray, particle_energy_data: np.ndarray
+) -> npt.NDArray:
     """
-    Convert counts to phase space density.
+    Convert counts or uncertainty data to phase space density.
 
     Calculate phase space density is represented by this symbol, fv.
     Its unit is s^3/ (cm^6 * ster).
@@ -49,8 +51,11 @@ def calculate_phase_space_density(l1b_dataset: xr.Dataset) -> npt.NDArray:
 
     Parameters
     ----------
-    l1b_dataset : xarray.Dataset
-        The L1B dataset to process.
+    data : numpy.ndarray
+        The data to process. Two expected inputs are counts or uncertainty data.
+    particle_energy_data : numpy.ndarray
+        The energy values in eV. This is the energy values from the
+        "esa_energy" variable in the L1B dataset.
 
     Returns
     -------
@@ -58,18 +63,14 @@ def calculate_phase_space_density(l1b_dataset: xr.Dataset) -> npt.NDArray:
         Phase space density. We need to call this phase space density because
         there will be density in L3 processing.
     """
-    # Get energy values.
-    particle_energy_data = l1b_dataset["esa_energy"].values
-
     # Calculate phase space density using formula:
-    # 2 * (C/tau) / (G * 1.237e31 * eV^2)
+    # 2 * ((C/tau) or uncertainty data) / (G * 1.237e31 * eV^2)
     # See doc string for more details.
-    density = (2 * l1b_dataset["science_data"]) / (
+    phase_space_density = (2 * data) / (
         swe_constants.GEOMETRIC_FACTORS[np.newaxis, np.newaxis, np.newaxis, :]
         * swe_constants.VELOCITY_CONVERSION_FACTOR
         * particle_energy_data[:, :, :, np.newaxis] ** 2
     )
-    phase_space_density = density.data
 
     return phase_space_density
 
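calculate_phase_space_density (in what appears to be imap_processing/swe/l2/swe_l2.py, entry 71) now takes plain arrays, so the same routine can be applied to counts and to their statistical uncertainties. The broadcasting it relies on is sketched below with placeholder constants; only the axis layout (epoch, esa_step, spin_sector, cem_id) and the 2 * data / (G * factor * E**2) structure come from the diff.

import numpy as np

n_epoch, n_esa, n_spin, n_cems = 2, 24, 30, 7

data = np.ones((n_epoch, n_esa, n_spin, n_cems))    # count rates or their uncertainty
energy = np.full((n_epoch, n_esa, n_spin), 100.0)   # eV per (epoch, esa_step, spin_sector)
geometric_factors = np.full(n_cems, 1e-5)           # placeholder per-CEM values
velocity_conversion_factor = 1.237e31               # constant quoted in the diff comment

# Per-CEM factors broadcast over the last axis; energy**2 broadcasts over the CEM axis.
phase_space_density = (2 * data) / (
    geometric_factors[np.newaxis, np.newaxis, np.newaxis, :]
    * velocity_conversion_factor
    * energy[:, :, :, np.newaxis] ** 2
)
print(phase_space_density.shape)  # (2, 24, 30, 7)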
@@ -114,7 +115,7 @@ def calculate_flux(
     Parameters
     ----------
     phase_space_density : numpy.ndarray
-        The phase space density.
+        The phase space density of counts or uncertainty data.
     esa_energy : numpy.ndarray
         The energy values in eV.
 
@@ -131,6 +132,70 @@ def calculate_flux(
     return flux
 
 
+def put_uncertainty_into_angle_bins(
+    data: np.ndarray, angle_bin_indices: npt.NDArray[np.int_]
+) -> npt.NDArray:
+    """
+    Put uncertainty data in its angle bins.
+
+    This function bins uncertainty data into 30 predefined angle bins
+    while preserving the original energy step structure.
+
+    Since multiple data points can fall into the same angle bin,
+    this function computes the combined uncertainty for the bin.
+
+    Parameters
+    ----------
+    data : numpy.ndarray
+        Uncertainty data to put in bins. Shape:
+        (full_cycle_data, N_ESA_STEPS, N_ANGLE_BINS, N_CEMS).
+    angle_bin_indices : numpy.ndarray
+        Indices of angle bins to put data in. Shape:
+        (full_cycle_data, N_ESA_STEPS, N_ANGLE_BINS).
+
+    Returns
+    -------
+    numpy.ndarray
+        Data in bins. Shape:
+        (full_cycle_data, N_ESA_STEPS, N_ANGLE_BINS, N_CEMS).
+    """
+    # Initialize with zeros instead of NaN because np.add.at() does not
+    # work with nan values. It results in nan + value = nan
+    binned_data = np.zeros(
+        (
+            data.shape[0],
+            swe_constants.N_ESA_STEPS,
+            swe_constants.N_ANGLE_BINS,
+            swe_constants.N_CEMS,
+        ),
+        dtype=np.float64,
+    )
+
+    time_indices = np.arange(data.shape[0])[:, None, None]
+    energy_indices = np.arange(swe_constants.N_ESA_STEPS)[None, :, None]
+
+    # Calculate new uncertainty of each uncertainty data in the bins.
+    # Per SWE instruction:
+    # At L1B, 'data' is result from sqrt(counts). Now in L2, average
+    # uncertainty data using this formula:
+    # sqrt(
+    #     sum(
+    #         (unc_1) ** 2 + (unc_2) ** 2 + ... + (unc_n) ** 2
+    #     )
+    # )
+    # TODO: SWE want to add more defined formula based on spin data and
+    # counts uncertainty from it in the future.
+
+    # Use np.add.at() to put values into bins and add values in the bins into one.
+    # Here, we are applying power of 2 to each data point before summing them.
+    np.add.at(
+        binned_data,
+        (time_indices, energy_indices, angle_bin_indices),
+        data**2,
+    )
+    return np.sqrt(binned_data)
+
+
 def put_data_into_angle_bins(
     data: np.ndarray, angle_bin_indices: npt.NDArray[np.int_]
 ) -> npt.NDArray:
@@ -142,10 +207,8 @@ def put_data_into_angle_bins(
     full cycle, it assigns data to the corresponding angle bin
     based on the provided indices.
 
-    Since multiple data points may fall into the same angle bin,
-    the function accumulates values and computes the average across
-    all 7 CEMs, ensuring that each bin contains a representative
-    mean value while maintaining the 7 CEM structure.
+    Since multiple data points can fall into the same angle bin,
+    this function computes the combined averages.
 
     Parameters
     ----------
@@ -177,7 +240,7 @@ def put_data_into_angle_bins(
     time_indices = np.arange(data.shape[0])[:, None, None]
     energy_indices = np.arange(swe_constants.N_ESA_STEPS)[None, :, None]
 
-    # Use np.add.at() to accumulate values into bins
+    # Use np.add.at() to put values into bins and add values in the bins into one.
     np.add.at(binned_data, (time_indices, energy_indices, angle_bin_indices), data)
 
     # Count occurrences in each bin to compute the mean.
@@ -343,7 +406,9 @@ def swe_l2(l1b_dataset: xr.Dataset) -> xr.Dataset:
     # Calculate phase space density and flux. Store data in shape
     # (epoch, esa_step, spin_sector, cem_id). This is for L3 purposes.
     ############################################################
-    phase_space_density = calculate_phase_space_density(l1b_dataset)
+    phase_space_density = calculate_phase_space_density(
+        l1b_dataset["science_data"].data, l1b_dataset["esa_energy"].data
+    )
     dataset["phase_space_density_spin_sector"] = xr.DataArray(
         phase_space_density,
         name="phase_space_density_spin_sector",
@@ -419,4 +484,33 @@ def swe_l2(l1b_dataset: xr.Dataset) -> xr.Dataset:
         attrs=cdf_attributes.get_variable_attributes("phase_space_density"),
     )
 
+    #######################################################
+    # Calculate flux and phase space density of uncertainty data.
+    # Put uncertainty data in its angle bins.
+    #######################################################
+    # Calculate phase space density for uncertainty data.
+    phase_space_density_uncert = calculate_phase_space_density(
+        l1b_dataset["counts_stat_uncert"].data, l1b_dataset["esa_energy"].data
+    )
+    # Put uncertainty data into its spin angle bins and calculate new uncertainty
+    phase_space_density_uncert = put_uncertainty_into_angle_bins(
+        phase_space_density_uncert, spin_angle_bins_indices
+    )
+    dataset["psd_stat_uncert"] = xr.DataArray(
+        phase_space_density_uncert,
+        name="psd_stat_uncert",
+        dims=["epoch", "esa_step", "spin_sector", "cem_id"],
+        attrs=cdf_attributes.get_variable_attributes("psd_stat_uncert"),
+    )
+    # Calculate flux for uncertainty data.
+    flux_uncert = calculate_flux(
+        phase_space_density_uncert, l1b_dataset["esa_energy"].data
+    )
+    flux_uncert = put_uncertainty_into_angle_bins(flux_uncert, spin_angle_bins_indices)
+    dataset["flux_stat_uncert"] = xr.DataArray(
+        flux_uncert,
+        name="flux_stat_uncert",
+        dims=["epoch", "esa_step", "spin_sector", "cem_id"],
+        attrs=cdf_attributes.get_variable_attributes("flux_stat_uncert"),
+    )
     return dataset
@@ -80,4 +80,52 @@ class UltraConstants:
     CULLING_RPM_MAX = 6.0
 
     # Thresholds for culling based on counts.
-    CULLING_ENERGY_BIN_EDGES: ClassVar[list] = [0, 10, 20, 1e5]
+    CULLING_ENERGY_BIN_EDGES: ClassVar[list] = [
+        3.385,
+        4.13722222222222,
+        4.13722222222222,
+        5.05660493827161,
+        5.05660493827161,
+        6.18029492455419,
+        6.18029492455419,
+        7.55369379667734,
+        7.55369379667734,
+        9.23229241816119,
+        9.23229241816119,
+        11.2839129555303,
+        11.2839129555303,
+        13.7914491678704,
+        13.7914491678704,
+        16.8562156496194,
+        16.8562156496194,
+        20.6020413495348,
+        20.6020413495348,
+        25.1802727605426,
+        25.1802727605426,
+        30.775888929552,
+        30.775888929552,
+        37.6149753583414,
+        37.6149753583414,
+        45.9738587713061,
+        45.9738587713061,
+        56.1902718315964,
+        56.1902718315964,
+        68.6769989052845,
+        68.6769989052845,
+        83.93855421757,
+        83.93855421757,
+        102.591566265919,
+        102.591566265919,
+        125.38969210279,
+        125.38969210279,
+        153.254068125632,
+        153.254068125632,
+        187.310527709106,
+        187.310527709106,
+        228.93508942224,
+        228.93508942224,
+        279.809553738294,
+        279.809553738294,
+        341.989454569026,
+        1e5,
+    ]
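The expanded CULLING_ENERGY_BIN_EDGES (in what appears to be imap_processing/ultra/constants.py, entry 72) lists each interior edge twice, which reads like a flat sequence of 24 contiguous (lower, upper) energy pairs running from 3.385 up to the 1e5 overflow edge. The diff does not show how the list is consumed, so the pairing below is only a hedged illustration.

import numpy as np

# First six values copied from the diff; the full list ends at 1e5.
culling_energy_bin_edges = [
    3.385,
    4.13722222222222,
    4.13722222222222,
    5.05660493827161,
    5.05660493827161,
    6.18029492455419,
]

# Assumption (not stated in the diff): the flat list encodes consecutive
# (lower, upper) pairs; reshaping makes the contiguous bins explicit.
edges = np.asarray(culling_energy_bin_edges).reshape(-1, 2)
print(edges.shape)  # (3, 2): three of the 24 bins implied by the full 48-value list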
@@ -4,6 +4,7 @@ import numpy as np
 from numpy.typing import NDArray
 
 from imap_processing.ultra.l0.ultra_utils import (
+    PacketProperties,
     parse_event,
 )
 from imap_processing.utils import convert_to_binary_string
@@ -53,15 +54,15 @@ def log_decompression(value: int, mantissa_bit_length: int) -> int:
     """
     Perform logarithmic decompression on an integer.
 
-    Supports both 16-bit and 8-bit formats based on the specified
+    Supports 16-bit, 10-bit, and 8-bit formats based on the specified
     mantissa bit length.
 
     Parameters
     ----------
     value : int
-        An integer comprised of a 4-bit exponent followed by a variable-length mantissa.
+        An integer comprised of an exponent followed by a mantissa.
     mantissa_bit_length : int
-        The bit length of the mantissa (default is 12 for 16-bit format).
+        The bit length of the mantissa.
 
     Returns
     -------
@@ -72,6 +73,9 @@ def log_decompression(value: int, mantissa_bit_length: int) -> int:
     if mantissa_bit_length == 12:
         base_value = 4096
         mantissa_mask = 0xFFF
+    elif mantissa_bit_length == 5:
+        base_value = 32
+        mantissa_mask = 0x1F
     elif mantissa_bit_length == 4:
         base_value = 16
         mantissa_mask = 0x0F
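The added branch gives log_decompression a 5-bit-mantissa mode (base 32, mask 0x1F) next to the existing 12-bit and 4-bit ones, matching the docstring's new 10-bit format. The sketch below shows how such an exponent/mantissa split typically decodes; the mask and base values come from the diff, but the reconstruction formula at the end is a common convention for this style of log compression and is assumed here, not quoted from the package.

def log_decompress_sketch(value: int, mantissa_bit_length: int) -> int:
    """Hedged sketch of exponent/mantissa log decompression, not the package's exact code."""
    # Mantissa widths and their (base, mask) pairs, per the diff.
    params = {
        12: (4096, 0xFFF),  # 16-bit format
        5: (32, 0x1F),      # 10-bit format, new in this release
        4: (16, 0x0F),      # 8-bit format
    }
    base_value, mantissa_mask = params[mantissa_bit_length]

    exponent = value >> mantissa_bit_length
    mantissa = value & mantissa_mask

    # Assumed reconstruction: exponent 0 stores the value directly; otherwise
    # restore the implicit leading bit and shift by (exponent - 1).
    if exponent == 0:
        return value
    return (base_value + mantissa) << (exponent - 1)


# A 10-bit word with exponent 0b00011 and mantissa 0b10000 decodes to (32 + 16) << 2.
print(log_decompress_sketch((0b00011 << 5) | 0b10000, mantissa_bit_length=5))  # 192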
@@ -152,8 +156,7 @@ def decompress_binary(
 def decompress_image(
     pixel0: int,
     binary_data: str,
-    width_bit: int,
-    mantissa_bit_length: int,
+    packet_props: PacketProperties,
 ) -> NDArray:
     """
     Will decompress a binary string representing an image into a matrix of pixel values.
@@ -168,10 +171,9 @@ def decompress_image(
         The first, unmodified pixel p0,0.
     binary_data : str
         Binary string.
-    width_bit : int
-        The bit width that describes the width of data in the block.
-    mantissa_bit_length : int
-        The bit length of the mantissa.
+    packet_props : PacketProperties
+        Properties of the packet, including width bit, mantissa bit length and pixel
+        window dimensions.
 
     Returns
     -------
@@ -184,10 +186,18 @@ def decompress_image(
     This process is described starting on page 168 in IMAP-Ultra Flight
     Software Specification document.
     """
-    rows = 54
-    cols = 180
+    rows = packet_props.pixel_window_rows
+    cols = packet_props.pixel_window_columns
+    width_bit = packet_props.width
+    mantissa_bit_length = packet_props.mantissa_bit_length
     pixels_per_block = 15
 
+    if width_bit is None or rows is None or cols is None or mantissa_bit_length is None:
+        raise ValueError(
+            "Packet properties must specify pixel window dimensions, "
+            "width bit, and mantissa bit length for this packet type."
+        )
+
     blocks_per_row = cols // pixels_per_block
 
     # Compressed pixel matrix
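decompress_image now pulls its geometry from a PacketProperties object instead of the hard-coded 54 x 180 window, and rejects packet types whose properties leave those fields unset. The cut-down, hypothetical stand-in below only shows the shape of that guard; the real PacketProperties in imap_processing.ultra.l0.ultra_utils carries more fields than this.

from dataclasses import dataclass
from typing import Optional


@dataclass
class PacketPropertiesSketch:
    """Hypothetical subset of the fields decompress_image reads."""

    width: Optional[int] = None
    mantissa_bit_length: Optional[int] = None
    pixel_window_rows: Optional[int] = None
    pixel_window_columns: Optional[int] = None


def check_image_props(props: PacketPropertiesSketch) -> None:
    # Mirrors the guard added in the diff: all four fields must be set for
    # image-type packets.
    if None in (
        props.width,
        props.mantissa_bit_length,
        props.pixel_window_rows,
        props.pixel_window_columns,
    ):
        raise ValueError(
            "Packet properties must specify pixel window dimensions, "
            "width bit, and mantissa bit length for this packet type."
        )


# The formerly hard-coded 54 x 180 geometry would now travel with the packet
# definition (the width and mantissa values here are placeholders).
check_image_props(
    PacketPropertiesSketch(
        width=4, mantissa_bit_length=4, pixel_window_rows=54, pixel_window_columns=180
    )
)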
@@ -239,6 +249,7 @@ def decompress_image(
 def read_image_raw_events_binary(
     event_data: bytes,
     count: int,
+    field_ranges: dict,
 ) -> NDArray:
     """
     Convert contents of binary string 'EVENTDATA' into values.
@@ -249,6 +260,8 @@ def read_image_raw_events_binary(
         Event data.
     count : int
         Number of events.
+    field_ranges : dict
+        Field ranges for the event data.
 
     Returns
     -------
@@ -256,15 +269,16 @@ def read_image_raw_events_binary(
         Event data.
     """
     binary = convert_to_binary_string(event_data)
-    # 166 bits per event
-    event_length = 166 if count else 0
+    length = max(end for (_, end) in field_ranges.values())
+    # bits per event
+    event_length = length if count else 0
     event_data_list = []
 
     # For all packets with event data, parses the binary string
     for i in range(count):
         start_index = i * event_length
         event_binary = binary[start_index : start_index + event_length]
-        parsed_event = parse_event(event_binary)
+        parsed_event = parse_event(event_binary, field_ranges)
         event_data_list.append(parsed_event)
 
     return np.array(event_data_list)
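read_image_raw_events_binary now derives the per-event bit length from a field_ranges mapping rather than the hard-coded 166 bits, and hands the mapping to parse_event. The sketch below shows the length computation and a parse_event-style slicing of one event; the field names and bit ranges are invented, and the real parse_event is not reproduced.

# Hypothetical field layout: name -> (start_bit, end_bit), end-exclusive.
field_ranges = {
    "coin_type": (0, 2),
    "start_type": (2, 4),
    "energy_ph": (4, 16),
}

# Per-event length is the largest end offset, as in the diff.
event_length = max(end for (_, end) in field_ranges.values())
print(event_length)  # 16

# Slice one event's bits into named integer fields, parse_event-style.
binary = "01" + "10" + "000000001010" + "0" * 16  # one 16-bit event plus padding
event_binary = binary[:event_length]
parsed = {
    name: int(event_binary[start:end], 2)
    for name, (start, end) in field_ranges.items()
}
print(parsed)  # {'coin_type': 1, 'start_type': 2, 'energy_ph': 10}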