imap-processing 0.16.1__py3-none-any.whl → 0.17.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of imap-processing might be problematic.

Files changed (46)
  1. imap_processing/_version.py +2 -2
  2. imap_processing/cdf/config/imap_codice_l1a_variable_attrs.yaml +24 -0
  3. imap_processing/cdf/config/imap_codice_l1b_variable_attrs.yaml +24 -0
  4. imap_processing/cdf/config/imap_hi_variable_attrs.yaml +8 -8
  5. imap_processing/cdf/config/imap_hit_global_cdf_attrs.yaml +1 -1
  6. imap_processing/cdf/config/imap_hit_l2_variable_attrs.yaml +394 -411
  7. imap_processing/cdf/config/imap_idex_global_cdf_attrs.yaml +9 -9
  8. imap_processing/cdf/config/imap_idex_l2b_variable_attrs.yaml +150 -57
  9. imap_processing/cdf/config/imap_swapi_variable_attrs.yaml +19 -0
  10. imap_processing/cdf/config/imap_swe_l1b_variable_attrs.yaml +20 -0
  11. imap_processing/cdf/config/imap_swe_l2_variable_attrs.yaml +39 -0
  12. imap_processing/cdf/config/imap_ultra_global_cdf_attrs.yaml +108 -0
  13. imap_processing/cdf/config/imap_ultra_l1a_variable_attrs.yaml +103 -2
  14. imap_processing/cdf/utils.py +7 -1
  15. imap_processing/cli.py +14 -8
  16. imap_processing/codice/codice_l1a.py +89 -30
  17. imap_processing/hi/hi_l1a.py +4 -4
  18. imap_processing/hi/hi_l1b.py +2 -2
  19. imap_processing/hi/packet_definitions/TLM_HI_COMBINED_SCI.xml +218 -38
  20. imap_processing/hit/hit_utils.py +2 -2
  21. imap_processing/hit/l0/decom_hit.py +2 -1
  22. imap_processing/hit/l2/hit_l2.py +2 -1
  23. imap_processing/ialirt/l0/process_codice.py +4 -34
  24. imap_processing/idex/idex_constants.py +7 -0
  25. imap_processing/idex/idex_l2b.py +372 -55
  26. imap_processing/lo/l0/lo_star_sensor.py +48 -0
  27. imap_processing/lo/l1a/lo_l1a.py +32 -32
  28. imap_processing/mag/l0/decom_mag.py +9 -6
  29. imap_processing/mag/l0/mag_l0_data.py +46 -0
  30. imap_processing/swapi/l1/swapi_l1.py +12 -2
  31. imap_processing/swapi/l2/swapi_l2.py +7 -6
  32. imap_processing/swe/l1b/swe_l1b.py +9 -0
  33. imap_processing/swe/l2/swe_l2.py +111 -17
  34. imap_processing/ultra/l0/decom_tools.py +13 -6
  35. imap_processing/ultra/l0/decom_ultra.py +190 -4
  36. imap_processing/ultra/l0/ultra_utils.py +184 -3
  37. imap_processing/ultra/l1a/ultra_l1a.py +52 -4
  38. imap_processing/ultra/packet_definitions/ULTRA_SCI_COMBINED.xml +3 -3
  39. imap_processing/utils.py +20 -42
  40. {imap_processing-0.16.1.dist-info → imap_processing-0.17.0.dist-info}/METADATA +1 -1
  41. {imap_processing-0.16.1.dist-info → imap_processing-0.17.0.dist-info}/RECORD +44 -45
  42. imap_processing/lo/l0/data_classes/star_sensor.py +0 -98
  43. imap_processing/lo/l0/utils/lo_base.py +0 -57
  44. {imap_processing-0.16.1.dist-info → imap_processing-0.17.0.dist-info}/LICENSE +0 -0
  45. {imap_processing-0.16.1.dist-info → imap_processing-0.17.0.dist-info}/WHEEL +0 -0
  46. {imap_processing-0.16.1.dist-info → imap_processing-0.17.0.dist-info}/entry_points.txt +0 -0
@@ -368,8 +368,8 @@ def add_energy_variables(
     """
     updated_ds = dataset.copy()
 
-    energy_mean = np.mean(
-        np.array([energy_min_values, energy_max_values]), axis=0
+    energy_mean = np.round(
+        np.mean(np.array([energy_min_values, energy_max_values]), axis=0), 3
     ).astype(np.float32)
 
     updated_ds[f"{particle}_energy_mean"] = xr.DataArray(
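
A minimal standalone sketch of what the new rounding does, with illustrative values (the real bounds come from the instrument tables): the bin midpoint is computed from the min/max bounds, rounded to three decimals, and only then cast to float32.

    import numpy as np

    # Hypothetical energy bin bounds, for illustration only.
    energy_min_values = np.array([1.8, 2.2, 2.7])
    energy_max_values = np.array([2.2, 2.7, 3.2])
    energy_mean = np.round(
        np.mean(np.array([energy_min_values, energy_max_values]), axis=0), 3
    ).astype(np.float32)
    print(energy_mean)  # [2.   2.45 2.95]
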
@@ -116,8 +116,9 @@ def parse_count_rates(sci_dataset: xr.Dataset) -> None:
         else:
             dims = ["epoch"]
 
+        dtype = np.uint8 if field == "hdr_dynamic_threshold_state" else np.int64
         sci_dataset[field] = xr.DataArray(
-            np.array(parsed_data, dtype=np.int64), dims=dims, name=field
+            np.array(parsed_data, dtype=dtype), dims=dims, name=field
         )
         # Add dimensions to coordinates
         for dim in dims:
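
The effect of the narrower dtype, sketched with made-up values (the real field is parsed from the packets):

    import numpy as np

    # hdr_dynamic_threshold_state is a small state code, so uint8 suffices;
    # the values here are illustrative only.
    parsed_data = [0, 1, 2, 3, 1]
    print(np.array(parsed_data, dtype=np.int64).nbytes)  # 40
    print(np.array(parsed_data, dtype=np.uint8).nbytes)  # 5
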
@@ -137,7 +137,7 @@ def add_cdf_attributes(
         label_array = xr.DataArray(
             dataset[dim].values.astype(str),
             name=f"{dim}_label",
-            dims=[dim],
+            dims=[f"{dim}_label"],
             attrs=attr_mgr.get_variable_attributes(
                 f"{dim}_label", check_schema=False
             ),
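
A minimal sketch of the dims fix, with the package's attr_mgr left out: giving the label variable its own {dim}_label dimension keeps the labels from being attached to the data dimension itself.

    import numpy as np
    import xarray as xr

    # Hypothetical coordinate; attrs omitted for brevity.
    dataset = xr.Dataset(coords={"energy": np.array([1.0, 2.0, 4.0])})
    dataset["energy_label"] = xr.DataArray(
        dataset["energy"].values.astype(str),
        name="energy_label",
        dims=["energy_label"],  # the fix: a dedicated label dimension
    )
    print(dataset["energy_label"].dims)  # ('energy_label',)
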
@@ -312,6 +312,7 @@ def calculate_intensities_for_a_species(
         The updated dataset with intensities calculated for the given species.
     """
     updated_ds = l2_dataset.copy()
+    # Get the dynamic threshold state for the species variable
     dynamic_threshold_states = updated_ds["dynamic_threshold_state"].values
     unique_states = np.unique(dynamic_threshold_states)
     species_name = (
@@ -6,10 +6,6 @@ from typing import Any
 
 import xarray as xr
 
-from imap_processing.codice import constants
-from imap_processing.ialirt.utils.time import calculate_time
-from imap_processing.spice.time import met_to_ttj2000ns, met_to_utc
-
 logger = logging.getLogger(__name__)
 
 FILLVAL_FLOAT32 = Decimal(str(-1.0e31))
@@ -28,7 +24,7 @@ def process_codice(
 
     Returns
     -------
-    codice_data : list[dict]
+    codice_data : tuple[list[dict[str, Any]], list[dict[str, Any]]]:
         Dictionary of final data product.
 
     Notes
@@ -58,34 +54,8 @@ def process_codice(
     # Create mock dataset for I-ALiRT SIT
     # TODO: Once I-ALiRT test data is acquired that actually has data in it,
     # we should be able to properly populate the I-ALiRT data, but for
-    # now, just create lists of dicts with FILLVALs
-    cod_lo_data = []
-    cod_hi_data = []
-
-    for epoch in range(len(dataset.epoch.data)):
-        sc_sclk_sec = dataset.sc_sclk_sec.data[epoch]
-        sc_sclk_sub_sec = dataset.sc_sclk_sub_sec.data[epoch]
-        met = calculate_time(sc_sclk_sec, sc_sclk_sub_sec, 256)
-        utc = met_to_utc(met).split(".")[0]
-        ttj2000ns = int(met_to_ttj2000ns(met))
-
-        epoch_data = {
-            "apid": int(dataset.pkt_apid[epoch].data),
-            "met": int(met),
-            "met_to_utc": utc,
-            "ttj2000ns": ttj2000ns,
-        }
-
-        # Add in CoDICE-Lo specific data
-        cod_lo_epoch_data = epoch_data.copy()
-        for field in constants.CODICE_LO_IAL_DATA_FIELDS:
-            cod_lo_epoch_data[f"codicelo_{field}"] = []
-        cod_lo_data.append(cod_lo_epoch_data)
-
-        # Add in CoDICE-Hi specific data
-        cod_hi_epoch_data = epoch_data.copy()
-        for field in constants.CODICE_HI_IAL_DATA_FIELDS:
-            cod_hi_epoch_data[f"codicehi_{field}"] = []
-        cod_hi_data.append(cod_hi_epoch_data)
+    # now, just create lists of dicts.
+    cod_lo_data: list[dict[str, Any]] = []
+    cod_hi_data: list[dict[str, Any]] = []
 
     return cod_lo_data, cod_hi_data
@@ -46,6 +46,13 @@ NS_TO_S = 1e-9
 # Microseconds to seconds conversion
 US_TO_S = 1e-6
 
+# Seconds in a day
+SECONDS_IN_DAY = 86400
+# Nanoseconds in day
+NANOSECONDS_IN_DAY = SECONDS_IN_DAY * int(1e9)
+# fg to kg conversion factor
+FG_TO_KG = 1e-15
+
 TARGET_HIGH_FREQUENCY_CUTOFF = 100
 
 TARGET_NOISE_FREQUENCY = 7000
@@ -21,26 +21,67 @@ Examples
     write_cdf(l2b_data)
 """
 
+import collections
 import logging
+from collections import defaultdict
+from datetime import datetime, timedelta
 
 import numpy as np
 import xarray as xr
 
-from imap_processing.idex.idex_constants import IDEXEvtAcquireCodes
-from imap_processing.idex.idex_utils import get_idex_attrs, setup_dataset
-from imap_processing.spice.time import epoch_to_doy
+from imap_processing.idex.idex_constants import (
+    FG_TO_KG,
+    SECONDS_IN_DAY,
+    IDEXEvtAcquireCodes,
+)
+from imap_processing.idex.idex_utils import get_idex_attrs
+from imap_processing.spice.time import epoch_to_doy, et_to_datetime64, ttj2000ns_to_et
 
 logger = logging.getLogger(__name__)
+# Bin edges
+MASS_BIN_EDGES = np.array(
+    [
+        6.31e-17,
+        1.00e-16,
+        1.58e-16,
+        2.51e-16,
+        3.98e-16,
+        6.31e-16,
+        1.00e-15,
+        1.58e-15,
+        2.51e-15,
+        3.98e-15,
+        1.00e-14,
+    ]
+)
+CHARGE_BIN_EDGES = np.array(
+    [
+        1.00e-01,
+        3.16e-01,
+        1.00e00,
+        3.16e00,
+        1.00e01,
+        3.16e01,
+        1.00e02,
+        3.16e02,
+        1.00e03,
+        3.16e03,
+        1.00e04,
+    ]
+)
+SPIN_PHASE_BIN_EDGES = np.array([0, 90, 180, 270, 360])
 
 
-def idex_l2b(l2a_dataset: xr.Dataset, evt_datasets: list[xr.Dataset]) -> xr.Dataset:
+def idex_l2b(
+    l2a_datasets: list[xr.Dataset], evt_datasets: list[xr.Dataset]
+) -> xr.Dataset:
     """
     Will process IDEX l2a data to create l2b data products.
 
     Parameters
     ----------
-    l2a_dataset : xarray.Dataset
-        IDEX L2a dataset to process.
+    l2a_datasets : list[xarray.Dataset]
+        IDEX L2a datasets to process.
     evt_datasets : list[xarray.Dataset]
         List of IDEX housekeeping event message datasets.
 
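
For reference, the new CHARGE_BIN_EDGES are half-decade (10**0.5) steps from 1.00e-01 to 1.00e+04, and MASS_BIN_EDGES follow 10**0.2 steps from 6.31e-17 up to 3.98e-15 before a final edge at 1.00e-14. A quick standalone check of the charge edges (not part of the package):

    import numpy as np

    # Half-decade steps: 10**-1, 10**-0.5, ..., 10**4 (11 edges).
    charge_bin_edges = 10.0 ** np.arange(-1, 4.5, 0.5)
    print(np.allclose(
        charge_bin_edges,
        [1.00e-01, 3.16e-01, 1.00e00, 3.16e00, 1.00e01, 3.16e01,
         1.00e02, 3.16e02, 1.00e03, 3.16e03, 1.00e04],
        rtol=2e-3,
    ))  # True
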
@@ -50,63 +91,268 @@ def idex_l2b(l2a_dataset: xr.Dataset, evt_datasets: list[xr.Dataset]) -> xr.Data
         The``xarray`` dataset containing the science data and supporting metadata.
     """
     logger.info(
-        f"Running IDEX L2B processing on dataset: {l2a_dataset.attrs['Logical_source']}"
+        f"Running IDEX L2B processing on datasets: "
+        f"{[ds.attrs['Logical_source'] for ds in l2a_datasets]}"
     )
 
     # create the attribute manager for this data level
     idex_attrs = get_idex_attrs("l2b")
-
     evt_dataset = xr.concat(evt_datasets, dim="epoch")
 
+    # Concat all the l2a datasets together
+    l2a_dataset = xr.concat(l2a_datasets, dim="epoch")
+    epoch_doy_unique = np.unique(epoch_to_doy(l2a_dataset["epoch"].data))
+    counts_by_charge, counts_by_mass, daily_epoch = compute_counts_by_charge_and_mass(
+        l2a_dataset, epoch_doy_unique
+    )
+    # Get science acquisition percentage for each day
+    daily_on_percentage = get_science_acquisition_on_percentage(evt_dataset)
+    rate_by_charge, rate_by_mass, rate_quality_flags = compute_rates_by_charge_and_mass(
+        counts_by_charge, counts_by_mass, epoch_doy_unique, daily_on_percentage
+    )
     # Create l2b Dataset
-    prefixes = ["latitude", "longitude", "_dust_mass_estimate", "_impact_charge"]
-    l2b_dataset = setup_dataset(l2a_dataset, prefixes, idex_attrs)
-    l2b_dataset.attrs = idex_attrs.get_global_attributes("imap_idex_l2b_sci")
-
-    # Get science acquisition start and stop times from event dataset
-    evt_logs, evt_time, evt_values = get_science_acquisition_timestamps(evt_dataset)
-    l2b_dataset["science_acquisition_messages"] = xr.DataArray(
-        name="science_acquisition_messages",
-        data=evt_logs.astype(str),
-        dims="epoch_science_acquisition",
-        attrs=idex_attrs.get_variable_attributes("science_acquisition_messages"),
+    charge_bins = np.arange(len(CHARGE_BIN_EDGES))
+    mass_bins = np.arange(len(CHARGE_BIN_EDGES))
+    spin_phase_bins = np.arange(len(SPIN_PHASE_BIN_EDGES) - 1)
+    epoch = xr.DataArray(
+        name="epoch",
+        data=daily_epoch,
+        dims="epoch",
+        attrs=idex_attrs.get_variable_attributes("epoch", check_schema=False),
     )
-    l2b_dataset["epoch_science_acquisition"] = xr.DataArray(
-        name="epoch_science_acquisition",
-        data=evt_time,
-        dims="epoch_science_acquisition",
-        attrs=idex_attrs.get_variable_attributes(
-            "epoch_science_acquisition", check_schema=False
+    vars = {
+        "impact_day_of_year": xr.DataArray(
+            name="impact_day_of_year",
+            data=epoch_doy_unique,
+            dims="epoch",
+            attrs=idex_attrs.get_variable_attributes("impact_day_of_year"),
+        ),
+        "rate_calculation_quality_flags": xr.DataArray(
+            name="rate_calculation_quality_flags",
+            data=rate_quality_flags,
+            dims="epoch",
+            attrs=idex_attrs.get_variable_attributes("rate_calculation_quality_flags"),
+        ),
+        "charge_labels": xr.DataArray(
+            name="impact_charge_labels",
+            data=charge_bins.astype(str),
+            dims="impact_charge_bins",
+            attrs=idex_attrs.get_variable_attributes(
+                "charge_labels", check_schema=False
+            ),
+        ),
+        "spin_phase_labels": xr.DataArray(
+            name="spin_phase_labels",
+            data=spin_phase_bins.astype(str),
+            dims="spin_phase_bins",
+            attrs=idex_attrs.get_variable_attributes(
+                "spin_phase_labels", check_schema=False
+            ),
         ),
+        "mass_labels": xr.DataArray(
+            name="mass_labels",
+            data=mass_bins.astype(str),
+            dims="mass_bins",
+            attrs=idex_attrs.get_variable_attributes("mass_labels", check_schema=False),
+        ),
+        "impact_charge_bins": xr.DataArray(
+            name="impact_charge_bins",
+            data=charge_bins,
+            dims="impact_charge_bins",
+            attrs=idex_attrs.get_variable_attributes(
+                "impact_charge_bins", check_schema=False
+            ),
+        ),
+        "mass_bins": xr.DataArray(
+            name="mass_bins",
+            data=mass_bins,
+            dims="mass_bins",
+            attrs=idex_attrs.get_variable_attributes("mass_bins", check_schema=False),
+        ),
+        "spin_phase_bins": xr.DataArray(
+            name="spin_phase_bins",
+            data=spin_phase_bins,
+            dims="spin_phase_bins",
+            attrs=idex_attrs.get_variable_attributes(
+                "spin_phase_bins", check_schema=False
+            ),
+        ),
+        "counts_by_charge": xr.DataArray(
+            name="counts_by_charge",
+            data=counts_by_charge.astype(np.int64),
+            dims=("epoch", "charge_bins", "spin_phase_bins"),
+            attrs=idex_attrs.get_variable_attributes("counts_by_charge"),
+        ),
+        "counts_by_mass": xr.DataArray(
+            name="counts_by_mass",
+            data=counts_by_mass.astype(np.int64),
+            dims=("epoch", "mass_bins", "spin_phase_bins"),
+            attrs=idex_attrs.get_variable_attributes("counts_by_mass"),
+        ),
+        "rate_by_charge": xr.DataArray(
+            name="rate_by_charge",
+            data=rate_by_charge,
+            dims=("epoch", "charge_bins", "spin_phase_bins"),
+            attrs=idex_attrs.get_variable_attributes("rate_by_charge"),
+        ),
+        "rate_by_mass": xr.DataArray(
+            name="rate_by_mass",
+            data=rate_by_mass,
+            dims=("epoch", "mass_bins", "spin_phase_bins"),
+            attrs=idex_attrs.get_variable_attributes("rate_by_mass"),
+        ),
+    }
+    l2b_dataset = xr.Dataset(
+        coords={"epoch": epoch},
+        data_vars=vars,
+        attrs=idex_attrs.get_global_attributes("imap_idex_l2b_sci"),
     )
-    l2b_dataset["science_acquisition_values"] = xr.DataArray(
-        name="science_acquisition_values",
-        data=evt_values,
-        dims="epoch_science_acquisition",
-        attrs=idex_attrs.get_variable_attributes("science_acquisition_values"),
+    # Copy longitude and latitude from the l2a dataset
+    l2b_dataset["longitude"] = l2a_dataset["longitude"].copy()
+    l2b_dataset["latitude"] = l2a_dataset["latitude"].copy()
+
+    logger.info("IDEX L2B science data processing completed.")
+
+    return l2b_dataset
+
+
+def compute_counts_by_charge_and_mass(
+    l2a_dataset: xr.Dataset, epoch_doy_unique: np.ndarray
+) -> tuple[np.ndarray, np.ndarray, np.ndarray]:
+    """
+    Compute the dust event counts by charge and mass by spin phase per day.
+
+    Parameters
+    ----------
+    l2a_dataset : xarray.Dataset
+        Combined IDEX L2a datasets.
+    epoch_doy_unique : np.ndarray
+        Unique days of year corresponding to the epochs in the dataset.
+
+    Returns
+    -------
+    tuple[np.ndarray, np.ndarray, np.ndarray]
+        Two 3D arrays containing counts by charge or mass, and by spin phase for each
+        dataset, and a 1D array of daily epoch values.
+    """
+    # Initialize arrays to hold counts.
+    # There should be 4 spin phase bins, 11 charge bins, and 11 mass bins.
+    # The first bin for charge and mass is for values below the first bin edge.
+    counts_by_charge = np.zeros(
+        (len(epoch_doy_unique), len(CHARGE_BIN_EDGES), len(SPIN_PHASE_BIN_EDGES) - 1),
     )
-    spin_phase_quadrants = round_spin_phases(l2a_dataset["spin_phase"])
-    spin_phase_quadrants.attrs.update(
-        idex_attrs.get_variable_attributes("spin_phase_quadrants")
+    counts_by_mass = np.zeros(
+        (len(epoch_doy_unique), len(MASS_BIN_EDGES), len(SPIN_PHASE_BIN_EDGES) - 1),
     )
-    l2b_dataset["spin_phase_quadrants"] = spin_phase_quadrants
+    daily_epoch = np.zeros(len(epoch_doy_unique))
+    for i in range(len(epoch_doy_unique)):
+        doy = epoch_doy_unique[i]
+        # Get the indices for the current day
+        current_day_indices = np.where(epoch_to_doy(l2a_dataset["epoch"].data) == doy)[
+            0
+        ]
+        # Set the epoch for the current day to be the mean epoch of the day.
+        daily_epoch[i] = np.mean(l2a_dataset["epoch"].data[current_day_indices])
+        mass_vals = l2a_dataset["target_low_dust_mass_estimate"].data[
+            current_day_indices
+        ]
+        charge_vals = l2a_dataset["target_low_impact_charge"].data[current_day_indices]
+        spin_phase_angles = l2a_dataset["spin_phase"].data[current_day_indices]
+        # Convert units
+        mass_vals = FG_TO_KG * np.array(mass_vals)
+        # Bin masses
+        binned_mass = np.array(np.digitize(mass_vals, bins=MASS_BIN_EDGES))
+        # Bin charges
+        binned_charge = np.array(np.digitize(charge_vals, bins=CHARGE_BIN_EDGES))
+        # Bin spin phases
+        binned_spin_phase = bin_spin_phases(spin_phase_angles)
+        # If the values in the array are beyond the bounds of bins, 0 or len(bins) it is
+        # returned as such. In this case, the desired result is to place the values
+        # beyond the last bin into the last bin and keep the values below the first bin.
+        binned_charge[binned_charge == len(CHARGE_BIN_EDGES)] = (
+            len(CHARGE_BIN_EDGES) - 1
+        )
+        binned_mass[binned_mass == len(MASS_BIN_EDGES)] = len(MASS_BIN_EDGES) - 1
 
-    # Get the time of impact array (in day of year)
-    impact_day_of_year = epoch_to_doy(l2b_dataset["epoch"].data)
-    l2b_dataset["impact_day_of_year"] = xr.DataArray(
-        name="impact_day_of_year",
-        data=impact_day_of_year,
-        dims="epoch",
-        attrs=idex_attrs.get_variable_attributes("impact_day_of_year"),
+        # TODO use np.histogramdd to compute the counts by charge and mass.
+        # Count dust events for each spin phase and mass bin or charge bin.
+        for mass_bin, charge_bin, spin_phase_bin in zip(
+            binned_mass, binned_charge, binned_spin_phase
+        ):
+            counts_by_mass[i, mass_bin, spin_phase_bin] += 1
+            counts_by_charge[i, charge_bin, spin_phase_bin] += 1
+
+    return counts_by_charge, counts_by_mass, daily_epoch
+
+
+def compute_rates_by_charge_and_mass(
+    counts_by_charge: np.ndarray,
+    counts_by_mass: np.ndarray,
+    epoch_doy: np.ndarray,
+    daily_on_percentage: dict,
+) -> tuple[np.ndarray, np.ndarray, np.ndarray]:
+    """
+    Compute the dust event counts rates by charge and mass by spin phase for each day.
+
+    Parameters
+    ----------
+    counts_by_charge : np.ndarray
+        3D array containing counts by charge and spin phase for each dataset.
+    counts_by_mass : np.ndarray
+        3D array containing counts by mass and spin phase for each dataset.
+    epoch_doy : np.ndarray
+        Unique days of year corresponding to the epochs in the dataset.
+    daily_on_percentage : dict
+        Percentage of time science acquisition was on for each doy.
+
+    Returns
+    -------
+    tuple[np.ndarray, np.ndarray, np.ndarray]
+        Two 3D arrays containing counts rates by charge or mass, and by spin phase for
+        each dataset and the quality flags for each epoch.
+    """
+    # Initialize arrays to hold rates.
+    rate_by_charge = np.full(counts_by_charge.shape, -1.0)
+    rate_by_mass = np.full(counts_by_mass.shape, -1.0)
+    # Initialize an array to hold quality flags for each epoch. A quality flag of 0
+    # indicates that there was no science acquisition data for that epoch, and the rate
+    # is not valid. A quality flag of 1 indicates that the rate is valid.
+    rate_quality_flags = np.ones(epoch_doy.shape, dtype=np.uint8)
+
+    # Get percentages in order of epoch_doy. Log any missing days.
+    epoch_doy_percent_on = np.array(
+        [daily_on_percentage.get(doy, -1) for doy in epoch_doy]
     )
-    logger.info("IDEX L2B science data processing completed.")
 
-    return l2b_dataset
+    missing_doy_uptimes_inds = np.where(epoch_doy_percent_on == -1)[0]
+    if np.any(missing_doy_uptimes_inds):
+        rate_quality_flags[missing_doy_uptimes_inds] = 0
+        logger.warning(
+            f"Missing science acquisition uptime percentages for day(s) of"
+            f" year: {epoch_doy[missing_doy_uptimes_inds]}."
+        )
+    # Compute rates
+    # Create a boolean mask for DOYs that have a non-zero percentage of science
+    # acquisition time.
+    non_zero_inds = np.where(epoch_doy_percent_on > 0)[0]
+    # Compute rates only for days with non-zero science acquisition percentage
+    rate_by_charge[non_zero_inds] = counts_by_charge[non_zero_inds] / (
+        0.01
+        * epoch_doy_percent_on[non_zero_inds, np.newaxis, np.newaxis]
+        * SECONDS_IN_DAY
+    )
+    rate_by_mass[non_zero_inds] = counts_by_mass[non_zero_inds] / (
+        0.01
+        * epoch_doy_percent_on[non_zero_inds, np.newaxis, np.newaxis]
+        * SECONDS_IN_DAY
+    )
+
+    return rate_by_charge, rate_by_mass, rate_quality_flags
 
 
-def round_spin_phases(spin_phases: xr.DataArray) -> xr.DataArray:
+def bin_spin_phases(spin_phases: xr.DataArray) -> np.ndarray:
     """
-    Round spin phase angles to the nearest quadrant (0, 90, 180, 270).
+    Bin spin phase angles into 4 quadrants: [315°-45°,45°-135°,135°-225°, 225°-315°].
 
     Parameters
     ----------
@@ -115,21 +361,22 @@ def round_spin_phases(spin_phases: xr.DataArray) -> xr.DataArray:
 
     Returns
     -------
-    xarray.DataArray
-        Spin phases rounded to the nearest quadrant.
+    numpy.ndarray
+        Spin phases binned into quadrants.
     """
     if np.any(spin_phases < 0) or np.any(spin_phases >= 360):
         logger.warning(
             f"Spin phase angles, {spin_phases.data} are outside of the expected spin "
             f"phase angle range, [0, 360)."
         )
-    quadrant_size = 90
-    # Shift spin phases so any value exactly between two quadrants gets shifted to the
-    # Higher quadrant
-    shifted_spin_phases = spin_phases + quadrant_size / 2
-    # Calculate nearest quadrant value.
-    # Use mod to wrap values > 315 to 0.
-    return (quadrant_size * (shifted_spin_phases / quadrant_size).astype(int)) % 360
+    # Shift spin phases by +45° so that the first bin starts at 0°.
+    # Use mod to wrap values > 360 to 0.
+    shifted_spin_phases = (spin_phases + 45) % 360
+    # Use np.digitize to find the bin index for each spin phase.
+    bin_indices = np.digitize(shifted_spin_phases, SPIN_PHASE_BIN_EDGES, right=False)
+    # Shift bins to be zero-based.
+    bin_indices -= 1
+    return np.asarray(bin_indices)
 
 
 def get_science_acquisition_timestamps(
@@ -169,7 +416,7 @@
         evt_dataset["el3par_evtpkt"].data[sc_indices] << 8
         | evt_dataset["el4par_evtpkt"].data[sc_indices]
     )
-    epochs = evt_dataset["epoch"][sc_indices]
+    epochs = evt_dataset["epoch"][sc_indices].data
     # Now the state change values and check if it is either a science
     # acquisition start or science acquisition stop event.
     for v1, v2, epoch in zip(val1, val2, epochs):
@@ -184,8 +431,78 @@
             event_timestamps.append(epoch)
             event_values.append(0)
 
+    logger.info(
+        f"Found science acquisition events: {event_logs} at times: {event_timestamps}"
+    )
     return (
         np.asarray(event_logs),
         np.asarray(event_timestamps),
         np.asarray(event_values),
     )
+
+
+def get_science_acquisition_on_percentage(evt_dataset: xr.Dataset) -> dict:
+    """
+    Calculate the percentage of time science acquisition was occurring for each day.
+
+    Parameters
+    ----------
+    evt_dataset : xarray.Dataset
+        Contains IDEX event message data.
+
+    Returns
+    -------
+    dict
+        Percentages of time the instrument was in science acquisition mode for each day
+        of year.
+    """
+    # Get science acquisition start and stop times
+    evt_logs, evt_time, evt_values = get_science_acquisition_timestamps(evt_dataset)
+    # Track total and 'on' durations per day
+    daily_totals: collections.defaultdict = defaultdict(timedelta)
+    daily_on: collections.defaultdict = defaultdict(timedelta)
+    # Convert epoch event times to datetime
+    dates = et_to_datetime64(ttj2000ns_to_et(evt_time)).astype(datetime)
+    # Simulate an event at the start of the first day.
+    start_of_first_day = dates[0].replace(hour=0, minute=0, second=0, microsecond=0)
+    # Assume that the state at the start of the day is the opposite of what the first
+    # state is.
+    state_at_start = 0 if evt_values[0] == 1 else 1
+    dates = np.insert(dates, 0, start_of_first_day)
+    evt_values = np.insert(evt_values, 0, state_at_start)
+    for i in range(len(dates)):
+        start = dates[i]
+        state = evt_values[i]
+        if i == len(dates) - 1:
+            # If this is the last event, set the "end" value the end of the day.
+            end = (start + timedelta(days=1)).replace(
+                hour=0, minute=0, second=0, microsecond=0
+            )
+        else:
+            # Otherwise, use the next event time as the end time.
+            end = dates[i + 1]
+
+        # Split time span by day boundaries
+        current = start
+        while current < end:
+            next_day = (current + timedelta(days=1)).replace(
+                hour=0, minute=0, second=0, microsecond=0
+            )
+            segment_end = min(end, next_day)
+            duration = segment_end - current
+            doy = current.timetuple().tm_yday
+            daily_totals[doy] += duration
+            # If the state is 1, add to the 'on' duration for that day
+            if state == 1:
+                daily_on[doy] += duration
+            current = segment_end
+
+    # Calculate the percentage of time science acquisition was on for each day
+    percent_on_times = {}
+    for doy in sorted(daily_totals.keys()):
+        total = daily_totals[doy].total_seconds()
+        on_time = daily_on[doy].total_seconds()
+        pct_on = (on_time / total) * 100 if total > 0 else 0
+        percent_on_times[doy] = pct_on
+
+    return percent_on_times
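
Two of the new building blocks above can be exercised standalone; a minimal sketch (not package code) of the quadrant binning and the rate normalization:

    import numpy as np

    SPIN_PHASE_BIN_EDGES = np.array([0, 90, 180, 270, 360])
    SECONDS_IN_DAY = 86400

    # bin_spin_phases: the +45 degree shift and wrap mean 350 and 10 degrees
    # both land in the quadrant centered on 0 degrees (bin 0).
    angles = np.array([350.0, 10.0, 100.0, 200.0, 300.0])
    bins = np.digitize((angles + 45) % 360, SPIN_PHASE_BIN_EDGES, right=False) - 1
    print(bins)  # [0 0 1 2 3]

    # compute_rates_by_charge_and_mass: counts are divided by the seconds of
    # science acquisition, i.e. percent-on as a fraction times a day's seconds.
    counts = np.array([432.0])
    percent_on = np.array([50.0])  # acquiring half the day
    print(counts / (0.01 * percent_on * SECONDS_IN_DAY))  # [0.01] events/s
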
@@ -0,0 +1,48 @@
+"""Processing function for Lo star sensor data."""
+
+import logging
+
+import numpy as np
+import xarray as xr
+
+from imap_processing.lo.l0.utils.bit_decompression import (
+    DECOMPRESSION_TABLES,
+    Decompress,
+)
+
+logger = logging.getLogger(__name__)
+logger.setLevel(logging.INFO)
+
+
+def process_star_sensor(ds: xr.Dataset) -> xr.Dataset:
+    """
+    Process Lo star sensor data.
+
+    Parameters
+    ----------
+    ds : xr.Dataset
+        The packet dataset containing Lo star sensor data.
+
+    Returns
+    -------
+    xr.Dataset
+        Processed dataset with a decompressed data field.
+    """
+    # Make one long flat buffer
+    # This assumes that all data_compressed entries are of the same length
+    # but allows for only one frombuffer call
+    buffer = b"".join(ds["data_compressed"].values)
+    data = np.frombuffer(buffer, dtype=np.uint8).reshape(-1, 720)
+
+    # Decompress from 8 -> 12 bits using the decompression tables
+    decompression = DECOMPRESSION_TABLES[Decompress.DECOMPRESS8TO12].astype(np.uint16)
+    # Use the mean value column (2)
+    data = decompression[data, 2]
+
+    # There is already a variable called "count" in the dataset that
+    # came with the packet
+    ds["data_index"] = xr.DataArray(np.arange(720), dims="data_index")
+    ds["data"] = xr.DataArray(data, dims=("epoch", "data_index"))
+    # Remove the original compressed data field
+    ds = ds.drop_vars("data_compressed")
+    return ds
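
The lookup in process_star_sensor is plain NumPy fancy indexing; a self-contained sketch with a stand-in table (the real DECOMPRESSION_TABLES ships with imap_processing, and its column layout is assumed here):

    import numpy as np

    # Stand-in for DECOMPRESSION_TABLES[Decompress.DECOMPRESS8TO12] (assumed
    # layout): each 8-bit code maps to a (low, high, mean) triple of 12-bit
    # values; the triples here are synthetic.
    rng = np.random.default_rng(0)
    low = rng.integers(0, 4095, size=256, dtype=np.uint16)
    high = low + 1
    table = np.stack([low, high, (low + high) // 2], axis=1)

    # One vectorized fancy-indexing lookup decompresses every sample;
    # column 2 selects the mean value, as in the new module.
    compressed = np.frombuffer(bytes(range(16)), dtype=np.uint8)
    decompressed = table[compressed, 2]
    print(decompressed.shape, decompressed.dtype)  # (16,) uint16
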