imap-processing 0.14.0__py3-none-any.whl → 0.15.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of imap-processing might be problematic. Click here for more details.

Files changed (61) hide show
  1. imap_processing/_version.py +2 -2
  2. imap_processing/cdf/config/imap_codice_global_cdf_attrs.yaml +60 -35
  3. imap_processing/cdf/config/imap_codice_l1a_variable_attrs.yaml +765 -287
  4. imap_processing/cdf/config/imap_codice_l1b_variable_attrs.yaml +1577 -288
  5. imap_processing/cdf/config/imap_codice_l2_variable_attrs.yaml +1004 -0
  6. imap_processing/cdf/config/imap_enamaps_l2-common_variable_attrs.yaml +28 -0
  7. imap_processing/cdf/config/imap_enamaps_l2-healpix_variable_attrs.yaml +1 -1
  8. imap_processing/cdf/config/imap_enamaps_l2-rectangular_variable_attrs.yaml +18 -0
  9. imap_processing/cdf/config/imap_glows_l2_variable_attrs.yaml +39 -3
  10. imap_processing/cdf/config/imap_ialirt_global_cdf_attrs.yaml +18 -0
  11. imap_processing/cdf/config/imap_ialirt_l1_variable_attrs.yaml +370 -0
  12. imap_processing/cdf/config/imap_idex_l1a_variable_attrs.yaml +7 -0
  13. imap_processing/cdf/config/imap_idex_l1b_variable_attrs.yaml +11 -0
  14. imap_processing/cdf/config/imap_idex_l2a_variable_attrs.yaml +4 -0
  15. imap_processing/cdf/config/imap_idex_l2c_variable_attrs.yaml +7 -3
  16. imap_processing/cdf/config/imap_lo_global_cdf_attrs.yaml +6 -0
  17. imap_processing/cdf/config/imap_mag_l2_variable_attrs.yaml +114 -0
  18. imap_processing/cdf/config/imap_swe_global_cdf_attrs.yaml +11 -5
  19. imap_processing/cdf/config/imap_swe_l1b_variable_attrs.yaml +23 -1
  20. imap_processing/cdf/config/imap_ultra_l1b_variable_attrs.yaml +4 -0
  21. imap_processing/cdf/config/imap_ultra_l1c_variable_attrs.yaml +2 -2
  22. imap_processing/cli.py +144 -76
  23. imap_processing/codice/codice_l1a.py +53 -22
  24. imap_processing/codice/codice_l1b.py +91 -18
  25. imap_processing/codice/codice_l2.py +89 -0
  26. imap_processing/codice/constants.py +62 -5
  27. imap_processing/ena_maps/ena_maps.py +43 -1
  28. imap_processing/glows/l2/glows_l2_data.py +3 -6
  29. imap_processing/ialirt/l0/process_swe.py +2 -2
  30. imap_processing/ialirt/utils/constants.py +48 -0
  31. imap_processing/ialirt/utils/create_xarray.py +87 -0
  32. imap_processing/idex/idex_l2c.py +9 -9
  33. imap_processing/lo/l1b/lo_l1b.py +6 -1
  34. imap_processing/lo/l1c/lo_l1c.py +22 -13
  35. imap_processing/lo/l2/lo_l2.py +213 -0
  36. imap_processing/mag/l1c/mag_l1c.py +8 -1
  37. imap_processing/mag/l2/mag_l2.py +6 -2
  38. imap_processing/mag/l2/mag_l2_data.py +7 -5
  39. imap_processing/swe/l1a/swe_l1a.py +6 -6
  40. imap_processing/swe/l1b/swe_l1b.py +70 -11
  41. imap_processing/ultra/l0/decom_ultra.py +1 -1
  42. imap_processing/ultra/l0/ultra_utils.py +0 -4
  43. imap_processing/ultra/l1b/badtimes.py +7 -3
  44. imap_processing/ultra/l1b/cullingmask.py +7 -2
  45. imap_processing/ultra/l1b/de.py +26 -12
  46. imap_processing/ultra/l1b/lookup_utils.py +8 -7
  47. imap_processing/ultra/l1b/ultra_l1b.py +59 -48
  48. imap_processing/ultra/l1b/ultra_l1b_culling.py +50 -18
  49. imap_processing/ultra/l1b/ultra_l1b_extended.py +4 -4
  50. imap_processing/ultra/l1c/helio_pset.py +53 -0
  51. imap_processing/ultra/l1c/spacecraft_pset.py +20 -12
  52. imap_processing/ultra/l1c/ultra_l1c.py +49 -26
  53. imap_processing/ultra/l1c/ultra_l1c_pset_bins.py +40 -2
  54. imap_processing/ultra/l2/ultra_l2.py +47 -2
  55. imap_processing/ultra/lookup_tables/Angular_Profiles_FM90_RightSlit.csv +524 -526
  56. imap_processing/ultra/utils/ultra_l1_utils.py +47 -8
  57. {imap_processing-0.14.0.dist-info → imap_processing-0.15.0.dist-info}/METADATA +2 -2
  58. {imap_processing-0.14.0.dist-info → imap_processing-0.15.0.dist-info}/RECORD +61 -52
  59. {imap_processing-0.14.0.dist-info → imap_processing-0.15.0.dist-info}/LICENSE +0 -0
  60. {imap_processing-0.14.0.dist-info → imap_processing-0.15.0.dist-info}/WHEEL +0 -0
  61. {imap_processing-0.14.0.dist-info → imap_processing-0.15.0.dist-info}/entry_points.txt +0 -0
@@ -166,19 +166,17 @@ class CoDICEL1aPipeline:
166
166
  """
167
167
  self.coords = {}
168
168
 
169
- coord_names = ["epoch", *list(self.config["output_dims"].keys())]
170
-
171
- # These are labels unique to lo-counters products coordinates
172
- if self.config["dataset_name"] in [
173
- "imap_codice_l1a_lo-counters-aggregated",
174
- "imap_codice_l1a_lo-counters-singles",
175
- ]:
176
- coord_names.append("spin_sector_pairs_label")
169
+ coord_names = [
170
+ "epoch",
171
+ *self.config["output_dims"].keys(),
172
+ *[key + "_label" for key in self.config["output_dims"].keys()],
173
+ ]
177
174
 
178
175
  # Define the values for the coordinates
179
176
  for name in coord_names:
180
177
  if name == "epoch":
181
178
  values = self.calculate_epoch_values()
179
+ dims = [name]
182
180
  elif name in [
183
181
  "esa_step",
184
182
  "inst_az",
@@ -188,6 +186,7 @@ class CoDICEL1aPipeline:
188
186
  "ssd_index",
189
187
  ]:
190
188
  values = np.arange(self.config["output_dims"][name])
189
+ dims = [name]
191
190
  elif name == "spin_sector_pairs_label":
192
191
  values = np.array(
193
192
  [
@@ -199,12 +198,23 @@ class CoDICEL1aPipeline:
199
198
  "150-180 deg",
200
199
  ]
201
200
  )
201
+ dims = [name]
202
+ elif name in [
203
+ "spin_sector_label",
204
+ "esa_step_label",
205
+ "inst_az_label",
206
+ "spin_sector_index_label",
207
+ "ssd_index_label",
208
+ ]:
209
+ key = name.removesuffix("_label")
210
+ values = np.arange(self.config["output_dims"][key]).astype(str)
211
+ dims = [key]
202
212
 
203
213
  coord = xr.DataArray(
204
214
  values,
205
215
  name=name,
206
- dims=[name],
207
- attrs=self.cdf_attrs.get_variable_attributes(name),
216
+ dims=dims,
217
+ attrs=self.cdf_attrs.get_variable_attributes(name, check_schema=False),
208
218
  )
209
219
 
210
220
  self.coords[name] = coord
@@ -282,7 +292,7 @@ class CoDICEL1aPipeline:
282
292
  if self.config["dataset_name"] == "imap_codice_l1a_hi-sectored":
283
293
  for species in self.config["energy_table"]:
284
294
  dataset = self.define_energy_bins(dataset, species)
285
- dataset = dataset.drop_vars("esa_step")
295
+ dataset = dataset.drop_vars(["esa_step", "esa_step_label"])
286
296
 
287
297
  return dataset
288
298
 
@@ -316,14 +326,16 @@ class CoDICEL1aPipeline:
316
326
  centers,
317
327
  dims=[energy_bin_name],
318
328
  attrs=self.cdf_attrs.get_variable_attributes(
319
- f"{self.config['dataset_name'].split('_')[-1]}-{energy_bin_name}"
329
+ f"{self.config['dataset_name'].split('_')[-1]}-{energy_bin_name}",
330
+ check_schema=False,
320
331
  ),
321
332
  )
322
333
  dataset[f"{energy_bin_name}_delta"] = xr.DataArray(
323
334
  deltas,
324
335
  dims=[f"{energy_bin_name}_delta"],
325
336
  attrs=self.cdf_attrs.get_variable_attributes(
326
- f"{self.config['dataset_name'].split('_')[-1]}-{energy_bin_name}_delta"
337
+ f"{self.config['dataset_name'].split('_')[-1]}-{energy_bin_name}_delta",
338
+ check_schema=False,
327
339
  ),
328
340
  )
329
341
 
@@ -360,13 +372,15 @@ class CoDICEL1aPipeline:
360
372
  if variable_name == "energy_table":
361
373
  variable_data = self.get_energy_table()
362
374
  dims = ["esa_step"]
363
- attrs = self.cdf_attrs.get_variable_attributes("energy_table")
375
+ attrs = self.cdf_attrs.get_variable_attributes(
376
+ "energy_table", check_schema=False
377
+ )
364
378
 
365
379
  elif variable_name == "acquisition_time_per_step":
366
380
  variable_data = self.get_acquisition_times()
367
381
  dims = ["esa_step"]
368
382
  attrs = self.cdf_attrs.get_variable_attributes(
369
- "acquisition_time_per_step"
383
+ "acquisition_time_per_step", check_schema=False
370
384
  )
371
385
 
372
386
  # These variables can be gathered straight from the packet data
@@ -743,7 +757,7 @@ def create_binned_dataset(
743
757
  np.array(data["epoch"], dtype=np.uint64),
744
758
  name="epoch",
745
759
  dims=["epoch"],
746
- attrs=pipeline.cdf_attrs.get_variable_attributes("epoch"),
760
+ attrs=pipeline.cdf_attrs.get_variable_attributes("epoch", check_schema=False),
747
761
  )
748
762
  dataset = xr.Dataset(
749
763
  coords={"epoch": coord},
@@ -751,10 +765,11 @@ def create_binned_dataset(
751
765
  )
752
766
 
753
767
  # Add the data variables
768
+ descriptor = pipeline.config["dataset_name"].removeprefix("imap_codice_l1a_")
754
769
  for species in pipeline.config["energy_table"]:
755
770
  # Add the species data to the dataset
756
771
  values = np.array(data[species], dtype=np.uint32)
757
- attrs = pipeline.cdf_attrs.get_variable_attributes(f"hi-omni-{species}")
772
+ attrs = pipeline.cdf_attrs.get_variable_attributes(f"{descriptor}-{species}")
758
773
  dims = ["epoch", f"energy_{species}"]
759
774
  dataset[species] = xr.DataArray(
760
775
  values,
@@ -843,13 +858,19 @@ def create_direct_event_dataset(apid: int, packets: xr.Dataset) -> xr.Dataset:
843
858
  epochs,
844
859
  name="epoch",
845
860
  dims=["epoch"],
846
- attrs=cdf_attrs.get_variable_attributes("epoch"),
861
+ attrs=cdf_attrs.get_variable_attributes("epoch", check_schema=False),
847
862
  )
848
863
  event_num = xr.DataArray(
849
864
  np.arange(10000),
850
865
  name="event_num",
851
866
  dims=["event_num"],
852
- attrs=cdf_attrs.get_variable_attributes("event_num"),
867
+ attrs=cdf_attrs.get_variable_attributes("event_num", check_schema=False),
868
+ )
869
+ event_num_label = xr.DataArray(
870
+ np.arange(10000).astype(str),
871
+ name="event_num_label",
872
+ dims=["event_num"],
873
+ attrs=cdf_attrs.get_variable_attributes("event_num_label", check_schema=False),
853
874
  )
854
875
 
855
876
  # Create the dataset to hold the data variables
@@ -858,7 +879,11 @@ def create_direct_event_dataset(apid: int, packets: xr.Dataset) -> xr.Dataset:
858
879
  elif apid == CODICEAPID.COD_HI_PHA:
859
880
  attrs = cdf_attrs.get_global_attributes("imap_codice_l1a_hi-pha")
860
881
  dataset = xr.Dataset(
861
- coords={"epoch": epoch, "event_num": event_num},
882
+ coords={
883
+ "epoch": epoch,
884
+ "event_num": event_num,
885
+ "event_num_label": event_num_label,
886
+ },
862
887
  attrs=attrs,
863
888
  )
864
889
 
@@ -903,7 +928,7 @@ def create_hskp_dataset(packet: xr.Dataset) -> xr.Dataset:
903
928
  packet.epoch,
904
929
  name="epoch",
905
930
  dims=["epoch"],
906
- attrs=cdf_attrs.get_variable_attributes("epoch"),
931
+ attrs=cdf_attrs.get_variable_attributes("epoch", check_schema=False),
907
932
  )
908
933
 
909
934
  dataset = xr.Dataset(
@@ -928,7 +953,13 @@ def create_hskp_dataset(packet: xr.Dataset) -> xr.Dataset:
928
953
  if variable in exclude_variables:
929
954
  continue
930
955
 
931
- attrs = cdf_attrs.get_variable_attributes(variable)
956
+ # The housekeeping spin_period variable has different values than
957
+ # the spin_value attribute in other datasets, so it gets special
958
+ # treatment
959
+ if variable == "spin_period":
960
+ attrs = cdf_attrs.get_variable_attributes("spin_period_hskp")
961
+ else:
962
+ attrs = cdf_attrs.get_variable_attributes(variable)
932
963
 
933
964
  dataset[variable] = xr.DataArray(
934
965
  packet[variable].data, dims=["epoch"], attrs=attrs
@@ -9,9 +9,13 @@ from imap_processing.codice.codice_l1b import process_codice_l1b
9
9
  dataset = process_codice_l1b(l1a_filename)
10
10
  """
11
11
 
12
+ # TODO: Figure out how to convert hi-priority data product. Need an updated
13
+ # algorithm document that describes this.
14
+
12
15
  import logging
13
16
  from pathlib import Path
14
17
 
18
+ import numpy as np
15
19
  import xarray as xr
16
20
 
17
21
  from imap_processing.cdf.imap_cdf_manager import ImapCdfAttributes
@@ -22,6 +26,71 @@ logger = logging.getLogger(__name__)
22
26
  logger.setLevel(logging.INFO)
23
27
 
24
28
 
29
+ def convert_to_rates(
30
+ dataset: xr.Dataset, descriptor: str, variable_name: str
31
+ ) -> np.ndarray:
32
+ """
33
+ Apply a conversion from counts to rates.
34
+
35
+ The formula for conversion from counts to rates is specific to each data
36
+ product, but is largely grouped by CoDICE-Lo and CoDICE-Hi products.
37
+
38
+ Parameters
39
+ ----------
40
+ dataset : xarray.Dataset
41
+ The L1b dataset containing the data to convert.
42
+ descriptor : str
43
+ The descriptor of the data product of interest.
44
+ variable_name : str
45
+ The variable name to apply the conversion to.
46
+
47
+ Returns
48
+ -------
49
+ rates_data : np.ndarray
50
+ The converted data array.
51
+ """
52
+ # TODO: Temporary workaround to create CDFs for SIT-4. Revisit after SIT-4.
53
+ acq_times = 1
54
+
55
+ if descriptor in [
56
+ "lo-counters-aggregated",
57
+ "lo-counters-singles",
58
+ "lo-nsw-angular",
59
+ "lo-sw-angular",
60
+ "lo-nsw-priority",
61
+ "lo-sw-priority",
62
+ "lo-nsw-species",
63
+ "lo-sw-species",
64
+ "lo-ialirt",
65
+ ]:
66
+ # Applying rate calculation described in section 10.2 of the algorithm
67
+ # document
68
+ rates_data = dataset[variable_name].data / (
69
+ acq_times
70
+ * 1e-6 # Converting from microseconds to seconds
71
+ * constants.L1B_DATA_PRODUCT_CONFIGURATIONS[descriptor]["num_spin_sectors"]
72
+ )
73
+ elif descriptor in [
74
+ "hi-counters-aggregated",
75
+ "hi-counters-singles",
76
+ "hi-omni",
77
+ "hi-priority",
78
+ "hi-sectored",
79
+ "hi-ialirt",
80
+ ]:
81
+ # Applying rate calculation described in section 10.1 of the algorithm
82
+ # document
83
+ rates_data = dataset[variable_name].data / (
84
+ constants.L1B_DATA_PRODUCT_CONFIGURATIONS[descriptor]["num_spin_sectors"]
85
+ * constants.L1B_DATA_PRODUCT_CONFIGURATIONS[descriptor]["num_spins"]
86
+ * acq_times
87
+ )
88
+ elif descriptor == "hskp":
89
+ rates_data = dataset[variable_name].data / acq_times
90
+
91
+ return rates_data
92
+
93
+
25
94
  def process_codice_l1b(file_path: Path) -> xr.Dataset:
26
95
  """
27
96
  Will process CoDICE l1a data to create l1b data products.
@@ -45,7 +114,11 @@ def process_codice_l1b(file_path: Path) -> xr.Dataset:
45
114
  # set some useful distinguishing variables
46
115
  dataset_name = l1a_dataset.attrs["Logical_source"].replace("_l1a_", "_l1b_")
47
116
  descriptor = dataset_name.removeprefix("imap_codice_l1b_")
48
- apid = constants.CODICEAPID_MAPPING[descriptor]
117
+
118
+ # Direct event data products do not have a level L1B
119
+ if descriptor in ["lo-pha", "hi-pha"]:
120
+ logger.warning("Encountered direct event data product. Skipping L1b processing")
121
+ return None
49
122
 
50
123
  # Get the L1b CDF attributes
51
124
  cdf_attrs = ImapCdfAttributes()
@@ -60,32 +133,32 @@ def process_codice_l1b(file_path: Path) -> xr.Dataset:
60
133
 
61
134
  # Determine which variables need to be converted from counts to rates
62
135
  # TODO: Figure out exactly which hskp variables need to be converted
136
+ # Housekeeping and binned datasets are treated a bit differently since
137
+ # not all variables need to be converted
63
138
  if descriptor == "hskp":
64
- data_variables = []
65
- support_variables = ["cmdexe", "cmdrjct"]
66
- variables_to_convert = support_variables
139
+ # TODO: Check with Joey if any housekeeping data needs to be converted
140
+ variables_to_convert = []
141
+ elif descriptor == "hi-sectored":
142
+ variables_to_convert = ["h", "he3he4", "cno", "fe"]
143
+ elif descriptor == "hi-omni":
144
+ variables_to_convert = ["h", "he3", "he4", "c", "o", "ne_mg_si", "fe", "uh"]
145
+ elif descriptor == "hi-ialirt":
146
+ variables_to_convert = ["h"]
67
147
  else:
68
- data_variables = getattr(
148
+ variables_to_convert = getattr(
69
149
  constants, f"{descriptor.upper().replace('-', '_')}_VARIABLE_NAMES"
70
150
  )
71
- support_variables = constants.DATA_PRODUCT_CONFIGURATIONS[apid][
72
- "support_variables"
73
- ]
74
- variables_to_convert = data_variables + support_variables
75
151
 
152
+ # Apply the conversion to rates
76
153
  for variable_name in variables_to_convert:
77
- # Apply conversion of data from counts to rates
78
- # TODO: Properly implement conversion factors on a per-data-product basis
79
- # For now, just divide by 100 to get float values
80
- l1b_dataset[variable_name].data = l1b_dataset[variable_name].data / 100
154
+ l1b_dataset[variable_name].data = convert_to_rates(
155
+ l1b_dataset, descriptor, variable_name
156
+ )
81
157
 
82
158
  # Set the variable attributes
83
- if variable_name in data_variables:
84
- cdf_attrs_key = f"{descriptor}-{variable_name}"
85
- elif variable_name in support_variables:
86
- cdf_attrs_key = variable_name
159
+ cdf_attrs_key = f"{descriptor}-{variable_name}"
87
160
  l1b_dataset[variable_name].attrs = cdf_attrs.get_variable_attributes(
88
- cdf_attrs_key
161
+ cdf_attrs_key, check_schema=False
89
162
  )
90
163
 
91
164
  logger.info(f"\nFinal data product:\n{l1b_dataset}\n")
@@ -0,0 +1,89 @@
1
+ """
2
+ Perform CoDICE l2 processing.
3
+
4
+ This module processes CoDICE l1 files and creates L2 data products.
5
+
6
+ Notes
7
+ -----
8
+ from imap_processing.codice.codice_l2 import process_codice_l2
9
+ dataset = process_codice_l2(l1_filename)
10
+ """
11
+
12
+ import logging
13
+ from pathlib import Path
14
+
15
+ import numpy as np
16
+ import xarray as xr
17
+
18
+ from imap_processing.cdf.imap_cdf_manager import ImapCdfAttributes
19
+ from imap_processing.cdf.utils import load_cdf
20
+
21
+ logger = logging.getLogger(__name__)
22
+ logger.setLevel(logging.INFO)
23
+
24
+
25
+ def process_codice_l2(file_path: Path) -> xr.Dataset:
26
+ """
27
+ Will process CoDICE l1 data to create l2 data products.
28
+
29
+ Parameters
30
+ ----------
31
+ file_path : pathlib.Path
32
+ Path to the CoDICE L1 file to process.
33
+
34
+ Returns
35
+ -------
36
+ l2_dataset : xarray.Dataset
37
+ The``xarray`` dataset containing the science data and supporting metadata.
38
+ """
39
+ logger.info(f"Processing {file_path}")
40
+
41
+ # Open the l1 file
42
+ l1_dataset = load_cdf(file_path)
43
+
44
+ # Use the logical source as a way to distinguish between data products and
45
+ # set some useful distinguishing variables
46
+ # TODO: Could clean this up by using imap-data-access methods?
47
+ dataset_name = l1_dataset.attrs["Logical_source"]
48
+ data_level = dataset_name.removeprefix("imap_codice_").split("_")[0]
49
+ descriptor = dataset_name.removeprefix(f"imap_codice_{data_level}_")
50
+ dataset_name = dataset_name.replace(data_level, "l2")
51
+
52
+ # TODO: Temporary work-around to replace "PHA" naming convention with
53
 + "direct events". This will eventually be changed at the L1 level and
54
+ # thus this will eventually be removed.
55
+ if descriptor == "lo-pha":
56
+ dataset_name = dataset_name.replace("lo-pha", "lo-direct-events")
57
+ elif descriptor == "hi-pha":
58
+ dataset_name = dataset_name.replace("hi-pha", "hi-direct-events")
59
+
60
+ # Use the L1 data product as a starting point for L2
61
+ l2_dataset = l1_dataset.copy()
62
+
63
+ # Get the L2 CDF attributes
64
+ cdf_attrs = ImapCdfAttributes()
65
+ cdf_attrs.add_instrument_global_attrs("codice")
66
+ cdf_attrs.add_instrument_variable_attrs("codice", "l2")
67
+
68
+ # Update the global attributes
69
+ l2_dataset.attrs = cdf_attrs.get_global_attributes(dataset_name)
70
+
71
+ # Set the variable attributes
72
+ for variable_name in l2_dataset:
73
+ l2_dataset[variable_name].attrs = cdf_attrs.get_variable_attributes(
74
+ variable_name, check_schema=False
75
+ )
76
+
77
+ # TODO: Add L2-specific algorithms/functionality here. For SIT-4, we can
78
+ # just keep the data as-is.
79
+
80
 + # Workaround to fix monotonically increasing epoch issue
81
+ epoch_attrs = cdf_attrs.get_variable_attributes("epoch", check_schema=False)
82
+ new_epoch_values = np.array([843543730220584064 + i for i in range(113)])
83
+ l2_dataset = l2_dataset.assign_coords(
84
+ epoch=("epoch", new_epoch_values, epoch_attrs)
85
+ )
86
+
87
+ logger.info(f"\nFinal data product:\n{l2_dataset}\n")
88
+
89
+ return l2_dataset
@@ -207,13 +207,13 @@ HI_COUNTERS_AGGREGATED_ACTIVE_VARIABLES = {
207
207
  "Reserved3": False,
208
208
  "Reserved4": False,
209
209
  "Reserved5": False,
210
- "LowTOFCutoff": False,
211
- "Reserved6": False,
212
- "Reserved7": False,
210
+ "LowTOFCutoff": True,
211
+ "Reserved6": True,
212
+ "Reserved7": True,
213
213
  "ASIC1FlagInvalid": True,
214
214
  "ASIC2FlagInvalid": True,
215
- "ASIC1ChannelInvalid": False,
216
- "ASIC2ChannelInvalid": False,
215
+ "ASIC1ChannelInvalid": True,
216
+ "ASIC2ChannelInvalid": True,
217
217
  }
218
218
  HI_COUNTERS_AGGREGATED_VARIABLE_NAMES = [
219
219
  name
@@ -663,6 +663,63 @@ DATA_PRODUCT_CONFIGURATIONS: dict[CODICEAPID | int, dict] = {
663
663
  },
664
664
  }
665
665
 
666
+ # Various configurations to support L1b processing of individual data products
667
+ # Much of these are described in the algorithm document in chapter 11 ("Data
668
+ # Level 1B")
669
+ L1B_DATA_PRODUCT_CONFIGURATIONS: dict[str, dict] = {
670
+ "hi-counters-aggregated": {
671
+ "num_spin_sectors": 24,
672
+ "num_spins": 16,
673
+ },
674
+ "hi-counters-singles": {
675
+ "num_spin_sectors": 24,
676
+ "num_spins": 16,
677
+ },
678
+ "hi-ialirt": {
679
+ "num_spin_sectors": 24,
680
+ "num_spins": 4,
681
+ },
682
+ "hi-omni": {
683
+ "num_spin_sectors": 24,
684
+ "num_spins": 4,
685
+ },
686
+ "hi-priority": { # TODO: Ask Joey to define these
687
+ "num_spin_sectors": 1,
688
+ "num_spins": 1,
689
+ },
690
+ "hi-sectored": {
691
+ "num_spin_sectors": 2,
692
+ "num_spins": 16,
693
+ },
694
+ "lo-counters-aggregated": {
695
+ "num_spin_sectors": 2,
696
+ },
697
+ "lo-counters-singles": {
698
+ "num_spin_sectors": 2,
699
+ },
700
+ "lo-nsw-angular": {
701
+ "num_spin_sectors": 1,
702
+ },
703
+ "lo-sw-angular": {
704
+ "num_spin_sectors": 1,
705
+ },
706
+ "lo-nsw-priority": {
707
+ "num_spin_sectors": 1,
708
+ },
709
+ "lo-sw-priority": {
710
+ "num_spin_sectors": 1,
711
+ },
712
+ "lo-nsw-species": {
713
+ "num_spin_sectors": 12,
714
+ },
715
+ "lo-sw-species": {
716
+ "num_spin_sectors": 12,
717
+ },
718
+ "lo-ialirt": {
719
+ "num_spin_sectors": 12,
720
+ },
721
+ }
722
+
666
723
  # Various configurations to support processing of direct events data products
667
724
  # These are described in the algorithm document in chapter 10 ("Data Level 1A")
668
725
  DE_DATA_PRODUCT_CONFIGURATIONS: dict[Any, dict[str, Any]] = {
@@ -620,6 +620,42 @@ class HiPointingSet(PointingSet):
620
620
  self.spatial_coords = ("spin_angle_bin",)
621
621
 
622
622
 
623
+ class LoPointingSet(PointingSet):
624
+ """
625
+ PointingSet object specific to Lo L1C PSet data.
626
+
627
+ Parameters
628
+ ----------
629
+ dataset : xarray.Dataset
630
+ Lo L1C pointing set data loaded in an xarray.DataArray.
631
+ """
632
+
633
+ def __init__(self, dataset: xr.Dataset):
634
+ super().__init__(dataset, spice_reference_frame=geometry.SpiceFrame.IMAP_DPS)
635
+ # TODO: Use spatial_utils.az_el_grid instead of
636
+ # manually creating the lon/lat values
637
+ inferred_spacing_deg = 360 / dataset.longitude.size
638
+ longitude_bin_centers = np.arange(
639
+ 0 + inferred_spacing_deg / 2, 360, inferred_spacing_deg
640
+ )
641
+ latitude_bin_centers = np.arange(
642
+ -2 + inferred_spacing_deg / 2, 2, inferred_spacing_deg
643
+ )
644
+
645
+ # Could be wrong about the order here
646
+ longitude_grid, latitude_grid = np.meshgrid(
647
+ longitude_bin_centers,
648
+ latitude_bin_centers,
649
+ indexing="ij",
650
+ )
651
+
652
+ longitude = longitude_grid.ravel()
653
+ latitude = latitude_grid.ravel()
654
+
655
+ self.az_el_points = np.column_stack((longitude, latitude))
656
+ self.spatial_coords = ("longitude", "latitude")
657
+
658
+
623
659
  # Define the Map classes
624
660
  class AbstractSkyMap(ABC):
625
661
  """
@@ -1119,7 +1155,7 @@ class RectangularSkyMap(AbstractSkyMap):
1119
1155
  )
1120
1156
  # Add the solid angle variable to the data_1d Dataset
1121
1157
  self.data_1d["solid_angle"] = xr.DataArray(
1122
- self.solid_angle_points[np.newaxis, :],
1158
+ self.solid_angle_points[np.newaxis, :].astype(np.float32),
1123
1159
  name="solid_angle",
1124
1160
  dims=[CoordNames.TIME.value, CoordNames.GENERIC_PIXEL.value],
1125
1161
  )
@@ -1423,6 +1459,12 @@ class HealpixSkyMap(AbstractSkyMap):
1423
1459
  {},
1424
1460
  coords={**self.spatial_coords},
1425
1461
  )
1462
+ # Add the solid angle variable to the data_1d Dataset
1463
+ self.data_1d["solid_angle"] = xr.DataArray(
1464
+ self.solid_angle_points[np.newaxis, :].astype(np.float32),
1465
+ name="solid_angle",
1466
+ dims=[CoordNames.TIME.value, CoordNames.GENERIC_PIXEL.value],
1467
+ )
1426
1468
  # return the data_1d as is, but with the pixel coordinate
1427
1469
  # renamed to CoordNames.HEALPIX_INDEX.value
1428
1470
  return self.data_1d.rename(
@@ -34,8 +34,6 @@ class DailyLightcurve:
34
34
  ecliptic latitude of bin centers [deg]
35
35
  number_of_bins : int
36
36
  number of bins in lightcurve
37
- raw_uncertainties : numpy.ndarray
38
- statistical uncertainties for raw histograms (sqrt of self.raw_histograms)
39
37
  l1b_data : xarray.Dataset
40
38
  L1B data filtered by good times, good angles, and good bins.
41
39
  """
@@ -52,7 +50,6 @@ class DailyLightcurve:
52
50
  ecliptic_lon: np.ndarray = field(init=False)
53
51
  ecliptic_lat: np.ndarray = field(init=False)
54
52
  number_of_bins: int = field(init=False)
55
- raw_uncertainties: np.ndarray = field(init=False)
56
53
  l1b_data: InitVar[xr.Dataset]
57
54
 
58
55
  def __post_init__(self, l1b_data: xr.Dataset) -> None:
@@ -76,14 +73,14 @@ class DailyLightcurve:
76
73
  self.exposure_times = self.calculate_exposure_times(
77
74
  l1b_data, exposure_times_per_timestamp
78
75
  )
79
- self.raw_uncertainties = np.sqrt(self.raw_histograms)
76
+ raw_uncertainties = np.sqrt(self.raw_histograms)
80
77
  self.photon_flux = np.zeros(len(self.raw_histograms))
81
78
  self.flux_uncertainties = np.zeros(len(self.raw_histograms))
82
79
 
83
80
  # TODO: Only where exposure counts != 0
84
81
  if len(self.exposure_times) != 0:
85
82
  self.photon_flux = self.raw_histograms / self.exposure_times
86
- self.flux_uncertainties = self.raw_uncertainties / self.exposure_times
83
+ self.flux_uncertainties = raw_uncertainties / self.exposure_times
87
84
 
88
85
  # TODO: Average this, or should they all be the same?
89
86
  self.spin_angle = np.average(l1b_data["imap_spin_angle_bin_cntr"].data, axis=0)
@@ -135,7 +132,7 @@ class DailyLightcurve:
135
132
  Sum of valid histograms across all timestamps.
136
133
  """
137
134
  histograms[histograms == -1] = 0
138
- return np.sum(histograms, axis=0)
135
+ return np.sum(histograms, axis=0, dtype=np.int64)
139
136
 
140
137
 
141
138
  @dataclass
@@ -550,11 +550,11 @@ def process_swe(accumulated_data: xr.Dataset, in_flight_cal_files: list) -> list
550
550
  "utc": met_to_utc(grouped["met"].min()).split(".")[0],
551
551
  "ttj2000ns": int(met_to_ttj2000ns(grouped["met"].min())),
552
552
  **{
553
- f"swe_normalized_counts_quarter_1_esa_{i}": Decimal(str(val))
553
+ f"swe_normalized_counts_half_1_esa_{i}": Decimal(str(val))
554
554
  for i, val in enumerate(summed_first)
555
555
  },
556
556
  **{
557
- f"swe_normalized_counts_quarter_2_esa_{i}": Decimal(str(val))
557
+ f"swe_normalized_counts_half_2_esa_{i}": Decimal(str(val))
558
558
  for i, val in enumerate(summed_second)
559
559
  },
560
560
  "swe_counterstreaming_electrons": max(bde_first_half, bde_second_half),
@@ -0,0 +1,48 @@
1
+ """Keys for I-ALiRT data products."""
2
+
3
+ IALIRT_KEYS = [
4
+ # Low energy (~300 keV) electrons (A-side)
5
+ "hit_e_a_side_low_en",
6
+ # Medium energy (~3 MeV) electrons (A-side)
7
+ "hit_e_a_side_med_en",
8
+ # High energy (>3 MeV) electrons (A-side)
9
+ "hit_e_a_side_high_en",
10
+ # Low energy (~300 keV) electrons (B-side)
11
+ "hit_e_b_side_low_en",
12
+ # Medium energy (~3 MeV) electrons (B-side)
13
+ "hit_e_b_side_med_en",
14
+ # High energy (>3 MeV) electrons (B-side)
15
+ "hit_e_b_side_high_en",
16
+ # Medium energy (12 to 70 MeV) protons (Omnidirectional)
17
+ "hit_h_omni_med_en",
18
+ # High energy (>70 MeV) protons (A-side)
19
+ "hit_h_a_side_high_en",
20
+ # High energy (>70 MeV) protons (B-side)
21
+ "hit_h_b_side_high_en",
22
+ # Low energy (6 to 8 MeV/nuc) He (Omnidirectional)
23
+ "hit_he_omni_low_en",
24
+ # High energy (15 to 70 MeV/nuc) He (Omnidirectional)
25
+ "hit_he_omni_high_en",
26
+ # Magnetic field vector in GSE coordinates
27
+ "mag_4s_b_gse",
28
+ # Magnetic field vector in GSM coordinates
29
+ "mag_4s_b_gsm",
30
+ # Magnetic field vector in RTN coordinates
31
+ "mag_4s_b_rtn",
32
+ # Azimuth angle (φ) of the magnetic field in GSM coordinates
33
+ "mag_phi_4s_b_gsm",
34
+ # Elevation angle (θ) of the magnetic field in GSM coordinates
35
+ "mag_theta_4s_b_gsm",
36
+ # Pseudo density of solar wind protons
37
+ "swapi_pseudo_proton_density",
38
+ # Pseudo speed of solar wind protons in solar inertial frame
39
+ "swapi_pseudo_proton_speed",
40
+ # Pseudo temperature of solar wind protons in plasma frame
41
+ "swapi_pseudo_proton_temperature",
42
+ # SWE Normalized Counts - Half Cycle 1
43
+ *[f"swe_normalized_counts_half_1_esa_{i}" for i in range(8)],
44
+ # SWE Normalized Counts - Half Cycle 2
45
+ *[f"swe_normalized_counts_half_2_esa_{i}" for i in range(8)],
46
+ # SWE Counterstreaming flag
47
+ "swe_counterstreaming_electrons",
48
+ ]