imap-processing 0.14.0__py3-none-any.whl → 0.16.0__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as they appear in their public registry, and is provided for informational purposes only.

Files changed (81)
  1. imap_processing/_version.py +2 -2
  2. imap_processing/cdf/config/imap_codice_global_cdf_attrs.yaml +60 -35
  3. imap_processing/cdf/config/imap_codice_l1a_variable_attrs.yaml +765 -287
  4. imap_processing/cdf/config/imap_codice_l1b_variable_attrs.yaml +1577 -288
  5. imap_processing/cdf/config/imap_codice_l2_variable_attrs.yaml +1004 -0
  6. imap_processing/cdf/config/imap_enamaps_l2-common_variable_attrs.yaml +28 -0
  7. imap_processing/cdf/config/imap_enamaps_l2-healpix_variable_attrs.yaml +1 -1
  8. imap_processing/cdf/config/imap_enamaps_l2-rectangular_variable_attrs.yaml +18 -0
  9. imap_processing/cdf/config/imap_glows_l2_variable_attrs.yaml +39 -3
  10. imap_processing/cdf/config/imap_ialirt_global_cdf_attrs.yaml +18 -0
  11. imap_processing/cdf/config/imap_ialirt_l1_variable_attrs.yaml +353 -0
  12. imap_processing/cdf/config/imap_idex_l1a_variable_attrs.yaml +7 -0
  13. imap_processing/cdf/config/imap_idex_l1b_variable_attrs.yaml +11 -0
  14. imap_processing/cdf/config/imap_idex_l2a_variable_attrs.yaml +4 -0
  15. imap_processing/cdf/config/imap_idex_l2c_variable_attrs.yaml +7 -3
  16. imap_processing/cdf/config/imap_lo_global_cdf_attrs.yaml +6 -0
  17. imap_processing/cdf/config/imap_mag_l2_variable_attrs.yaml +114 -0
  18. imap_processing/cdf/config/imap_swe_global_cdf_attrs.yaml +11 -5
  19. imap_processing/cdf/config/imap_swe_l1b_variable_attrs.yaml +23 -1
  20. imap_processing/cdf/config/imap_ultra_l1b_variable_attrs.yaml +4 -0
  21. imap_processing/cdf/config/imap_ultra_l1c_variable_attrs.yaml +2 -2
  22. imap_processing/cli.py +145 -80
  23. imap_processing/codice/codice_l1a.py +140 -84
  24. imap_processing/codice/codice_l1b.py +91 -18
  25. imap_processing/codice/codice_l2.py +81 -0
  26. imap_processing/codice/constants.py +68 -0
  27. imap_processing/ena_maps/ena_maps.py +43 -1
  28. imap_processing/glows/l2/glows_l2_data.py +3 -6
  29. imap_processing/hi/hi_l1a.py +447 -0
  30. imap_processing/hi/{l1b/hi_l1b.py → hi_l1b.py} +1 -1
  31. imap_processing/hi/{l1c/hi_l1c.py → hi_l1c.py} +21 -21
  32. imap_processing/hi/{l2/hi_l2.py → hi_l2.py} +13 -13
  33. imap_processing/hi/utils.py +6 -6
  34. imap_processing/hit/l1b/hit_l1b.py +30 -11
  35. imap_processing/ialirt/constants.py +38 -0
  36. imap_processing/ialirt/l0/parse_mag.py +1 -1
  37. imap_processing/ialirt/l0/process_codice.py +91 -0
  38. imap_processing/ialirt/l0/process_hit.py +12 -21
  39. imap_processing/ialirt/l0/process_swapi.py +172 -23
  40. imap_processing/ialirt/l0/process_swe.py +3 -10
  41. imap_processing/ialirt/utils/constants.py +62 -0
  42. imap_processing/ialirt/utils/create_xarray.py +135 -0
  43. imap_processing/idex/idex_l2c.py +9 -9
  44. imap_processing/lo/l1b/lo_l1b.py +6 -1
  45. imap_processing/lo/l1c/lo_l1c.py +22 -13
  46. imap_processing/lo/l2/lo_l2.py +213 -0
  47. imap_processing/mag/l1c/mag_l1c.py +8 -1
  48. imap_processing/mag/l2/mag_l2.py +6 -2
  49. imap_processing/mag/l2/mag_l2_data.py +7 -5
  50. imap_processing/swe/l1a/swe_l1a.py +6 -6
  51. imap_processing/swe/l1b/swe_l1b.py +70 -11
  52. imap_processing/ultra/l0/decom_ultra.py +1 -1
  53. imap_processing/ultra/l0/ultra_utils.py +0 -4
  54. imap_processing/ultra/l1b/badtimes.py +7 -3
  55. imap_processing/ultra/l1b/cullingmask.py +7 -2
  56. imap_processing/ultra/l1b/de.py +26 -12
  57. imap_processing/ultra/l1b/lookup_utils.py +8 -7
  58. imap_processing/ultra/l1b/ultra_l1b.py +59 -48
  59. imap_processing/ultra/l1b/ultra_l1b_culling.py +50 -18
  60. imap_processing/ultra/l1b/ultra_l1b_extended.py +4 -4
  61. imap_processing/ultra/l1c/helio_pset.py +53 -0
  62. imap_processing/ultra/l1c/spacecraft_pset.py +20 -12
  63. imap_processing/ultra/l1c/ultra_l1c.py +49 -26
  64. imap_processing/ultra/l1c/ultra_l1c_pset_bins.py +40 -2
  65. imap_processing/ultra/l2/ultra_l2.py +47 -2
  66. imap_processing/ultra/lookup_tables/Angular_Profiles_FM90_RightSlit.csv +524 -526
  67. imap_processing/ultra/utils/ultra_l1_utils.py +51 -10
  68. {imap_processing-0.14.0.dist-info → imap_processing-0.16.0.dist-info}/METADATA +2 -2
  69. {imap_processing-0.14.0.dist-info → imap_processing-0.16.0.dist-info}/RECORD +72 -69
  70. imap_processing/hi/l1a/__init__.py +0 -0
  71. imap_processing/hi/l1a/hi_l1a.py +0 -98
  72. imap_processing/hi/l1a/histogram.py +0 -152
  73. imap_processing/hi/l1a/science_direct_event.py +0 -214
  74. imap_processing/hi/l1b/__init__.py +0 -0
  75. imap_processing/hi/l1c/__init__.py +0 -0
  76. imap_processing/hi/l2/__init__.py +0 -0
  77. imap_processing/ialirt/l0/process_codicehi.py +0 -156
  78. imap_processing/ialirt/l0/process_codicelo.py +0 -41
  79. {imap_processing-0.14.0.dist-info → imap_processing-0.16.0.dist-info}/LICENSE +0 -0
  80. {imap_processing-0.14.0.dist-info → imap_processing-0.16.0.dist-info}/WHEEL +0 -0
  81. {imap_processing-0.14.0.dist-info → imap_processing-0.16.0.dist-info}/entry_points.txt +0 -0
imap_processing/codice/codice_l1a.py

@@ -166,19 +166,17 @@ class CoDICEL1aPipeline:
         """
         self.coords = {}

-        coord_names = ["epoch", *list(self.config["output_dims"].keys())]
-
-        # These are labels unique to lo-counters products coordinates
-        if self.config["dataset_name"] in [
-            "imap_codice_l1a_lo-counters-aggregated",
-            "imap_codice_l1a_lo-counters-singles",
-        ]:
-            coord_names.append("spin_sector_pairs_label")
+        coord_names = [
+            "epoch",
+            *self.config["output_dims"].keys(),
+            *[key + "_label" for key in self.config["output_dims"].keys()],
+        ]

         # Define the values for the coordinates
         for name in coord_names:
             if name == "epoch":
                 values = self.calculate_epoch_values()
+                dims = [name]
             elif name in [
                 "esa_step",
                 "inst_az",
@@ -188,6 +186,7 @@ class CoDICEL1aPipeline:
                 "ssd_index",
             ]:
                 values = np.arange(self.config["output_dims"][name])
+                dims = [name]
             elif name == "spin_sector_pairs_label":
                 values = np.array(
                     [
@@ -199,12 +198,23 @@ class CoDICEL1aPipeline:
                         "150-180 deg",
                     ]
                 )
+                dims = [name]
+            elif name in [
+                "spin_sector_label",
+                "esa_step_label",
+                "inst_az_label",
+                "spin_sector_index_label",
+                "ssd_index_label",
+            ]:
+                key = name.removesuffix("_label")
+                values = np.arange(self.config["output_dims"][key]).astype(str)
+                dims = [key]

             coord = xr.DataArray(
                 values,
                 name=name,
-                dims=[name],
-                attrs=self.cdf_attrs.get_variable_attributes(name),
+                dims=dims,
+                attrs=self.cdf_attrs.get_variable_attributes(name, check_schema=False),
             )

             self.coords[name] = coord
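For illustration, a minimal sketch of what the new coordinate construction produces, assuming a hypothetical output_dims of {"esa_step": 128, "inst_az": 24} (values invented for the example, not taken from a real CoDICE config):

    output_dims = {"esa_step": 128, "inst_az": 24}  # hypothetical config values
    coord_names = [
        "epoch",
        *output_dims.keys(),
        *[key + "_label" for key in output_dims.keys()],
    ]
    # coord_names == ["epoch", "esa_step", "inst_az", "esa_step_label", "inst_az_label"]
    # Each "<key>_label" coordinate reuses its parent's dimension (dims=[key])
    # and holds string indices: np.arange(output_dims[key]).astype(str)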
@@ -282,7 +292,7 @@ class CoDICEL1aPipeline:
         if self.config["dataset_name"] == "imap_codice_l1a_hi-sectored":
             for species in self.config["energy_table"]:
                 dataset = self.define_energy_bins(dataset, species)
-            dataset = dataset.drop_vars("esa_step")
+            dataset = dataset.drop_vars(["esa_step", "esa_step_label"])

         return dataset

@@ -316,14 +326,16 @@ class CoDICEL1aPipeline:
             centers,
             dims=[energy_bin_name],
             attrs=self.cdf_attrs.get_variable_attributes(
-                f"{self.config['dataset_name'].split('_')[-1]}-{energy_bin_name}"
+                f"{self.config['dataset_name'].split('_')[-1]}-{energy_bin_name}",
+                check_schema=False,
             ),
         )
         dataset[f"{energy_bin_name}_delta"] = xr.DataArray(
             deltas,
             dims=[f"{energy_bin_name}_delta"],
             attrs=self.cdf_attrs.get_variable_attributes(
-                f"{self.config['dataset_name'].split('_')[-1]}-{energy_bin_name}_delta"
+                f"{self.config['dataset_name'].split('_')[-1]}-{energy_bin_name}_delta",
+                check_schema=False,
             ),
         )

@@ -360,13 +372,15 @@ class CoDICEL1aPipeline:
         if variable_name == "energy_table":
             variable_data = self.get_energy_table()
             dims = ["esa_step"]
-            attrs = self.cdf_attrs.get_variable_attributes("energy_table")
+            attrs = self.cdf_attrs.get_variable_attributes(
+                "energy_table", check_schema=False
+            )

         elif variable_name == "acquisition_time_per_step":
             variable_data = self.get_acquisition_times()
             dims = ["esa_step"]
             attrs = self.cdf_attrs.get_variable_attributes(
-                "acquisition_time_per_step"
+                "acquisition_time_per_step", check_schema=False
             )

         # These variables can be gathered straight from the packet data
@@ -660,7 +674,9 @@ class CoDICEL1aPipeline:
         self.cdf_attrs.add_instrument_variable_attrs("codice", "l1a")


-def group_ialirt_data(packets: xr.Dataset, data_field_range: range) -> list[bytearray]:
+def group_ialirt_data(
+    packets: xr.Dataset, data_field_range: range, prefix: str
+) -> list[bytearray]:
     """
     Group together the individual I-ALiRT data fields.

@@ -670,6 +686,8 @@ def group_ialirt_data(packets: xr.Dataset, data_field_range: range) -> list[bytearray]:
         The dataset containing the I-ALiRT data packets.
     data_field_range : range
         The range of the individual data fields (15 or lo, 6 for hi).
+    prefix : str
+        The prefix used to index the data (i.e. ``cod_lo`` or ``cod_hi``).

     Returns
     -------
@@ -679,14 +697,28 @@
     current_data_stream = bytearray()
     grouped_data = []

+    # Workaround to get this function working for both I-ALiRT spacecraft
+    # data and CoDICE-specific I-ALiRT test data from Joey
+    # TODO: Once CoDICE I-ALiRT processing is more established, we can probably
+    # do away with processing the test data from Joey and just use the
+    # I-ALiRT data that is constructed closer to what we expect in-flight.
+    if hasattr(packets, "acquisition_time"):
+        time_key = "acquisition_time"
+        counter_key = "counter"
+        data_key = "data"
+    else:
+        time_key = f"{prefix}_acq"
+        counter_key = f"{prefix}_counter"
+        data_key = f"{prefix}_data"
+
     # When a counter value of 255 is encountered, this signifies the
     # end of the data stream
-    for packet_num in range(0, len(packets.acquisition_time.data)):
-        counter = packets.counter.data[packet_num]
+    for packet_num in range(0, len(packets[time_key].data)):
+        counter = packets[counter_key].data[packet_num]
         if counter != 255:
             for field in data_field_range:
                 current_data_stream.extend(
-                    bytearray([packets[f"data_{field:02}"].data[packet_num]])
+                    bytearray([packets[f"{data_key}_{field:02}"].data[packet_num]])
                 )
         else:
             # At this point, if there are data, the data stream is ready
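The grouping loop above treats a counter value of 255 as an end-of-stream sentinel. A self-contained sketch of the same pattern, with hypothetical packet values:

    def group_streams(counters: list[int], payloads: list[bytes]) -> list[bytearray]:
        """Accumulate payload bytes until a counter of 255 closes the stream."""
        grouped: list[bytearray] = []
        current = bytearray()
        for counter, payload in zip(counters, payloads):
            if counter != 255:
                current.extend(payload)
            elif current:  # sentinel reached and data have accumulated
                grouped.append(current)
                current = bytearray()
        return grouped

    group_streams([0, 1, 255], [b"\x01", b"\x02", b""])  # -> [bytearray(b"\x01\x02")]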
@@ -743,7 +775,7 @@ def create_binned_dataset(
         np.array(data["epoch"], dtype=np.uint64),
         name="epoch",
         dims=["epoch"],
-        attrs=pipeline.cdf_attrs.get_variable_attributes("epoch"),
+        attrs=pipeline.cdf_attrs.get_variable_attributes("epoch", check_schema=False),
     )
     dataset = xr.Dataset(
         coords={"epoch": coord},
@@ -751,10 +783,11 @@
     )

     # Add the data variables
+    descriptor = pipeline.config["dataset_name"].removeprefix("imap_codice_l1a_")
     for species in pipeline.config["energy_table"]:
         # Add the species data to the dataset
         values = np.array(data[species], dtype=np.uint32)
-        attrs = pipeline.cdf_attrs.get_variable_attributes(f"hi-omni-{species}")
+        attrs = pipeline.cdf_attrs.get_variable_attributes(f"{descriptor}-{species}")
         dims = ["epoch", f"energy_{species}"]
         dataset[species] = xr.DataArray(
             values,
@@ -843,13 +876,19 @@ def create_direct_event_dataset(apid: int, packets: xr.Dataset) -> xr.Dataset:
         epochs,
         name="epoch",
         dims=["epoch"],
-        attrs=cdf_attrs.get_variable_attributes("epoch"),
+        attrs=cdf_attrs.get_variable_attributes("epoch", check_schema=False),
     )
     event_num = xr.DataArray(
         np.arange(10000),
         name="event_num",
         dims=["event_num"],
-        attrs=cdf_attrs.get_variable_attributes("event_num"),
+        attrs=cdf_attrs.get_variable_attributes("event_num", check_schema=False),
+    )
+    event_num_label = xr.DataArray(
+        np.arange(10000).astype(str),
+        name="event_num_label",
+        dims=["event_num"],
+        attrs=cdf_attrs.get_variable_attributes("event_num_label", check_schema=False),
     )

     # Create the dataset to hold the data variables
@@ -858,7 +897,11 @@ def create_direct_event_dataset(apid: int, packets: xr.Dataset) -> xr.Dataset:
     elif apid == CODICEAPID.COD_HI_PHA:
         attrs = cdf_attrs.get_global_attributes("imap_codice_l1a_hi-pha")
     dataset = xr.Dataset(
-        coords={"epoch": epoch, "event_num": event_num},
+        coords={
+            "epoch": epoch,
+            "event_num": event_num,
+            "event_num_label": event_num_label,
+        },
         attrs=attrs,
     )

@@ -903,7 +946,7 @@ def create_hskp_dataset(packet: xr.Dataset) -> xr.Dataset:
         packet.epoch,
         name="epoch",
         dims=["epoch"],
-        attrs=cdf_attrs.get_variable_attributes("epoch"),
+        attrs=cdf_attrs.get_variable_attributes("epoch", check_schema=False),
     )

     dataset = xr.Dataset(
@@ -928,7 +971,13 @@ def create_hskp_dataset(packet: xr.Dataset) -> xr.Dataset:
         if variable in exclude_variables:
             continue

-        attrs = cdf_attrs.get_variable_attributes(variable)
+        # The housekeeping spin_period variable has different values than
+        # the spin_value attribute in other datasets, so it gets special
+        # treatment
+        if variable == "spin_period":
+            attrs = cdf_attrs.get_variable_attributes("spin_period_hskp")
+        else:
+            attrs = cdf_attrs.get_variable_attributes(variable)

         dataset[variable] = xr.DataArray(
             packet[variable].data, dims=["epoch"], attrs=attrs
@@ -972,72 +1021,79 @@ def create_ialirt_dataset(apid: int, packets: xr.Dataset) -> xr.Dataset:
     # See sections 10.4.1 and 10.4.2 in the algorithm document
     if apid == CODICEAPID.COD_LO_IAL:
         data_field_range = range(0, 15)
+        prefix = "cod_lo"
     elif apid == CODICEAPID.COD_HI_IAL:
         data_field_range = range(0, 5)
+        prefix = "cod_hi"

     # Group together packets of I-ALiRT data to form complete data sets
-    grouped_data = group_ialirt_data(packets, data_field_range)
-
-    # Process each group to get the science data and corresponding metadata
-    science_values, metadata_values = process_ialirt_data_streams(grouped_data)
-
-    # How data are processed is different for lo-iarlirt and hi-ialirt
-    if apid == CODICEAPID.COD_HI_IAL:
-        # Set some necessary values and process as a binned dataset similar to
-        # a hi-omni data product
-        metadata_for_processing = [
-            "table_id",
-            "plan_id",
-            "plan_step",
-            "view_id",
-            "spin_period",
-            "suspect",
-        ]
-        for var in metadata_for_processing:
-            packets[var] = metadata_values[var.upper()]
-        dataset = create_binned_dataset(apid, packets, science_values)
-
-    elif apid == CODICEAPID.COD_LO_IAL:
-        # Create a nominal instance of the pipeline and process similar to a
-        # lo-sw-species data product
-        pipeline = CoDICEL1aPipeline(
-            metadata_values["TABLE_ID"][0],
-            metadata_values["PLAN_ID"][0],
-            metadata_values["PLAN_STEP"][0],
-            metadata_values["VIEW_ID"][0],
-        )
-        pipeline.set_data_product_config(apid, packets)
-        pipeline.decompress_data(science_values)
-        pipeline.reshape_data()
-
-        # The calculate_epoch_values method needs acq_start_seconds and
-        # acq_start_subseconds attributes on the dataset
-        pipeline.dataset["acq_start_seconds"] = (
-            "_",
-            metadata_values["ACQ_START_SECONDS"],
-        )
-        pipeline.dataset["acq_start_subseconds"] = (
-            "_",
-            metadata_values["ACQ_START_SUBSECONDS"],
-        )
+    grouped_data = group_ialirt_data(packets, data_field_range, prefix)
+
+    if grouped_data:
+        # Process each group to get the science data and corresponding metadata
+        science_values, metadata_values = process_ialirt_data_streams(grouped_data)
+
+        # How data are processed is different for lo-iarlirt and hi-ialirt
+        if apid == CODICEAPID.COD_HI_IAL:
+            # Set some necessary values and process as a binned dataset similar to
+            # a hi-omni data product
+            metadata_for_processing = [
+                "table_id",
+                "plan_id",
+                "plan_step",
+                "view_id",
+                "spin_period",
+                "suspect",
+            ]
+            for var in metadata_for_processing:
+                packets[var] = metadata_values[var.upper()]
+            dataset = create_binned_dataset(apid, packets, science_values)
+
+        elif apid == CODICEAPID.COD_LO_IAL:
+            # Create a nominal instance of the pipeline and process similar to a
+            # lo-sw-species data product
+            pipeline = CoDICEL1aPipeline(
+                metadata_values["TABLE_ID"][0],
+                metadata_values["PLAN_ID"][0],
+                metadata_values["PLAN_STEP"][0],
+                metadata_values["VIEW_ID"][0],
+            )
+            pipeline.set_data_product_config(apid, packets)
+            pipeline.decompress_data(science_values)
+            pipeline.reshape_data()

-        pipeline.define_coordinates()
+            # The calculate_epoch_values method needs acq_start_seconds and
+            # acq_start_subseconds attributes on the dataset
+            pipeline.dataset["acq_start_seconds"] = (
+                "_",
+                metadata_values["ACQ_START_SECONDS"],
+            )
+            pipeline.dataset["acq_start_subseconds"] = (
+                "_",
+                metadata_values["ACQ_START_SUBSECONDS"],
+            )

-        # The dataset also needs the metadata that will be carried through
-        # to the final data product
-        for field in [
-            "spin_period",
-            "suspect",
-            "st_bias_gain_mode",
-            "sw_bias_gain_mode",
-            "rgfo_half_spin",
-            "nso_half_spin",
-        ]:
-            pipeline.dataset[field] = ("_", metadata_values[field.upper()])
+            pipeline.define_coordinates()

-        dataset = pipeline.define_data_variables()
+            # The dataset also needs the metadata that will be carried through
+            # to the final data product
+            for field in [
+                "spin_period",
+                "suspect",
+                "st_bias_gain_mode",
+                "sw_bias_gain_mode",
+                "rgfo_half_spin",
+                "nso_half_spin",
+            ]:
+                pipeline.dataset[field] = ("_", metadata_values[field.upper()])

-    return dataset
+            dataset = pipeline.define_data_variables()
+
+        return dataset
+
+    else:
+        logger.warning("No I-ALiRT data found")
+        return None


 def get_de_metadata(packets: xr.Dataset, segment: int) -> bytes:
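Since create_ialirt_dataset can now return None when no complete I-ALiRT stream is present, a caller would need to guard its output. A hedged sketch, where write_cdf stands in for whatever downstream CDF writer is used:

    dataset = create_ialirt_dataset(CODICEAPID.COD_LO_IAL, packets)
    if dataset is None:
        # group_ialirt_data never saw a counter of 255, so no stream was closed
        logger.warning("Skipping CDF generation for this I-ALiRT product")
    else:
        write_cdf(dataset)  # hypothetical downstream step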
imap_processing/codice/codice_l1b.py

@@ -9,9 +9,13 @@ from imap_processing.codice.codice_l1b import process_codice_l1b
     dataset = process_codice_l1b(l1a_filenanme)
 """

+# TODO: Figure out how to convert hi-priority data product. Need an updated
+# algorithm document that describes this.
+
 import logging
 from pathlib import Path

+import numpy as np
 import xarray as xr

 from imap_processing.cdf.imap_cdf_manager import ImapCdfAttributes
@@ -22,6 +26,71 @@ logger = logging.getLogger(__name__)
 logger.setLevel(logging.INFO)


+def convert_to_rates(
+    dataset: xr.Dataset, descriptor: str, variable_name: str
+) -> np.ndarray:
+    """
+    Apply a conversion from counts to rates.
+
+    The formula for conversion from counts to rates is specific to each data
+    product, but is largely grouped by CoDICE-Lo and CoDICE-Hi products.
+
+    Parameters
+    ----------
+    dataset : xarray.Dataset
+        The L1b dataset containing the data to convert.
+    descriptor : str
+        The descriptor of the data product of interest.
+    variable_name : str
+        The variable name to apply the conversion to.
+
+    Returns
+    -------
+    rates_data : np.ndarray
+        The converted data array.
+    """
+    # TODO: Temporary workaround to create CDFs for SIT-4. Revisit after SIT-4.
+    acq_times = 1
+
+    if descriptor in [
+        "lo-counters-aggregated",
+        "lo-counters-singles",
+        "lo-nsw-angular",
+        "lo-sw-angular",
+        "lo-nsw-priority",
+        "lo-sw-priority",
+        "lo-nsw-species",
+        "lo-sw-species",
+        "lo-ialirt",
+    ]:
+        # Applying rate calculation described in section 10.2 of the algorithm
+        # document
+        rates_data = dataset[variable_name].data / (
+            acq_times
+            * 1e-6  # Converting from microseconds to seconds
+            * constants.L1B_DATA_PRODUCT_CONFIGURATIONS[descriptor]["num_spin_sectors"]
+        )
+    elif descriptor in [
+        "hi-counters-aggregated",
+        "hi-counters-singles",
+        "hi-omni",
+        "hi-priority",
+        "hi-sectored",
+        "hi-ialirt",
+    ]:
+        # Applying rate calculation described in section 10.1 of the algorithm
+        # document
+        rates_data = dataset[variable_name].data / (
+            constants.L1B_DATA_PRODUCT_CONFIGURATIONS[descriptor]["num_spin_sectors"]
+            * constants.L1B_DATA_PRODUCT_CONFIGURATIONS[descriptor]["num_spins"]
+            * acq_times
+        )
+    elif descriptor == "hskp":
+        rates_data = dataset[variable_name].data / acq_times
+
+    return rates_data
+
 def process_codice_l1b(file_path: Path) -> xr.Dataset:
     """
     Will process CoDICE l1a data to create l1b data products.
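A worked instance of the two rate formulas under the diff's own SIT-4 placeholder of acq_times = 1, with a made-up counts value and configuration values copied from the L1B_DATA_PRODUCT_CONFIGURATIONS table added in constants.py:

    counts = 240.0

    # CoDICE-Lo (section 10.2): counts / (acq_times * 1e-6 * num_spin_sectors)
    # lo-sw-species has num_spin_sectors = 12:
    lo_rate = counts / (1 * 1e-6 * 12)   # 20_000_000.0

    # CoDICE-Hi (section 10.1): counts / (num_spin_sectors * num_spins * acq_times)
    # hi-omni has num_spin_sectors = 24 and num_spins = 4:
    hi_rate = counts / (24 * 4 * 1)      # 2.5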
@@ -45,7 +114,11 @@ def process_codice_l1b(file_path: Path) -> xr.Dataset:
     # set some useful distinguishing variables
     dataset_name = l1a_dataset.attrs["Logical_source"].replace("_l1a_", "_l1b_")
     descriptor = dataset_name.removeprefix("imap_codice_l1b_")
-    apid = constants.CODICEAPID_MAPPING[descriptor]
+
+    # Direct event data products do not have a level L1B
+    if descriptor in ["lo-pha", "hi-pha"]:
+        logger.warning("Encountered direct event data product. Skipping L1b processing")
+        return None

     # Get the L1b CDF attributes
     cdf_attrs = ImapCdfAttributes()
@@ -60,32 +133,32 @@

     # Determine which variables need to be converted from counts to rates
     # TODO: Figure out exactly which hskp variables need to be converted
+    # Housekeeping and binned datasets are treated a bit differently since
+    # not all variables need to be converted
     if descriptor == "hskp":
-        data_variables = []
-        support_variables = ["cmdexe", "cmdrjct"]
-        variables_to_convert = support_variables
+        # TODO: Check with Joey if any housekeeping data needs to be converted
+        variables_to_convert = []
+    elif descriptor == "hi-sectored":
+        variables_to_convert = ["h", "he3he4", "cno", "fe"]
+    elif descriptor == "hi-omni":
+        variables_to_convert = ["h", "he3", "he4", "c", "o", "ne_mg_si", "fe", "uh"]
+    elif descriptor == "hi-ialirt":
+        variables_to_convert = ["h"]
     else:
-        data_variables = getattr(
+        variables_to_convert = getattr(
             constants, f"{descriptor.upper().replace('-', '_')}_VARIABLE_NAMES"
         )
-        support_variables = constants.DATA_PRODUCT_CONFIGURATIONS[apid][
-            "support_variables"
-        ]
-        variables_to_convert = data_variables + support_variables

+    # Apply the conversion to rates
     for variable_name in variables_to_convert:
-        # Apply conversion of data from counts to rates
-        # TODO: Properly implement conversion factors on a per-data-product basis
-        # For now, just divide by 100 to get float values
-        l1b_dataset[variable_name].data = l1b_dataset[variable_name].data / 100
+        l1b_dataset[variable_name].data = convert_to_rates(
+            l1b_dataset, descriptor, variable_name
+        )

         # Set the variable attributes
-        if variable_name in data_variables:
-            cdf_attrs_key = f"{descriptor}-{variable_name}"
-        elif variable_name in support_variables:
-            cdf_attrs_key = variable_name
+        cdf_attrs_key = f"{descriptor}-{variable_name}"
         l1b_dataset[variable_name].attrs = cdf_attrs.get_variable_attributes(
-            cdf_attrs_key
+            cdf_attrs_key, check_schema=False
         )

     logger.info(f"\nFinal data product:\n{l1b_dataset}\n")
imap_processing/codice/codice_l2.py (new file)

@@ -0,0 +1,81 @@
+"""
+Perform CoDICE l2 processing.
+
+This module processes CoDICE l1 files and creates L2 data products.
+
+Notes
+-----
+    from imap_processing.codice.codice_l2 import process_codice_l2
+    dataset = process_codice_l2(l1_filename)
+"""
+
+import logging
+from pathlib import Path
+
+import xarray as xr
+
+from imap_processing.cdf.imap_cdf_manager import ImapCdfAttributes
+from imap_processing.cdf.utils import load_cdf
+
+logger = logging.getLogger(__name__)
+logger.setLevel(logging.INFO)
+
+
+def process_codice_l2(file_path: Path) -> xr.Dataset:
+    """
+    Will process CoDICE l1 data to create l2 data products.
+
+    Parameters
+    ----------
+    file_path : pathlib.Path
+        Path to the CoDICE L1 file to process.
+
+    Returns
+    -------
+    l2_dataset : xarray.Dataset
+        The ``xarray`` dataset containing the science data and supporting metadata.
+    """
+    logger.info(f"Processing {file_path}")
+
+    # Open the l1 file
+    l1_dataset = load_cdf(file_path)
+
+    # Use the logical source as a way to distinguish between data products and
+    # set some useful distinguishing variables
+    # TODO: Could clean this up by using imap-data-access methods?
+    dataset_name = l1_dataset.attrs["Logical_source"]
+    data_level = dataset_name.removeprefix("imap_codice_").split("_")[0]
+    descriptor = dataset_name.removeprefix(f"imap_codice_{data_level}_")
+    dataset_name = dataset_name.replace(data_level, "l2")
+
+    # TODO: Temporary work-around to replace "PHA" naming convention with
+    # "direct events". This will eventually be changed at the L1 level and
+    # thus this will eventually be removed.
+    if descriptor == "lo-pha":
+        dataset_name = dataset_name.replace("lo-pha", "lo-direct-events")
+    elif descriptor == "hi-pha":
+        dataset_name = dataset_name.replace("hi-pha", "hi-direct-events")
+
+    # Use the L1 data product as a starting point for L2
+    l2_dataset = l1_dataset.copy()
+
+    # Get the L2 CDF attributes
+    cdf_attrs = ImapCdfAttributes()
+    cdf_attrs.add_instrument_global_attrs("codice")
+    cdf_attrs.add_instrument_variable_attrs("codice", "l2")
+
+    # Update the global attributes
+    l2_dataset.attrs = cdf_attrs.get_global_attributes(dataset_name)
+
+    # Set the variable attributes
+    for variable_name in l2_dataset:
+        l2_dataset[variable_name].attrs = cdf_attrs.get_variable_attributes(
+            variable_name, check_schema=False
+        )
+
+    # TODO: Add L2-specific algorithms/functionality here. For SIT-4, we can
+    # just keep the data as-is.
+
+    logger.info(f"\nFinal data product:\n{l2_dataset}\n")
+
+    return l2_dataset
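Tracing the naming logic in process_codice_l2 with a plausible Logical_source (the input value here is hypothetical):

    dataset_name = "imap_codice_l1a_lo-pha"  # hypothetical Logical_source
    data_level = dataset_name.removeprefix("imap_codice_").split("_")[0]  # "l1a"
    descriptor = dataset_name.removeprefix(f"imap_codice_{data_level}_")  # "lo-pha"
    dataset_name = dataset_name.replace(data_level, "l2")  # "imap_codice_l2_lo-pha"
    dataset_name = dataset_name.replace("lo-pha", "lo-direct-events")
    # -> "imap_codice_l2_lo-direct-events"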
imap_processing/codice/constants.py

@@ -158,6 +158,17 @@ LO_PHA_VARIABLE_NAMES = [
     f"P{n}_{field}" for n in range(8) for field in LO_PHA_CDF_FIELDS
 ]

+# Final I-ALiRT data product fields
+CODICE_LO_IAL_DATA_FIELDS = [
+    "c_over_o_abundance",
+    "mg_over_o_abundance",
+    "fe_over_o_abundance",
+    "c_plus_6_over_c_plus_5_ratio",
+    "o_plus_7_over_o_plus_6_ratio",
+    "fe_low_over_fe_high_ratio",
+]
+CODICE_HI_IAL_DATA_FIELDS = ["h"]
+
 # lo- and hi-counters-aggregated data product variables are dynamically
 # determined based on the number of active counters
 # TODO: Try to convince Joey to move to lower case variable names with
@@ -663,6 +674,63 @@ DATA_PRODUCT_CONFIGURATIONS: dict[CODICEAPID | int, dict] = {
     },
 }

+# Various configurations to support L1b processing of individual data products
+# Much of these are described in the algorithm document in chapter 11 ("Data
+# Level 1B")
+L1B_DATA_PRODUCT_CONFIGURATIONS: dict[str, dict] = {
+    "hi-counters-aggregated": {
+        "num_spin_sectors": 24,
+        "num_spins": 16,
+    },
+    "hi-counters-singles": {
+        "num_spin_sectors": 24,
+        "num_spins": 16,
+    },
+    "hi-ialirt": {
+        "num_spin_sectors": 24,
+        "num_spins": 4,
+    },
+    "hi-omni": {
+        "num_spin_sectors": 24,
+        "num_spins": 4,
+    },
+    "hi-priority": {  # TODO: Ask Joey to define these
+        "num_spin_sectors": 1,
+        "num_spins": 1,
+    },
+    "hi-sectored": {
+        "num_spin_sectors": 2,
+        "num_spins": 16,
+    },
+    "lo-counters-aggregated": {
+        "num_spin_sectors": 2,
+    },
+    "lo-counters-singles": {
+        "num_spin_sectors": 2,
+    },
+    "lo-nsw-angular": {
+        "num_spin_sectors": 1,
+    },
+    "lo-sw-angular": {
+        "num_spin_sectors": 1,
+    },
+    "lo-nsw-priority": {
+        "num_spin_sectors": 1,
+    },
+    "lo-sw-priority": {
+        "num_spin_sectors": 1,
+    },
+    "lo-nsw-species": {
+        "num_spin_sectors": 12,
+    },
+    "lo-sw-species": {
+        "num_spin_sectors": 12,
+    },
+    "lo-ialirt": {
+        "num_spin_sectors": 12,
+    },
+}
+
 # Various configurations to support processing of direct events data products
 # These are described in the algorithm document in chapter 10 ("Data Level 1A")
 DE_DATA_PRODUCT_CONFIGURATIONS: dict[Any, dict[str, Any]] = {
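These entries feed convert_to_rates in codice_l1b.py. A minimal lookup using only the values defined above:

    config = L1B_DATA_PRODUCT_CONFIGURATIONS["hi-omni"]
    # Per the section 10.1 formula, the hi-side denominator is:
    denominator = config["num_spin_sectors"] * config["num_spins"]  # 24 * 4 = 96
    # rates = counts / (denominator * acq_times)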