imap-processing 0.14.0-py3-none-any.whl → 0.16.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.


Files changed (81)
  1. imap_processing/_version.py +2 -2
  2. imap_processing/cdf/config/imap_codice_global_cdf_attrs.yaml +60 -35
  3. imap_processing/cdf/config/imap_codice_l1a_variable_attrs.yaml +765 -287
  4. imap_processing/cdf/config/imap_codice_l1b_variable_attrs.yaml +1577 -288
  5. imap_processing/cdf/config/imap_codice_l2_variable_attrs.yaml +1004 -0
  6. imap_processing/cdf/config/imap_enamaps_l2-common_variable_attrs.yaml +28 -0
  7. imap_processing/cdf/config/imap_enamaps_l2-healpix_variable_attrs.yaml +1 -1
  8. imap_processing/cdf/config/imap_enamaps_l2-rectangular_variable_attrs.yaml +18 -0
  9. imap_processing/cdf/config/imap_glows_l2_variable_attrs.yaml +39 -3
  10. imap_processing/cdf/config/imap_ialirt_global_cdf_attrs.yaml +18 -0
  11. imap_processing/cdf/config/imap_ialirt_l1_variable_attrs.yaml +353 -0
  12. imap_processing/cdf/config/imap_idex_l1a_variable_attrs.yaml +7 -0
  13. imap_processing/cdf/config/imap_idex_l1b_variable_attrs.yaml +11 -0
  14. imap_processing/cdf/config/imap_idex_l2a_variable_attrs.yaml +4 -0
  15. imap_processing/cdf/config/imap_idex_l2c_variable_attrs.yaml +7 -3
  16. imap_processing/cdf/config/imap_lo_global_cdf_attrs.yaml +6 -0
  17. imap_processing/cdf/config/imap_mag_l2_variable_attrs.yaml +114 -0
  18. imap_processing/cdf/config/imap_swe_global_cdf_attrs.yaml +11 -5
  19. imap_processing/cdf/config/imap_swe_l1b_variable_attrs.yaml +23 -1
  20. imap_processing/cdf/config/imap_ultra_l1b_variable_attrs.yaml +4 -0
  21. imap_processing/cdf/config/imap_ultra_l1c_variable_attrs.yaml +2 -2
  22. imap_processing/cli.py +145 -80
  23. imap_processing/codice/codice_l1a.py +140 -84
  24. imap_processing/codice/codice_l1b.py +91 -18
  25. imap_processing/codice/codice_l2.py +81 -0
  26. imap_processing/codice/constants.py +68 -0
  27. imap_processing/ena_maps/ena_maps.py +43 -1
  28. imap_processing/glows/l2/glows_l2_data.py +3 -6
  29. imap_processing/hi/hi_l1a.py +447 -0
  30. imap_processing/hi/{l1b/hi_l1b.py → hi_l1b.py} +1 -1
  31. imap_processing/hi/{l1c/hi_l1c.py → hi_l1c.py} +21 -21
  32. imap_processing/hi/{l2/hi_l2.py → hi_l2.py} +13 -13
  33. imap_processing/hi/utils.py +6 -6
  34. imap_processing/hit/l1b/hit_l1b.py +30 -11
  35. imap_processing/ialirt/constants.py +38 -0
  36. imap_processing/ialirt/l0/parse_mag.py +1 -1
  37. imap_processing/ialirt/l0/process_codice.py +91 -0
  38. imap_processing/ialirt/l0/process_hit.py +12 -21
  39. imap_processing/ialirt/l0/process_swapi.py +172 -23
  40. imap_processing/ialirt/l0/process_swe.py +3 -10
  41. imap_processing/ialirt/utils/constants.py +62 -0
  42. imap_processing/ialirt/utils/create_xarray.py +135 -0
  43. imap_processing/idex/idex_l2c.py +9 -9
  44. imap_processing/lo/l1b/lo_l1b.py +6 -1
  45. imap_processing/lo/l1c/lo_l1c.py +22 -13
  46. imap_processing/lo/l2/lo_l2.py +213 -0
  47. imap_processing/mag/l1c/mag_l1c.py +8 -1
  48. imap_processing/mag/l2/mag_l2.py +6 -2
  49. imap_processing/mag/l2/mag_l2_data.py +7 -5
  50. imap_processing/swe/l1a/swe_l1a.py +6 -6
  51. imap_processing/swe/l1b/swe_l1b.py +70 -11
  52. imap_processing/ultra/l0/decom_ultra.py +1 -1
  53. imap_processing/ultra/l0/ultra_utils.py +0 -4
  54. imap_processing/ultra/l1b/badtimes.py +7 -3
  55. imap_processing/ultra/l1b/cullingmask.py +7 -2
  56. imap_processing/ultra/l1b/de.py +26 -12
  57. imap_processing/ultra/l1b/lookup_utils.py +8 -7
  58. imap_processing/ultra/l1b/ultra_l1b.py +59 -48
  59. imap_processing/ultra/l1b/ultra_l1b_culling.py +50 -18
  60. imap_processing/ultra/l1b/ultra_l1b_extended.py +4 -4
  61. imap_processing/ultra/l1c/helio_pset.py +53 -0
  62. imap_processing/ultra/l1c/spacecraft_pset.py +20 -12
  63. imap_processing/ultra/l1c/ultra_l1c.py +49 -26
  64. imap_processing/ultra/l1c/ultra_l1c_pset_bins.py +40 -2
  65. imap_processing/ultra/l2/ultra_l2.py +47 -2
  66. imap_processing/ultra/lookup_tables/Angular_Profiles_FM90_RightSlit.csv +524 -526
  67. imap_processing/ultra/utils/ultra_l1_utils.py +51 -10
  68. {imap_processing-0.14.0.dist-info → imap_processing-0.16.0.dist-info}/METADATA +2 -2
  69. {imap_processing-0.14.0.dist-info → imap_processing-0.16.0.dist-info}/RECORD +72 -69
  70. imap_processing/hi/l1a/__init__.py +0 -0
  71. imap_processing/hi/l1a/hi_l1a.py +0 -98
  72. imap_processing/hi/l1a/histogram.py +0 -152
  73. imap_processing/hi/l1a/science_direct_event.py +0 -214
  74. imap_processing/hi/l1b/__init__.py +0 -0
  75. imap_processing/hi/l1c/__init__.py +0 -0
  76. imap_processing/hi/l2/__init__.py +0 -0
  77. imap_processing/ialirt/l0/process_codicehi.py +0 -156
  78. imap_processing/ialirt/l0/process_codicelo.py +0 -41
  79. {imap_processing-0.14.0.dist-info → imap_processing-0.16.0.dist-info}/LICENSE +0 -0
  80. {imap_processing-0.14.0.dist-info → imap_processing-0.16.0.dist-info}/WHEEL +0 -0
  81. {imap_processing-0.14.0.dist-info → imap_processing-0.16.0.dist-info}/entry_points.txt +0 -0

imap_processing/swe/l1a/swe_l1a.py
@@ -58,13 +58,13 @@ def swe_l1a(packet_file: str) -> xr.Dataset:
 
     if SWEAPID.SWE_APP_HK in datasets_by_apid:
         logger.info("Processing SWE housekeeping data.")
-        hk_ds = datasets_by_apid[SWEAPID.SWE_APP_HK]
-        hk_ds.attrs.update(imap_attrs.get_global_attributes("imap_swe_l1a_hk"))
-        hk_ds["epoch"].attrs.update(epoch_attrs)
+        l1a_hk_ds = datasets_by_apid[SWEAPID.SWE_APP_HK]
+        l1a_hk_ds.attrs.update(imap_attrs.get_global_attributes("imap_swe_l1a_hk"))
+        l1a_hk_ds["epoch"].attrs.update(epoch_attrs)
         # Add attrs to HK data variables
-        for var_name in hk_ds.data_vars:
-            hk_ds[var_name].attrs.update(non_science_attrs)
-        processed_data.append(hk_ds)
+        for var_name in l1a_hk_ds.data_vars:
+            l1a_hk_ds[var_name].attrs.update(non_science_attrs)
+        processed_data.append(l1a_hk_ds)
 
     if SWEAPID.SWE_CEM_RAW in datasets_by_apid:
         logger.info("Processing SWE CEM raw data.")

imap_processing/swe/l1b/swe_l1b.py
@@ -10,6 +10,7 @@ import pandas as pd
 import xarray as xr
 from imap_data_access.processing_input import ProcessingInputCollection
 
+from imap_processing import imap_module_directory
 from imap_processing.cdf.imap_cdf_manager import ImapCdfAttributes
 from imap_processing.cdf.utils import load_cdf
 from imap_processing.spice.time import met_to_ttj2000ns
@@ -20,7 +21,7 @@ from imap_processing.swe.utils.swe_utils import (
     combine_acquisition_time,
     read_lookup_table,
 )
-from imap_processing.utils import convert_raw_to_eu
+from imap_processing.utils import convert_raw_to_eu, packet_file_to_datasets
 
 logger = logging.getLogger(__name__)
 
@@ -628,7 +629,7 @@ def filter_full_cycle_data(
     return l1a_data
 
 
-def swe_l1b(dependencies: ProcessingInputCollection) -> xr.Dataset:
+def swe_l1b_science(dependencies: ProcessingInputCollection) -> xr.Dataset:
     """
     SWE l1b science processing.
 
@@ -641,7 +642,7 @@ def swe_l1b(dependencies: ProcessingInputCollection) -> xr.Dataset:
     Returns
     -------
     dataset : xarray.Dataset
-        Processed l1b data.
+        Processed l1b science data.
     """
     # Read science data
     science_files = dependencies.get_file_paths(descriptor="sci")
@@ -872,8 +873,8 @@ def swe_l1b(dependencies: ProcessingInputCollection) -> xr.Dataset:
     # 4 rows --> metadata for each full cycle. Each element of 4 maps to
     # metadata of one quarter cycle.
 
-    # Create the dataset
-    dataset = xr.Dataset(
+    # Create the science dataset
+    science_dataset = xr.Dataset(
         coords={
             "epoch": epoch_time,
             "esa_step": esa_step,
@@ -888,23 +889,23 @@ def swe_l1b(dependencies: ProcessingInputCollection) -> xr.Dataset:
         attrs=cdf_attrs.get_global_attributes("imap_swe_l1b_sci"),
     )
 
-    dataset["science_data"] = xr.DataArray(
+    science_dataset["science_data"] = xr.DataArray(
         count_rate,
         dims=["epoch", "esa_step", "spin_sector", "cem_id"],
         attrs=cdf_attrs.get_variable_attributes("science_data"),
     )
-    dataset["acquisition_time"] = xr.DataArray(
+    science_dataset["acquisition_time"] = xr.DataArray(
         acq_time,
         dims=["epoch", "esa_step", "spin_sector"],
         attrs=cdf_attrs.get_variable_attributes("acquisition_time"),
     )
-    dataset["acq_duration"] = xr.DataArray(
+    science_dataset["acq_duration"] = xr.DataArray(
         acq_duration,
         dims=["epoch", "esa_step", "spin_sector"],
         attrs=cdf_attrs.get_variable_attributes("acq_duration"),
     )
 
-    dataset["esa_energy"] = xr.DataArray(
+    science_dataset["esa_energy"] = xr.DataArray(
         esa_energies,
         dims=["epoch", "esa_step", "spin_sector"],
         attrs=cdf_attrs.get_variable_attributes("esa_energy"),
@@ -915,11 +916,69 @@ def swe_l1b(dependencies: ProcessingInputCollection) -> xr.Dataset:
         if key in ["science_data", "acq_duration"]:
             continue
         varname = key.lower()
-        dataset[varname] = xr.DataArray(
+        science_dataset[varname] = xr.DataArray(
             value.data.reshape(-1, swe_constants.N_QUARTER_CYCLES),
             dims=["epoch", "cycle"],
             attrs=cdf_attrs.get_variable_attributes(varname),
         )
 
     logger.info("SWE L1b science processing completed")
-    return [dataset]
+    return science_dataset
+
+
+def swe_l1b(dependencies: ProcessingInputCollection) -> list[xr.Dataset]:
+    """
+    SWE L1B processing.
+
+    Parameters
+    ----------
+    dependencies : ProcessingInputCollection
+        Object containing lists of dependencies that CLI dependency
+        parameter received.
+
+    Returns
+    -------
+    list[xarray.Dataset]
+        List of processed datasets.
+    """
+    processed_datasets = []
+    has_science_data = dependencies.get_file_paths(descriptor="sci")
+    if has_science_data:
+        # Process science data to L1B
+        science_dataset = swe_l1b_science(dependencies)
+        processed_datasets.append(science_dataset)
+
+    # Process HK data using L0 file
+    l0_files = dependencies.get_file_paths(descriptor="raw")
+    if l0_files:
+        xtce_document = (
+            f"{imap_module_directory}/swe/packet_definitions/swe_packet_definition.xml"
+        )
+        datasets_by_apid = packet_file_to_datasets(
+            l0_files[0], xtce_document, use_derived_value=True
+        )
+        if SWEAPID.SWE_APP_HK in datasets_by_apid:
+            # Define minimal CDF attrs for the HK dataset
+            imap_attrs = ImapCdfAttributes()
+            imap_attrs.add_instrument_global_attrs("swe")
+            imap_attrs.add_instrument_variable_attrs("swe", "l1b")
+            hk_attrs = imap_attrs.get_variable_attributes("l1b_hk_attrs")
+            hk_str_attrs = imap_attrs.get_variable_attributes("l1b_hk_string_attrs")
+            epoch_attrs = imap_attrs.get_variable_attributes(
+                "epoch", check_schema=False
+            )
+
+            l1b_hk_dataset = datasets_by_apid[SWEAPID.SWE_APP_HK]
+            # Update CDF attrs
+            l1b_hk_dataset["epoch"].attrs.update(epoch_attrs)
+            l1b_hk_dataset.attrs.update(
+                imap_attrs.get_global_attributes("imap_swe_l1b_hk")
+            )
+            for hk_var in l1b_hk_dataset.data_vars:
+                if isinstance(l1b_hk_dataset[hk_var].data[0], str):
+                    l1b_hk_dataset[hk_var].attrs.update(hk_str_attrs)
+                else:
+                    l1b_hk_dataset[hk_var].attrs.update(hk_attrs)
+            processed_datasets.append(l1b_hk_dataset)
+
+    return processed_datasets
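
Note: the refactor above splits the old single-product swe_l1b into a science-only helper (swe_l1b_science) plus a thin dispatcher that also decodes housekeeping packets straight from the L0 file, so swe_l1b now returns a list with one dataset per product. A minimal sketch of the new calling convention; deps stands in for a ProcessingInputCollection assembled elsewhere:

# Hypothetical driver loop; `deps` is a pre-built ProcessingInputCollection.
datasets = swe_l1b(deps)  # list with up to two entries: science and/or HK
for ds in datasets:
    # e.g. "imap_swe_l1b_sci" or "imap_swe_l1b_hk"
    print(ds.attrs.get("Logical_source"), len(ds.data_vars))
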

imap_processing/ultra/l0/decom_ultra.py
@@ -189,7 +189,7 @@ def process_ultra_events(ds: xr.Dataset) -> xr.Dataset:
     }
 
     # Add the event data to the expanded dataset.
-    for key in event_data_list[0]:
+    for key in EVENT_FIELD_RANGES:
         expanded_data[key] = np.array([event[key] for event in all_events])
 
     event_ids = get_event_id(expanded_data["shcoarse"])
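
Note: keying the loop on the static EVENT_FIELD_RANGES mapping rather than on event_data_list[0] makes the field set independent of the parsed data, so a packet with zero events no longer raises an IndexError and always yields the same output variables. A toy illustration of the failure mode being avoided; the field names here are hypothetical:

EVENT_FIELD_RANGES = {"shcoarse": (0, 32), "tof": (32, 42)}  # field -> bit range

all_events: list[dict] = []  # a packet that decoded to zero events
# Old pattern: event_data_list[0] raises IndexError on an empty packet.
# New pattern: keys come from the constant, so this still succeeds.
expanded = {key: [event[key] for event in all_events] for key in EVENT_FIELD_RANGES}
print(expanded)  # {'shcoarse': [], 'tof': []}
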

imap_processing/ultra/l0/ultra_utils.py
@@ -65,7 +65,6 @@ ULTRA_HK = PacketProperties(
     apid=[
         866,
         867,
-        868,
         869,
         870,
         873,
@@ -74,7 +73,6 @@ ULTRA_HK = PacketProperties(
         877,
         930,
         931,
-        932,
         933,
         934,
         937,
@@ -85,7 +83,6 @@ ULTRA_HK = PacketProperties(
     logical_source=[
         "imap_ultra_l1a_45sensor-alarm",
         "imap_ultra_l1a_45sensor-memchecksum",
-        "imap_ultra_l1a_45sensor-memdump",
         "imap_ultra_l1a_45sensor-status",
         "imap_ultra_l1a_45sensor-bootstatus",
         "imap_ultra_l1a_45sensor-monitorlimits",
@@ -94,7 +91,6 @@ ULTRA_HK = PacketProperties(
         "imap_ultra_l1a_45sensor-imgparams",
         "imap_ultra_l1a_90sensor-alarm",
         "imap_ultra_l1a_90sensor-memchecksum",
-        "imap_ultra_l1a_90sensor-memdump",
         "imap_ultra_l1a_90sensor-status",
         "imap_ultra_l1a_90sensor-bootstatus",
         "imap_ultra_l1a_90sensor-monitorlimits",

imap_processing/ultra/l1b/badtimes.py
@@ -4,7 +4,7 @@ import numpy as np
 import xarray as xr
 from numpy.typing import NDArray
 
-from imap_processing.ultra.utils.ultra_l1_utils import create_dataset
+from imap_processing.ultra.utils.ultra_l1_utils import create_dataset, extract_data_dict
 
 FILLVAL_UINT16 = 65535
 FILLVAL_FLOAT64 = -1.0e31
@@ -36,10 +36,14 @@ def calculate_badtimes(
     culled_spins = np.setdiff1d(
         extendedspin_dataset["spin_number"].values, cullingmask_spins
     )
-
+    extendedspin_dataset = extendedspin_dataset.assign_coords(
+        epoch=("spin_number", extendedspin_dataset["epoch"].values)
+    )
     filtered_dataset = extendedspin_dataset.sel(spin_number=culled_spins)
 
-    badtimes_dataset = create_dataset(filtered_dataset, name, "l1b")
+    data_dict = extract_data_dict(filtered_dataset)
+
+    badtimes_dataset = create_dataset(data_dict, name, "l1b")
 
     if badtimes_dataset["spin_number"].size == 0:
         badtimes_dataset = badtimes_dataset.drop_dims("spin_number")
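
Note: both this hunk and the cullingmask change below attach epoch as a non-dimension coordinate along spin_number before calling .sel(), so the per-spin timestamps travel with the selected spins instead of being dropped. A self-contained sketch of the xarray pattern with toy values:

import numpy as np
import xarray as xr

epoch = np.array([100, 200, 300], dtype=np.int64)  # one timestamp per spin
ds = xr.Dataset(
    {"rate": ("spin_number", [1.0, 2.0, 3.0])},
    coords={"spin_number": [10, 11, 12]},
)
# Attach epoch along spin_number; label-based selection now carries it along.
ds = ds.assign_coords(epoch=("spin_number", epoch))
culled = ds.sel(spin_number=[10, 12])
print(culled["epoch"].values)  # [100 300]
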

imap_processing/ultra/l1b/cullingmask.py
@@ -4,7 +4,7 @@ import numpy as np
 import xarray as xr
 
 from imap_processing.quality_flags import ImapAttitudeUltraFlags, ImapRatesUltraFlags
-from imap_processing.ultra.utils.ultra_l1_utils import create_dataset
+from imap_processing.ultra.utils.ultra_l1_utils import create_dataset, extract_data_dict
 
 FILLVAL_UINT16 = 65535
 FILLVAL_FLOAT64 = -1.0e31
@@ -44,11 +44,16 @@ def calculate_cullingmask(extendedspin_dataset: xr.Dataset, name: str) -> xr.Dat
             == 0
         ).all(dim="energy_bin_geometric_mean")
     )
+    extendedspin_dataset = extendedspin_dataset.assign_coords(
+        epoch=("spin_number", extendedspin_dataset["epoch"].values)
+    )
     filtered_dataset = extendedspin_dataset.sel(
         spin_number=extendedspin_dataset["spin_number"][good_mask]
     )
 
-    cullingmask_dataset = create_dataset(filtered_dataset, name, "l1b")
+    data_dict = extract_data_dict(filtered_dataset)
+
+    cullingmask_dataset = create_dataset(data_dict, name, "l1b")
 
     if cullingmask_dataset["spin_number"].size == 0:
         cullingmask_dataset = cullingmask_dataset.drop_dims("spin_number")

imap_processing/ultra/l1b/de.py
@@ -34,7 +34,9 @@ FILLVAL_UINT8 = 255
 FILLVAL_FLOAT32 = -1.0e31
 
 
-def calculate_de(de_dataset: xr.Dataset, name: str) -> xr.Dataset:
+def calculate_de(
+    de_dataset: xr.Dataset, name: str, ancillary_files: dict
+) -> xr.Dataset:
     """
     Create dataset with defined datatypes for Direct Event Data.
 
@@ -44,6 +46,8 @@ def calculate_de(de_dataset: xr.Dataset, name: str) -> xr.Dataset:
         L1a dataset containing direct event data.
     name : str
         Name of the l1a dataset.
+    ancillary_files : dict
+        Ancillary files.
 
     Returns
     -------
@@ -106,7 +110,11 @@ def calculate_de(de_dataset: xr.Dataset, name: str) -> xr.Dataset:
     energy = np.full(len(de_dataset["epoch"]), FILLVAL_FLOAT32, dtype=np.float32)
     species_bin = np.full(len(de_dataset["epoch"]), FILLVAL_UINT8, dtype=np.uint8)
     t2 = np.full(len(de_dataset["epoch"]), FILLVAL_FLOAT32, dtype=np.float32)
-    event_times = np.full(len(de_dataset["epoch"]), FILLVAL_FLOAT32, dtype=np.float64)
+    event_times = np.full(len(de_dataset["epoch"]), FILLVAL_FLOAT32, dtype=np.float32)
+    shape = (len(de_dataset["epoch"]), 3)
+    sc_velocity = np.full(shape, FILLVAL_FLOAT32, dtype=np.float32)
+    sc_dps_velocity = np.full(shape, FILLVAL_FLOAT32, dtype=np.float32)
+    helio_velocity = np.full(shape, FILLVAL_FLOAT32, dtype=np.float32)
     spin_starts = np.full(len(de_dataset["epoch"]), FILLVAL_FLOAT32, dtype=np.float64)
     spin_period_sec = np.full(
         len(de_dataset["epoch"]), FILLVAL_FLOAT32, dtype=np.float64
@@ -224,13 +232,21 @@ def calculate_de(de_dataset: xr.Dataset, name: str) -> xr.Dataset:
 
     # Annotated Events.
     ultra_frame = getattr(SpiceFrame, f"IMAP_ULTRA_{sensor}")
-    sc_velocity, sc_dps_velocity, helio_velocity = get_annotated_particle_velocity(
-        event_times,
-        de_dict["direct_event_velocity"],
-        ultra_frame,
-        SpiceFrame.IMAP_DPS,
-        SpiceFrame.IMAP_SPACECRAFT,
-    )
+
+    # Account for counts=0 (event times have FILL value)
+    valid_events = event_times != FILLVAL_FLOAT32
+    if np.any(valid_events):
+        (
+            sc_velocity[valid_events],
+            sc_dps_velocity[valid_events],
+            helio_velocity[valid_events],
+        ) = get_annotated_particle_velocity(
+            event_times[valid_events],
+            de_dict["direct_event_velocity"][valid_events],
+            ultra_frame,
+            SpiceFrame.IMAP_DPS,
+            SpiceFrame.IMAP_SPACECRAFT,
+        )
 
     de_dict["velocity_sc"] = sc_velocity
     de_dict["velocity_dps_sc"] = sc_dps_velocity
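
Note: pre-allocating the velocity arrays with fill values (earlier hunk) and updating only the rows where event_times is valid is a standard masked-update pattern; invalid events keep FILLVAL instead of having fill-value times pushed through the SPICE frame transforms. A self-contained sketch; expensive_transform is a stand-in for get_annotated_particle_velocity:

import numpy as np

FILLVAL = np.float32(-1.0e31)
event_times = np.array([10.0, FILLVAL, 30.0], dtype=np.float32)
velocity = np.full((event_times.size, 3), FILLVAL, dtype=np.float32)

def expensive_transform(times: np.ndarray) -> np.ndarray:
    # Hypothetical stand-in for the SPICE-backed velocity computation.
    return np.stack([times, 2 * times, 3 * times], axis=-1)

valid = event_times != FILLVAL
if np.any(valid):
    velocity[valid] = expensive_transform(event_times[valid])
# Row 1 stays at FILLVAL because its event time was never valid.
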
@@ -247,9 +263,7 @@ def calculate_de(de_dataset: xr.Dataset, name: str) -> xr.Dataset:
         de_dict["theta"],
     )
     de_dict["event_efficiency"] = get_efficiency(
-        de_dict["tof_energy"],
-        de_dict["phi"],
-        de_dict["theta"],
+        de_dict["tof_energy"], de_dict["phi"], de_dict["theta"], ancillary_files
     )
 
     dataset = create_dataset(de_dict, name, "l1b")

imap_processing/ultra/l1b/lookup_utils.py
@@ -206,23 +206,24 @@ def get_angular_profiles(start_type: str, sensor: str) -> pd.DataFrame:
     return lookup_table
 
 
-def get_energy_efficiencies() -> pd.DataFrame:
+def get_energy_efficiencies(ancillary_files: dict) -> pd.DataFrame:
     """
     Lookup table for efficiencies for theta and phi.
 
     Further description is available starting on
     page 18 of the Algorithm Document.
 
+    Parameters
+    ----------
+    ancillary_files : dict[Path]
+        Ancillary files.
+
     Returns
     -------
     lookup_table : DataFrame
         Efficiencies lookup table for a given sensor.
     """
-    # TODO: Move this out of tests directory once we have the aux api
-    # TODO: ultra90 efficiencies
-    path = imap_module_directory / "tests" / "ultra" / "data" / "l1"
-    lookup_table = pd.read_csv(
-        path / "Ultra_efficiencies_45_combined_logistic_interpolation.csv"
-    )
+    # TODO: add sensor to input when new lookup tables are available.
+    lookup_table = pd.read_csv(ancillary_files["l1b-45sensor-logistic-interpolation"])
 
     return lookup_table
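
Note: replacing the hard-coded test-data path with a lookup into an ancillary_files mapping lets the efficiency table arrive through the normal ancillary-file delivery rather than the test tree. A hedged sketch of how a caller might build that mapping; the path is illustrative only:

from pathlib import Path

# Hypothetical descriptor -> local-path mapping for downloaded ancillary files.
ancillary_files = {
    "l1b-45sensor-logistic-interpolation": Path(
        "/data/ancillary/ultra_45sensor_efficiencies.csv"
    ),
}
lookup_table = get_energy_efficiencies(ancillary_files)
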

imap_processing/ultra/l1b/ultra_l1b.py
@@ -8,7 +8,7 @@ from imap_processing.ultra.l1b.de import calculate_de
 from imap_processing.ultra.l1b.extendedspin import calculate_extendedspin
 
 
-def ultra_l1b(data_dict: dict) -> list[xr.Dataset]:
+def ultra_l1b(data_dict: dict, ancillary_files: dict) -> list[xr.Dataset]:
     """
     Will process ULTRA L1A data into L1B CDF files at output_filepath.
 
@@ -16,6 +16,8 @@ def ultra_l1b(data_dict: dict) -> list[xr.Dataset]:
     ----------
     data_dict : dict
         The data itself and its dependent data.
+    ancillary_files : dict
+        Ancillary files.
 
     Returns
     -------
@@ -30,54 +32,63 @@ def ultra_l1b(data_dict: dict) -> list[xr.Dataset]:
     3. l1b extended, culling, badtimes created here
     """
     output_datasets = []
-    instrument_id = 45 if any("45" in key for key in data_dict.keys()) else 90
 
-    # L1b de data will be created if L1a de data is available
-    if f"imap_ultra_l1a_{instrument_id}sensor-de" in data_dict:
-        de_dataset = calculate_de(
-            data_dict[f"imap_ultra_l1a_{instrument_id}sensor-de"],
-            f"imap_ultra_l1b_{instrument_id}sensor-de",
-        )
-        output_datasets.append(de_dataset)
-    # L1b extended data will be created if L1a hk, rates,
-    # aux, params, and l1b de data are available
-    elif (
-        f"imap_ultra_l1b_{instrument_id}sensor-de" in data_dict
-        and f"imap_ultra_l1a_{instrument_id}sensor-rates" in data_dict
-        and f"imap_ultra_l1a_{instrument_id}sensor-aux" in data_dict
-        and f"imap_ultra_l1a_{instrument_id}sensor-params" in data_dict
-    ):
-        extendedspin_dataset = calculate_extendedspin(
-            {
-                f"imap_ultra_l1a_{instrument_id}sensor-aux": data_dict[
-                    f"imap_ultra_l1a_{instrument_id}sensor-aux"
-                ],
-                f"imap_ultra_l1a_{instrument_id}sensor-hk": data_dict[
-                    f"imap_ultra_l1a_{instrument_id}sensor-hk"
-                ],
-                f"imap_ultra_l1a_{instrument_id}sensor-rates": data_dict[
-                    f"imap_ultra_l1a_{instrument_id}sensor-rates"
-                ],
-                f"imap_ultra_l1b_{instrument_id}sensor-de": data_dict[
-                    f"imap_ultra_l1b_{instrument_id}sensor-de"
-                ],
-            },
-            f"imap_ultra_l1b_{instrument_id}sensor-extendedspin",
-            instrument_id,
-        )
-        cullingmask_dataset = calculate_cullingmask(
-            extendedspin_dataset,
-            f"imap_ultra_l1b_{instrument_id}sensor-cullingmask",
-        )
-        badtimes_dataset = calculate_badtimes(
-            extendedspin_dataset,
-            cullingmask_dataset["spin_number"].values,
-            f"imap_ultra_l1b_{instrument_id}sensor-badtimes",
-        )
-        output_datasets.extend(
-            [extendedspin_dataset, cullingmask_dataset, badtimes_dataset]
-        )
-    else:
+    # Account for possibility of having 45 and 90 in dictionary.
+    for instrument_id in [45, 90]:
+        # L1b de data will be created if L1a de data is available
+        if f"imap_ultra_l1a_{instrument_id}sensor-de" in data_dict:
+            de_dataset = calculate_de(
+                data_dict[f"imap_ultra_l1a_{instrument_id}sensor-de"],
+                f"imap_ultra_l1b_{instrument_id}sensor-de",
+                ancillary_files,
+            )
+            output_datasets.append(de_dataset)
+        # L1b extended data will be created if L1a hk, rates,
+        # aux, params, and l1b de data are available
+        elif (
+            f"imap_ultra_l1b_{instrument_id}sensor-de" in data_dict
+            and f"imap_ultra_l1a_{instrument_id}sensor-rates" in data_dict
+            and f"imap_ultra_l1a_{instrument_id}sensor-aux" in data_dict
+            and f"imap_ultra_l1a_{instrument_id}sensor-params" in data_dict
+        ):
+            extendedspin_dataset = calculate_extendedspin(
+                {
+                    f"imap_ultra_l1a_{instrument_id}sensor-aux": data_dict[
+                        f"imap_ultra_l1a_{instrument_id}sensor-aux"
+                    ],
+                    f"imap_ultra_l1a_{instrument_id}sensor-params": data_dict[
+                        f"imap_ultra_l1a_{instrument_id}sensor-params"
+                    ],
+                    f"imap_ultra_l1a_{instrument_id}sensor-rates": data_dict[
+                        f"imap_ultra_l1a_{instrument_id}sensor-rates"
+                    ],
+                    f"imap_ultra_l1b_{instrument_id}sensor-de": data_dict[
+                        f"imap_ultra_l1b_{instrument_id}sensor-de"
+                    ],
+                },
+                f"imap_ultra_l1b_{instrument_id}sensor-extendedspin",
+                instrument_id,
+            )
+            output_datasets.append(extendedspin_dataset)
+        elif (
+            f"imap_ultra_l1b_{instrument_id}sensor-extendedspin" in data_dict
+            and f"imap_ultra_l1b_{instrument_id}sensor-cullingmask" in data_dict
+        ):
+            badtimes_dataset = calculate_badtimes(
+                data_dict[f"imap_ultra_l1b_{instrument_id}sensor-extendedspin"],
+                data_dict[f"imap_ultra_l1b_{instrument_id}sensor-cullingmask"][
+                    "spin_number"
+                ].values,
+                f"imap_ultra_l1b_{instrument_id}sensor-badtimes",
+            )
+            output_datasets.append(badtimes_dataset)
+        elif f"imap_ultra_l1b_{instrument_id}sensor-extendedspin" in data_dict:
+            cullingmask_dataset = calculate_cullingmask(
+                data_dict[f"imap_ultra_l1b_{instrument_id}sensor-extendedspin"],
+                f"imap_ultra_l1b_{instrument_id}sensor-cullingmask",
+            )
+            output_datasets.append(cullingmask_dataset)
+    if not output_datasets:
         raise ValueError("Data dictionary does not contain the expected keys.")
 
     return output_datasets
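
Note: because the dispatcher now loops over both sensor IDs and keys each stage off the inputs that are actually present, one call can emit products for the 45- and 90-degree sensors in a single pass, and culling-mask and badtimes generation become separate invocations driven by upstream L1B products. A hedged usage sketch; the dataset values are placeholders:

# Hypothetical inputs: L1A direct-event data for both sensors.
data_dict = {
    "imap_ultra_l1a_45sensor-de": l1a_de_45,  # placeholder xr.Dataset
    "imap_ultra_l1a_90sensor-de": l1a_de_90,  # placeholder xr.Dataset
}
output = ultra_l1b(data_dict, ancillary_files)
# -> one L1B "sensor-de" dataset per sensor, returned as a list
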

imap_processing/ultra/l1b/ultra_l1b_culling.py
@@ -1,5 +1,7 @@
 """Culls Events for ULTRA L1b."""
 
+import logging
+
 import numpy as np
 import pandas as pd
 import xarray as xr
@@ -9,6 +11,11 @@ from imap_processing.quality_flags import ImapAttitudeUltraFlags, ImapRatesUltra
 from imap_processing.spice.spin import get_spin_data
 from imap_processing.ultra.constants import UltraConstants
 
+logging.basicConfig(level=logging.INFO)
+logger = logging.getLogger(__name__)
+
+SPIN_DURATION = 15  # Default spin duration in seconds.
+
 
 def get_energy_histogram(
     spin_number: NDArray, energy: NDArray
@@ -38,7 +45,8 @@ def get_energy_histogram(
     """
     spin_df = get_spin_data()
 
-    spin_edges = np.unique(spin_number)
+    unique_spin_number = np.unique(spin_number)
+    spin_edges = unique_spin_number.astype(np.uint16)
     spin_edges = np.append(spin_edges, spin_edges.max() + 1)
 
     # Counts per spin at each energy bin.
@@ -52,9 +60,17 @@ def get_energy_histogram(
 
     # Count rate per spin at each energy bin.
     for i in range(hist.shape[1]):
-        spin_duration = spin_df.spin_period_sec[spin_df.spin_number == i]
-        hist[:, i] /= spin_duration.values[0]
-        total_spin_duration += spin_duration.sum()
+        matched_spins = spin_df.spin_number == unique_spin_number[i]
+        if not np.any(matched_spins):
+            # TODO: we might throw an exception here instead.
+            logger.info(f"Unmatched spin number: {unique_spin_number[i]}")
+            spin_duration = SPIN_DURATION  # Default to 15 seconds if no match found
+        else:
+            spin_duration = spin_df.spin_period_sec[
+                spin_df.spin_number == unique_spin_number[i]
+            ].values[0]
+        hist[:, i] /= spin_duration
+        total_spin_duration += spin_duration
 
     mean_duration = total_spin_duration / hist.shape[1]
 
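Note: besides adding the fallback, this fixes a lookup bug. The old code filtered on spin_df.spin_number == i, i.e. the loop index, which only works if spin numbers start at zero and are contiguous; the new code matches the actual spin number for each histogram column. A toy illustration:

import numpy as np
import pandas as pd

spin_df = pd.DataFrame({"spin_number": [100, 101], "spin_period_sec": [15.0, 15.1]})
unique_spin_number = np.array([100, 101])

i = 0
old = spin_df.spin_period_sec[spin_df.spin_number == i]  # empty: no spin 0 exists
new = spin_df.spin_period_sec[spin_df.spin_number == unique_spin_number[i]]
print(len(old), new.values[0])  # 0 15.0
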
@@ -96,7 +112,7 @@ def flag_attitude(
     )
 
     quality_flags = np.full(
-        spin_rates.shape, ImapAttitudeUltraFlags.NONE.value, dtype=np.uint16
+        spins.shape, ImapAttitudeUltraFlags.NONE.value, dtype=np.uint16
     )
     quality_flags[bad_spin_rate_indices] |= ImapAttitudeUltraFlags.SPINRATE.value
     mismatch_indices = compare_aux_univ_spin_table(aux_dataset, spins, spin_df)
@@ -201,26 +217,42 @@ def compare_aux_univ_spin_table(
     mismatch_indices : np.ndarray
         Boolean array indicating which spins have mismatches.
     """
-    univ_mask = np.isin(spin_df["spin_number"].values, spins)
-    aux_mask = np.isin(aux_dataset["SPINNUMBER"].values, spins)
-
-    filtered_univ = spin_df[univ_mask]
-    filtered_aux = {field: aux_dataset[field].values[aux_mask] for field in aux_dataset}
+    # Identify valid spin matches
+    univ_spins = spin_df["spin_number"].values
+    aux_spins = aux_dataset["spinnumber"].values
+    present_in_both = np.intersect1d(univ_spins, aux_spins)
+
+    # Filter and align by spin number
+    df_univ = spin_df.set_index("spin_number").loc[present_in_both]
+    df_aux = (
+        pd.DataFrame({field: aux_dataset[field].values for field in aux_dataset})
+        .groupby("spinnumber", as_index=True)
+        .first()
+        .loc[present_in_both]
+    )
 
     mismatch_indices = np.zeros(len(spins), dtype=bool)
 
     fields_to_compare = [
-        ("TIMESPINSTART", "spin_start_sec_sclk"),
-        ("TIMESPINSTARTSUB", "spin_start_subsec_sclk"),
-        ("DURATION", "spin_period_sec"),
-        ("TIMESPINDATA", "spin_start_met"),
-        ("SPINPERIOD", "spin_period_sec"),
+        ("timespinstart", "spin_start_sec_sclk"),
+        ("timespinstartsub", "spin_start_subsec_sclk"),
+        ("duration", "spin_period_sec"),
+        ("timespindata", "spin_start_met"),
+        ("spinperiod", "spin_period_sec"),
     ]
 
+    # Compare fields
+    mismatch = np.zeros(len(df_aux), dtype=bool)
     for aux_field, spin_field in fields_to_compare:
-        aux_values = filtered_aux[aux_field]
-        spin_values = filtered_univ[spin_field].values
+        mismatch |= df_aux[aux_field].values != df_univ[spin_field].values
+
+    # Get spin numbers where mismatch is True
+    mismatched_spin_numbers = present_in_both[mismatch]
+    # Find indices in `spins` that correspond to these mismatched spins
+    mismatch_indices[np.isin(spins, mismatched_spin_numbers)] = True
 
-        mismatch_indices |= aux_values != spin_values
+    # Also flag any spins not present in the intersection
+    missing_spin_mask = ~np.isin(spins, present_in_both)
+    mismatch_indices[missing_spin_mask] = True
 
     return mismatch_indices
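
Note: the rewrite aligns the universal spin table and the AUX table on spin number before comparing fields, instead of applying two independently ordered boolean masks, and it now also flags spins missing from either table. A small self-contained sketch of the alignment idea:

import numpy as np
import pandas as pd

univ = pd.DataFrame({"spin_number": [3, 1, 2], "period": [15.0, 15.0, 15.2]})
aux = pd.DataFrame({"spinnumber": [1, 2, 4], "duration": [15.0, 15.1, 15.0]})

common = np.intersect1d(univ["spin_number"], aux["spinnumber"])  # [1 2]
# .loc[common] reorders both frames identically, so rows line up by spin.
u = univ.set_index("spin_number").loc[common]
a = aux.set_index("spinnumber").loc[common]
mismatch = a["duration"].values != u["period"].values
print(common[mismatch])  # [2] -> spin 2 disagrees between the tables
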

imap_processing/ultra/l1b/ultra_l1b_extended.py
@@ -941,9 +941,7 @@ def get_fwhm(
 
 
 def get_efficiency(
-    energy: NDArray,
-    phi_inst: NDArray,
-    theta_inst: NDArray,
+    energy: NDArray, phi_inst: NDArray, theta_inst: NDArray, ancillary_files: dict
 ) -> NDArray:
     """
     Interpolate efficiency values for each event.
@@ -956,13 +954,15 @@ def get_efficiency(
         Instrument-frame azimuth angle for each event.
     theta_inst : NDArray
         Instrument-frame elevation angle for each event.
+    ancillary_files : dict
+        Ancillary files.
 
     Returns
     -------
     efficiency : NDArray
         Interpolated efficiency values.
     """
-    lookup_table = get_energy_efficiencies()
+    lookup_table = get_energy_efficiencies(ancillary_files)
 
     theta_vals = np.sort(lookup_table["theta (deg)"].unique())
     phi_vals = np.sort(lookup_table["phi (deg)"].unique())
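
Note: get_efficiency goes on to build sorted theta and phi axes from the lookup table and interpolate a per-event efficiency on that grid. A hedged sketch of one way such a grid interpolation can work, using SciPy; the toy table and its "efficiency" column name are assumptions, not the real CSV schema:

import numpy as np
import pandas as pd
from scipy.interpolate import RegularGridInterpolator

# Toy lookup table shaped like the CSV: one row per (theta, phi) grid point.
lookup_table = pd.DataFrame({
    "theta (deg)": [0, 0, 10, 10],
    "phi (deg)": [0, 10, 0, 10],
    "efficiency": [0.50, 0.55, 0.60, 0.65],
})
theta_vals = np.sort(lookup_table["theta (deg)"].unique())
phi_vals = np.sort(lookup_table["phi (deg)"].unique())
grid = lookup_table.pivot(
    index="theta (deg)", columns="phi (deg)", values="efficiency"
)
interp = RegularGridInterpolator((theta_vals, phi_vals), grid.values)
print(interp([[5.0, 5.0]]))  # bilinear result: [0.575]
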