imap-processing 0.18.0__py3-none-any.whl → 0.19.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.
Files changed (122)
  1. imap_processing/_version.py +2 -2
  2. imap_processing/ancillary/ancillary_dataset_combiner.py +161 -1
  3. imap_processing/cdf/config/imap_codice_global_cdf_attrs.yaml +6 -0
  4. imap_processing/cdf/config/imap_codice_l1a_variable_attrs.yaml +221 -1057
  5. imap_processing/cdf/config/imap_codice_l1b_variable_attrs.yaml +307 -283
  6. imap_processing/cdf/config/imap_codice_l2_variable_attrs.yaml +1044 -203
  7. imap_processing/cdf/config/imap_constant_attrs.yaml +4 -2
  8. imap_processing/cdf/config/imap_enamaps_l2-common_variable_attrs.yaml +11 -0
  9. imap_processing/cdf/config/imap_glows_l1b_variable_attrs.yaml +15 -1
  10. imap_processing/cdf/config/imap_hi_global_cdf_attrs.yaml +5 -0
  11. imap_processing/cdf/config/imap_hit_global_cdf_attrs.yaml +10 -4
  12. imap_processing/cdf/config/imap_idex_l2a_variable_attrs.yaml +33 -4
  13. imap_processing/cdf/config/imap_idex_l2b_variable_attrs.yaml +8 -91
  14. imap_processing/cdf/config/imap_idex_l2c_variable_attrs.yaml +106 -16
  15. imap_processing/cdf/config/imap_lo_global_cdf_attrs.yaml +5 -4
  16. imap_processing/cdf/config/imap_lo_l1a_variable_attrs.yaml +4 -15
  17. imap_processing/cdf/config/imap_lo_l1c_variable_attrs.yaml +189 -98
  18. imap_processing/cdf/config/imap_mag_global_cdf_attrs.yaml +85 -2
  19. imap_processing/cdf/config/imap_mag_l1c_variable_attrs.yaml +24 -1
  20. imap_processing/cdf/config/imap_ultra_global_cdf_attrs.yaml +20 -8
  21. imap_processing/cdf/config/imap_ultra_l1b_variable_attrs.yaml +45 -35
  22. imap_processing/cdf/config/imap_ultra_l1c_variable_attrs.yaml +110 -7
  23. imap_processing/cli.py +138 -93
  24. imap_processing/codice/codice_l0.py +2 -1
  25. imap_processing/codice/codice_l1a.py +167 -69
  26. imap_processing/codice/codice_l1b.py +42 -32
  27. imap_processing/codice/codice_l2.py +215 -9
  28. imap_processing/codice/constants.py +790 -603
  29. imap_processing/codice/data/lo_stepping_values.csv +1 -1
  30. imap_processing/decom.py +1 -4
  31. imap_processing/ena_maps/ena_maps.py +71 -43
  32. imap_processing/ena_maps/utils/corrections.py +291 -0
  33. imap_processing/ena_maps/utils/map_utils.py +20 -4
  34. imap_processing/ena_maps/utils/naming.py +8 -2
  35. imap_processing/glows/ancillary/imap_glows_exclusions-by-instr-team_20250923_v002.dat +10 -0
  36. imap_processing/glows/ancillary/imap_glows_map-of-excluded-regions_20250923_v002.dat +393 -0
  37. imap_processing/glows/ancillary/imap_glows_map-of-uv-sources_20250923_v002.dat +593 -0
  38. imap_processing/glows/ancillary/imap_glows_pipeline-settings_20250923_v002.json +54 -0
  39. imap_processing/glows/ancillary/imap_glows_suspected-transients_20250923_v002.dat +10 -0
  40. imap_processing/glows/l1b/glows_l1b.py +123 -18
  41. imap_processing/glows/l1b/glows_l1b_data.py +358 -47
  42. imap_processing/glows/l2/glows_l2.py +11 -0
  43. imap_processing/hi/hi_l1a.py +124 -3
  44. imap_processing/hi/hi_l1b.py +154 -71
  45. imap_processing/hi/hi_l1c.py +4 -109
  46. imap_processing/hi/hi_l2.py +104 -60
  47. imap_processing/hi/utils.py +262 -8
  48. imap_processing/hit/l0/constants.py +3 -0
  49. imap_processing/hit/l0/decom_hit.py +3 -6
  50. imap_processing/hit/l1a/hit_l1a.py +311 -21
  51. imap_processing/hit/l1b/hit_l1b.py +54 -126
  52. imap_processing/hit/l2/hit_l2.py +6 -6
  53. imap_processing/ialirt/calculate_ingest.py +219 -0
  54. imap_processing/ialirt/constants.py +12 -2
  55. imap_processing/ialirt/generate_coverage.py +15 -2
  56. imap_processing/ialirt/l0/ialirt_spice.py +6 -2
  57. imap_processing/ialirt/l0/parse_mag.py +293 -42
  58. imap_processing/ialirt/l0/process_hit.py +5 -3
  59. imap_processing/ialirt/l0/process_swapi.py +41 -25
  60. imap_processing/ialirt/process_ephemeris.py +70 -14
  61. imap_processing/ialirt/utils/create_xarray.py +1 -1
  62. imap_processing/idex/idex_l0.py +2 -2
  63. imap_processing/idex/idex_l1a.py +2 -3
  64. imap_processing/idex/idex_l1b.py +2 -3
  65. imap_processing/idex/idex_l2a.py +130 -4
  66. imap_processing/idex/idex_l2b.py +158 -143
  67. imap_processing/idex/idex_utils.py +1 -3
  68. imap_processing/lo/ancillary_data/imap_lo_hydrogen-geometric-factor_v001.csv +75 -0
  69. imap_processing/lo/ancillary_data/imap_lo_oxygen-geometric-factor_v001.csv +75 -0
  70. imap_processing/lo/l0/lo_science.py +25 -24
  71. imap_processing/lo/l1b/lo_l1b.py +93 -19
  72. imap_processing/lo/l1c/lo_l1c.py +273 -93
  73. imap_processing/lo/l2/lo_l2.py +949 -135
  74. imap_processing/lo/lo_ancillary.py +55 -0
  75. imap_processing/mag/l1a/mag_l1a.py +1 -0
  76. imap_processing/mag/l1a/mag_l1a_data.py +26 -0
  77. imap_processing/mag/l1b/mag_l1b.py +3 -2
  78. imap_processing/mag/l1c/interpolation_methods.py +14 -15
  79. imap_processing/mag/l1c/mag_l1c.py +23 -6
  80. imap_processing/mag/l1d/mag_l1d.py +57 -14
  81. imap_processing/mag/l1d/mag_l1d_data.py +202 -32
  82. imap_processing/mag/l2/mag_l2.py +2 -0
  83. imap_processing/mag/l2/mag_l2_data.py +14 -5
  84. imap_processing/quality_flags.py +23 -1
  85. imap_processing/spice/geometry.py +89 -39
  86. imap_processing/spice/pointing_frame.py +4 -8
  87. imap_processing/spice/repoint.py +78 -2
  88. imap_processing/spice/spin.py +28 -8
  89. imap_processing/spice/time.py +12 -22
  90. imap_processing/swapi/l1/swapi_l1.py +10 -4
  91. imap_processing/swapi/l2/swapi_l2.py +15 -17
  92. imap_processing/swe/l1b/swe_l1b.py +1 -2
  93. imap_processing/ultra/constants.py +30 -24
  94. imap_processing/ultra/l0/ultra_utils.py +9 -11
  95. imap_processing/ultra/l1a/ultra_l1a.py +1 -2
  96. imap_processing/ultra/l1b/badtimes.py +35 -11
  97. imap_processing/ultra/l1b/de.py +95 -31
  98. imap_processing/ultra/l1b/extendedspin.py +31 -16
  99. imap_processing/ultra/l1b/goodtimes.py +112 -0
  100. imap_processing/ultra/l1b/lookup_utils.py +281 -28
  101. imap_processing/ultra/l1b/quality_flag_filters.py +10 -1
  102. imap_processing/ultra/l1b/ultra_l1b.py +7 -7
  103. imap_processing/ultra/l1b/ultra_l1b_culling.py +169 -7
  104. imap_processing/ultra/l1b/ultra_l1b_extended.py +311 -69
  105. imap_processing/ultra/l1c/helio_pset.py +139 -37
  106. imap_processing/ultra/l1c/l1c_lookup_utils.py +289 -0
  107. imap_processing/ultra/l1c/spacecraft_pset.py +140 -29
  108. imap_processing/ultra/l1c/ultra_l1c.py +33 -24
  109. imap_processing/ultra/l1c/ultra_l1c_culling.py +92 -0
  110. imap_processing/ultra/l1c/ultra_l1c_pset_bins.py +400 -292
  111. imap_processing/ultra/l2/ultra_l2.py +54 -11
  112. imap_processing/ultra/utils/ultra_l1_utils.py +37 -7
  113. imap_processing/utils.py +3 -4
  114. {imap_processing-0.18.0.dist-info → imap_processing-0.19.2.dist-info}/METADATA +2 -2
  115. {imap_processing-0.18.0.dist-info → imap_processing-0.19.2.dist-info}/RECORD +118 -109
  116. imap_processing/idex/idex_l2c.py +0 -84
  117. imap_processing/spice/kernels.py +0 -187
  118. imap_processing/ultra/l1b/cullingmask.py +0 -87
  119. imap_processing/ultra/l1c/histogram.py +0 -36
  120. {imap_processing-0.18.0.dist-info → imap_processing-0.19.2.dist-info}/LICENSE +0 -0
  121. {imap_processing-0.18.0.dist-info → imap_processing-0.19.2.dist-info}/WHEEL +0 -0
  122. {imap_processing-0.18.0.dist-info → imap_processing-0.19.2.dist-info}/entry_points.txt +0 -0
imap_processing/lo/lo_ancillary.py (new file)
@@ -0,0 +1,55 @@
+"""Ancillary file reading for IMAP-Lo processing."""
+
+from pathlib import Path
+
+import pandas as pd
+
+# convert the YYYYDDD datetime format directly upon reading
+_CONVERTERS = {
+    "YYYYDDD": lambda x: pd.to_datetime(str(x), format="%Y%j"),
+    "#YYYYDDD": lambda x: pd.to_datetime(str(x), format="%Y%j"),
+    "YYYYDDD_strt": lambda x: pd.to_datetime(str(x), format="%Y%j"),
+    "YYYYDDD_end": lambda x: pd.to_datetime(str(x), format="%Y%j"),
+}
+
+# Columns in the csv files to rename for consistency
+_RENAME_COLUMNS = {
+    "YYYYDDD": "Date",
+    "#YYYYDDD": "Date",
+    "#Comments": "Comments",
+    "YYYYDDD_strt": "StartDate",
+    "YYYYDDD_end": "EndDate",
+}
+
+
+def read_ancillary_file(ancillary_file: str | Path) -> pd.DataFrame:
+    """
+    Read a generic ancillary CSV file into a pandas DataFrame.
+
+    Parameters
+    ----------
+    ancillary_file : str or Path
+        Path to the ancillary CSV file.
+
+    Returns
+    -------
+    pd.DataFrame
+        DataFrame containing the ancillary data.
+    """
+    skiprows = None
+    if "esa-mode-lut" in str(ancillary_file):
+        # skip the first row which is a comment
+        skiprows = [0]
+    elif "geometric-factor" in str(ancillary_file):
+        # skip the rows with comment headers indicating Hi_Res and Hi_Thr
+        skiprows = [1, 38]
+    df = pd.read_csv(ancillary_file, converters=_CONVERTERS, skiprows=skiprows)
+    df = df.rename(columns=_RENAME_COLUMNS)
+
+    if "geometric-factor" in str(ancillary_file):
+        # Add an ESA mode column based on the known structure of the file.
+        # The first 36 rows are ESA mode 0 (HiRes), the second 36 are ESA mode 1 (HiThr)
+        df["esa_mode"] = 0
+        df.loc[36:, "esa_mode"] = 1
+
+    return df
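For orientation, a minimal usage sketch of the new reader against one of the geometric-factor CSVs bundled in this release (the install-relative path is an assumption; adjust to wherever the wheel's data files land). The 36/36 row split follows from the comments in the function above:

    from imap_processing.lo.lo_ancillary import read_ancillary_file

    df = read_ancillary_file(
        "imap_processing/lo/ancillary_data/imap_lo_hydrogen-geometric-factor_v001.csv"
    )
    # Rows 0-35 carry ESA mode 0 (HiRes); rows 36+ carry ESA mode 1 (HiThr).
    print(df["esa_mode"].value_counts())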
imap_processing/mag/l1a/mag_l1a.py
@@ -328,6 +328,7 @@ def generate_dataset(
     global_attributes = attribute_manager.get_global_attributes(logical_file_id)
     global_attributes["is_mago"] = str(bool(single_file_l1a.is_mago))
     global_attributes["is_active"] = str(bool(single_file_l1a.is_active))
+    global_attributes["all_vectors_primary"] = single_file_l1a.all_vectors_primary()
     global_attributes["vectors_per_second"] = (
         single_file_l1a.vectors_per_second_attribute()
     )
imap_processing/mag/l1a/mag_l1a_data.py
@@ -15,6 +15,7 @@ from imap_processing.mag.constants import (
     MAX_COMPRESSED_VECTOR_BITS,
     MAX_FINE_TIME,
     RANGE_BIT_WIDTH,
+    PrimarySensor,
 )
 from imap_processing.spice.time import met_to_ttj2000ns

@@ -241,6 +242,7 @@ class MagL1a:
     twos_complement()
     update_compression_array()
     vectors_per_second_attribute()
+    all_vectors_primary()
     """

     is_mago: bool
@@ -1117,3 +1119,27 @@ class MagL1a:
                 last_vectors_per_second = vecsec

         return output_str
+
+    def all_vectors_primary(self) -> bool:
+        """
+        Check if all vectors in the file are from the primary sensor.
+
+        For MAGO datasets, this checks if MAGO was consistently the primary sensor
+        across all packets. For MAGI datasets, this checks if MAGI was consistently
+        the primary sensor across all packets.
+
+        Returns
+        -------
+        bool
+            True if all vectors are from the primary sensor across all packets,
+            False otherwise.
+        """
+        expected_primary_value = (
+            PrimarySensor.MAGO.value if self.is_mago else PrimarySensor.MAGI.value
+        )
+
+        for _, packet in self.packet_definitions.items():
+            if packet.mago_is_primary != expected_primary_value:
+                return False
+
+        return True
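The loop above is the long form of an all(...) reduction. A minimal standalone sketch of the same check, with the PrimarySensor values and packet objects mocked up (both are illustrative assumptions; the real definitions live in imap_processing.mag.constants):

    from dataclasses import dataclass
    from enum import IntEnum


    class PrimarySensor(IntEnum):
        MAGO = 0  # assumed values, for illustration only
        MAGI = 1


    @dataclass
    class Packet:
        mago_is_primary: int


    def all_vectors_primary(packets: dict[int, Packet], is_mago: bool) -> bool:
        expected = PrimarySensor.MAGO.value if is_mago else PrimarySensor.MAGI.value
        return all(p.mago_is_primary == expected for p in packets.values())


    # One packet flipped to MAGI-primary makes the whole file non-primary.
    packets = {0: Packet(0), 1: Packet(0), 2: Packet(1)}
    print(all_vectors_primary(packets, is_mago=True))  # False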
imap_processing/mag/l1b/mag_l1b.py
@@ -74,7 +74,6 @@ def mag_l1b(
     calibration_matrix, time_shift = retrieve_matrix_from_l1b_calibration(
         calibration_dataset, day_to_process, is_mago
     )
-    print(f"Using calibration matrix: {calibration_matrix}")

     output_dataset = mag_l1b_processing(
         input_dataset, calibration_matrix, time_shift, mag_attributes, source
@@ -182,6 +181,9 @@ def mag_l1b_processing(
     try:
         global_attributes["is_mago"] = input_dataset.attrs["is_mago"]
         global_attributes["is_active"] = input_dataset.attrs["is_active"]
+        global_attributes["all_vectors_primary"] = input_dataset.attrs[
+            "all_vectors_primary"
+        ]
         global_attributes["vectors_per_second"] = timeshift_vectors_per_second(
             input_dataset.attrs["vectors_per_second"], time_shift
         )
@@ -245,7 +247,6 @@ def retrieve_matrix_from_l1b_calibration(
         The calibration matrix and time shift. These can be passed directly into
         update_vector, calibrate_vector, and shift_time.
     """
-    print(f"Finding data for day {day}")
     if is_mago:
         calibration_matrix = calibration_dataset.sel(epoch=day)["MFOTOURFO"]
         time_shift = calibration_dataset.sel(epoch=day)["OTS"]
imap_processing/mag/l1c/interpolation_methods.py
@@ -3,7 +3,6 @@

 import logging
 from enum import Enum
-from typing import Optional

 import numpy as np
 from scipy.interpolate import make_interp_spline
@@ -44,8 +43,8 @@ def linear(
     input_vectors: np.ndarray,
     input_timestamps: np.ndarray,
     output_timestamps: np.ndarray,
-    input_rate: Optional[VecSec] = None,
-    output_rate: Optional[VecSec] = None,
+    input_rate: VecSec | None = None,
+    output_rate: VecSec | None = None,
 ) -> np.ndarray:
     """
     Linear interpolation of input vectors to output timestamps.
@@ -80,8 +79,8 @@ def quadratic(
     input_vectors: np.ndarray,
     input_timestamps: np.ndarray,
     output_timestamps: np.ndarray,
-    input_rate: Optional[VecSec] = None,
-    output_rate: Optional[VecSec] = None,
+    input_rate: VecSec | None = None,
+    output_rate: VecSec | None = None,
 ) -> np.ndarray:
     """
     Quadratic interpolation of input vectors to output timestamps.
@@ -115,8 +114,8 @@ def cubic(
     input_vectors: np.ndarray,
     input_timestamps: np.ndarray,
     output_timestamps: np.ndarray,
-    input_rate: Optional[VecSec] = None,
-    output_rate: Optional[VecSec] = None,
+    input_rate: VecSec | None = None,
+    output_rate: VecSec | None = None,
 ) -> np.ndarray:
     """
     Cubic interpolation of input vectors to output timestamps.
@@ -175,8 +174,8 @@ def cic_filter(
     input_vectors: np.ndarray,
     input_timestamps: np.ndarray,
     output_timestamps: np.ndarray,
-    input_rate: Optional[VecSec],
-    output_rate: Optional[VecSec],
+    input_rate: VecSec | None,
+    output_rate: VecSec | None,
 ):
     """
     Apply CIC filter to data before interpolating.
@@ -242,8 +241,8 @@ def linear_filtered(
     input_vectors: np.ndarray,
     input_timestamps: np.ndarray,
     output_timestamps: np.ndarray,
-    input_rate: Optional[VecSec] = None,
-    output_rate: Optional[VecSec] = None,
+    input_rate: VecSec | None = None,
+    output_rate: VecSec | None = None,
 ) -> np.ndarray:
     """
     Linear filtered interpolation of input vectors to output timestamps.
@@ -281,8 +280,8 @@ def quadratic_filtered(
     input_vectors: np.ndarray,
     input_timestamps: np.ndarray,
     output_timestamps: np.ndarray,
-    input_rate: Optional[VecSec] = None,
-    output_rate: Optional[VecSec] = None,
+    input_rate: VecSec | None = None,
+    output_rate: VecSec | None = None,
 ) -> np.ndarray:
     """
     Quadratic filtered interpolation of input vectors to output timestamps.
@@ -320,8 +319,8 @@ def cubic_filtered(
     input_vectors: np.ndarray,
     input_timestamps: np.ndarray,
     output_timestamps: np.ndarray,
-    input_rate: Optional[VecSec] = None,
-    output_rate: Optional[VecSec] = None,
+    input_rate: VecSec | None = None,
+    output_rate: VecSec | None = None,
 ) -> np.ndarray:
     """
     Cubic filtered interpolation of input vectors to output timestamps.
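These signature changes swap typing.Optional for the equivalent PEP 604 union spelling, which evaluates at runtime on Python 3.10+ (or on older versions with from __future__ import annotations). A quick check of the equivalence:

    from typing import Optional

    # Optional[X] and X | None name the same type; the | form needs
    # Python >= 3.10 to evaluate at runtime.
    assert Optional[int] == (int | None)  # True on Python 3.10+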
imap_processing/mag/l1c/mag_l1c.py
@@ -1,7 +1,6 @@
 """MAG L1C processing module."""

 import logging
-from typing import Optional

 import numpy as np
 import xarray as xr
@@ -125,6 +124,23 @@ def mag_l1c(
     try:
         global_attributes["is_mago"] = normal_mode_dataset.attrs["is_mago"]
         global_attributes["is_active"] = normal_mode_dataset.attrs["is_active"]
+
+        # Check if all vectors are primary in both normal and burst datasets
+        is_mago = normal_mode_dataset.attrs.get("is_mago", "False") == "True"
+        normal_all_primary = normal_mode_dataset.attrs.get("all_vectors_primary", False)
+
+        # Default for missing burst dataset: 1 if MAGO (expected primary), 0 if MAGI
+        burst_all_primary = is_mago
+        if burst_mode_dataset is not None:
+            burst_all_primary = burst_mode_dataset.attrs.get(
+                "all_vectors_primary", False
+            )
+
+        # Both datasets must have all vectors primary for the combined result to be True
+        global_attributes["all_vectors_primary"] = (
+            normal_all_primary and burst_all_primary
+        )
+
         global_attributes["missing_sequences"] = normal_mode_dataset.attrs[
             "missing_sequences"
         ]
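Restated as a small standalone function, the combination rule above reads as follows (a sketch of the logic, not the shipped API; note that is_mago arrives as the string "True"/"False" because L1A stores it via str(bool(...))):

    def combine_all_vectors_primary(
        normal_attrs: dict, burst_attrs: dict | None
    ) -> bool:
        # A missing burst dataset defaults to True only when MAGO is the
        # expected primary sensor, mirroring the hunk above.
        is_mago = normal_attrs.get("is_mago", "False") == "True"
        normal_ok = normal_attrs.get("all_vectors_primary", False)
        burst_ok = (
            is_mago
            if burst_attrs is None
            else burst_attrs.get("all_vectors_primary", False)
        )
        return normal_ok and burst_ok


    print(combine_all_vectors_primary(
        {"is_mago": "True", "all_vectors_primary": True}, None))   # True
    print(combine_all_vectors_primary(
        {"is_mago": "False", "all_vectors_primary": True}, None))  # False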
@@ -162,8 +178,9 @@
         output_core_dims=[[]],
         vectorize=True,
     )
-    # output_dataset['vector_magnitude'].attrs =
-    # attribute_manager.get_variable_attributes("vector_magnitude_attrs")
+    output_dataset[
+        "vector_magnitude"
+    ].attrs = attribute_manager.get_variable_attributes("vector_magnitude_attrs")

     output_dataset["compression_flags"] = xr.DataArray(
         completed_timeline[:, 6:8],
@@ -176,14 +193,14 @@
         completed_timeline[:, 5],
         name="generated_flag",
         dims=["epoch"],
-        # attrs=attribute_manager.get_variable_attributes("generated_flag_attrs"),
+        attrs=attribute_manager.get_variable_attributes("generated_flag_attrs"),
     )

     return output_dataset


 def select_datasets(
-    first_input_dataset: xr.Dataset, second_input_dataset: Optional[xr.Dataset] = None
+    first_input_dataset: xr.Dataset, second_input_dataset: xr.Dataset | None = None
 ) -> tuple[xr.Dataset, xr.Dataset]:
     """
     Given one or two datasets, assign one to norm and one to burst.
@@ -503,7 +520,7 @@ def generate_timeline(epoch_data: np.ndarray, gaps: np.ndarray) -> np.ndarray:


 def find_all_gaps(
-    epoch_data: np.ndarray, vecsec_dict: Optional[dict] = None
+    epoch_data: np.ndarray, vecsec_dict: dict | None = None
 ) -> np.ndarray:
     """
     Find all the gaps in the epoch data.
imap_processing/mag/l1d/mag_l1d.py
@@ -9,7 +9,7 @@ from imap_processing.mag.l1d.mag_l1d_data import MagL1d, MagL1dConfiguration
 from imap_processing.mag.l2.mag_l2_data import ValidFrames


-def mag_l1d(
+def mag_l1d(  # noqa: PLR0912
     science_data: list[xr.Dataset],
     calibration_dataset: xr.Dataset,
     day_to_process: np.datetime64,
@@ -45,16 +45,18 @@
     input_mago_burst = None
     for dataset in science_data:
         source = dataset.attrs.get("Logical_source", "")
-        if "norm-magi" in source:
-            input_magi_norm = dataset
-        elif "norm-mago" in source:
-            input_mago_norm = dataset
-        elif "burst-magi" in source:
-            input_magi_burst = dataset
-        elif "burst-mago" in source:
-            input_mago_burst = dataset
-        else:
-            raise ValueError(f"Input data has invalid logical source {source}")
+        instrument_mode = source.split("_")[-1]
+        match instrument_mode:
+            case "norm-magi":
+                input_magi_norm = dataset
+            case "norm-mago":
+                input_mago_norm = dataset
+            case "burst-magi":
+                input_magi_burst = dataset
+            case "burst-mago":
+                input_mago_burst = dataset
+            case _:
+                raise ValueError(f"Input data has invalid logical source {source}")

     if input_magi_norm is None or input_mago_norm is None:
         raise ValueError(
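The rewrite keys the dispatch on the final underscore-separated token of the Logical_source attribute rather than a substring search. A quick check with a representative value (the sample string is an assumption based on the imap_{instrument}_{level}_{descriptor} naming seen elsewhere in the diff):

    source = "imap_mag_l1b_norm-mago"  # hypothetical Logical_source value
    match source.split("_")[-1]:
        case "norm-mago":
            print("normal-mode MAGO dataset")
        case "norm-magi" | "burst-mago" | "burst-magi":
            print("other mode/sensor combination")
        case _:
            raise ValueError(f"Input data has invalid logical source {source}")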
@@ -72,8 +74,9 @@ def mag_l1d(
     mago_vectors = input_mago_norm["vectors"].data[:, :3]
     magi_vectors = input_magi_norm["vectors"].data[:, :3]

-    # TODO: verify that MAGO is primary sensor for all vectors before applying
-    # gradiometry
+    # Verify that MAGO is primary sensor for all vectors before applying gradiometry
+    if not input_mago_norm.attrs.get("all_vectors_primary", 1):
+        config.apply_gradiometry = False

     # TODO: L1D attributes
     attributes = ImapCdfAttributes()
@@ -95,12 +98,21 @@
         day=day,
     )

+    # Nominally, this is expected to create MAGO data. However, if the configuration
+    # setting for always_output_mago is set to False, it will create MAGI data.
+
     l1d_norm.rotate_frame(ValidFrames.SRF)
     norm_srf_dataset = l1d_norm.generate_dataset(attributes, day_to_process)
     l1d_norm.rotate_frame(ValidFrames.DSRF)
     norm_dsrf_dataset = l1d_norm.generate_dataset(attributes, day_to_process)
+    l1d_norm.rotate_frame(ValidFrames.GSE)
+    norm_gse_dataset = l1d_norm.generate_dataset(attributes, day_to_process)
+    l1d_norm.rotate_frame(ValidFrames.RTN)
+    norm_rtn_dataset = l1d_norm.generate_dataset(attributes, day_to_process)
     output_datasets.append(norm_srf_dataset)
     output_datasets.append(norm_dsrf_dataset)
+    output_datasets.append(norm_gse_dataset)
+    output_datasets.append(norm_rtn_dataset)

     if input_mago_burst is not None and input_magi_burst is not None:
         # If burst data is provided, use it to create the burst L1d dataset
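With four frames now produced per mode, the repeated rotate/generate pairs could equally be written as a loop; a sketch reusing the names from the hunk above (not standalone code, and it assumes rotate_frame supports each successive transition as the sequential calls do):

    for frame in (ValidFrames.SRF, ValidFrames.DSRF, ValidFrames.GSE, ValidFrames.RTN):
        l1d_norm.rotate_frame(frame)
        output_datasets.append(l1d_norm.generate_dataset(attributes, day_to_process))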
@@ -122,12 +134,43 @@
             spin_offsets=l1d_norm.spin_offsets,
             day=day,
         )
+
+        # TODO: frame specific attributes may be required
         l1d_burst.rotate_frame(ValidFrames.SRF)
         burst_srf_dataset = l1d_burst.generate_dataset(attributes, day_to_process)
         l1d_burst.rotate_frame(ValidFrames.DSRF)
         burst_dsrf_dataset = l1d_burst.generate_dataset(attributes, day_to_process)
+        l1d_burst.rotate_frame(ValidFrames.GSE)
+        burst_gse_dataset = l1d_burst.generate_dataset(attributes, day_to_process)
+        l1d_burst.rotate_frame(ValidFrames.RTN)
+        burst_rtn_dataset = l1d_burst.generate_dataset(attributes, day_to_process)
         output_datasets.append(burst_srf_dataset)
         output_datasets.append(burst_dsrf_dataset)
+        output_datasets.append(burst_gse_dataset)
+        output_datasets.append(burst_rtn_dataset)
+
+    # Output ancillary files
+    # Add spin offsets dataset from normal mode processing
+    if l1d_norm.spin_offsets is not None:
+        spin_offset_dataset = l1d_norm.generate_spin_offset_dataset()
+        spin_offset_dataset.attrs["Logical_source"] = "imap_mag_l1d-spin-offsets"
+        output_datasets.append(spin_offset_dataset)
+
+    # Add gradiometry offsets dataset if gradiometry was applied
+    if l1d_norm.config.apply_gradiometry and hasattr(l1d_norm, "gradiometry_offsets"):
+        gradiometry_dataset = l1d_norm.gradiometry_offsets.copy()
+        gradiometry_dataset.attrs["Logical_source"] = (
+            "imap_mag_l1d-gradiometry-offsets-norm"
+        )
+        output_datasets.append(gradiometry_dataset)
+
+    # Also add burst gradiometry offsets if burst data was processed
+    if input_mago_burst is not None and input_magi_burst is not None:
+        if hasattr(l1d_burst, "gradiometry_offsets"):
+            burst_gradiometry_dataset = l1d_burst.gradiometry_offsets.copy()
+            burst_gradiometry_dataset.attrs["Logical_source"] = (
+                "imap_mag_l1d-gradiometry-offsets-burst"
+            )
+            output_datasets.append(burst_gradiometry_dataset)

-    # TODO: Output ancillary files
     return output_datasets
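Since mag_l1d now returns science and ancillary datasets in one list, distinguished only by their Logical_source attribute, a downstream consumer might bucket them like this (a hypothetical helper, not part of the package):

    import xarray as xr


    def route_l1d_outputs(datasets: list[xr.Dataset]) -> dict[str, list[xr.Dataset]]:
        """Bucket mag_l1d outputs by their Logical_source attribute."""
        buckets: dict[str, list[xr.Dataset]] = {}
        for ds in datasets:
            key = ds.attrs.get("Logical_source", "unknown")
            buckets.setdefault(key, []).append(ds)
        return buckets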