imap-processing 0.19.4__py3-none-any.whl → 1.0.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of imap-processing might be problematic. Click here for more details.

Files changed (50)
  1. imap_processing/_version.py +2 -2
  2. imap_processing/cdf/config/imap_codice_global_cdf_attrs.yaml +13 -1
  3. imap_processing/cdf/config/imap_codice_l1a_variable_attrs.yaml +44 -44
  4. imap_processing/cdf/config/imap_codice_l1b_variable_attrs.yaml +127 -126
  5. imap_processing/cdf/config/imap_codice_l2-hi-omni_variable_attrs.yaml +635 -0
  6. imap_processing/cdf/config/imap_codice_l2-hi-sectored_variable_attrs.yaml +422 -0
  7. imap_processing/cdf/config/imap_constant_attrs.yaml +1 -1
  8. imap_processing/cdf/config/imap_enamaps_l2-common_variable_attrs.yaml +61 -55
  9. imap_processing/cdf/config/imap_enamaps_l2-healpix_variable_attrs.yaml +3 -2
  10. imap_processing/cdf/config/imap_enamaps_l2-rectangular_variable_attrs.yaml +17 -5
  11. imap_processing/cli.py +6 -11
  12. imap_processing/codice/codice_l1a.py +42 -21
  13. imap_processing/codice/codice_l2.py +640 -127
  14. imap_processing/codice/constants.py +224 -129
  15. imap_processing/ena_maps/ena_maps.py +124 -70
  16. imap_processing/ena_maps/utils/coordinates.py +5 -0
  17. imap_processing/ena_maps/utils/corrections.py +268 -0
  18. imap_processing/ena_maps/utils/map_utils.py +143 -42
  19. imap_processing/hi/hi_l2.py +10 -15
  20. imap_processing/ialirt/constants.py +7 -1
  21. imap_processing/ialirt/generate_coverage.py +1 -1
  22. imap_processing/ialirt/l0/ialirt_spice.py +1 -1
  23. imap_processing/ialirt/l0/parse_mag.py +33 -0
  24. imap_processing/ialirt/l0/process_codice.py +66 -0
  25. imap_processing/ialirt/utils/create_xarray.py +2 -0
  26. imap_processing/idex/idex_l2a.py +2 -2
  27. imap_processing/idex/idex_l2b.py +1 -1
  28. imap_processing/lo/l1c/lo_l1c.py +61 -3
  29. imap_processing/lo/l2/lo_l2.py +79 -11
  30. imap_processing/mag/l1a/mag_l1a.py +2 -2
  31. imap_processing/mag/l1a/mag_l1a_data.py +71 -13
  32. imap_processing/mag/l1c/interpolation_methods.py +34 -13
  33. imap_processing/mag/l1c/mag_l1c.py +117 -67
  34. imap_processing/mag/l1d/mag_l1d_data.py +3 -1
  35. imap_processing/spice/geometry.py +39 -28
  36. imap_processing/spice/pointing_frame.py +77 -50
  37. imap_processing/swapi/l1/swapi_l1.py +12 -4
  38. imap_processing/swe/utils/swe_constants.py +7 -7
  39. imap_processing/ultra/l1b/extendedspin.py +1 -1
  40. imap_processing/ultra/l1b/ultra_l1b_culling.py +2 -2
  41. imap_processing/ultra/l1b/ultra_l1b_extended.py +1 -1
  42. imap_processing/ultra/l1c/helio_pset.py +1 -1
  43. imap_processing/ultra/l1c/spacecraft_pset.py +2 -2
  44. imap_processing/ultra/l2/ultra_l2.py +3 -3
  45. imap_processing-1.0.1.dist-info/METADATA +121 -0
  46. {imap_processing-0.19.4.dist-info → imap_processing-1.0.1.dist-info}/RECORD +49 -47
  47. imap_processing-0.19.4.dist-info/METADATA +0 -120
  48. {imap_processing-0.19.4.dist-info → imap_processing-1.0.1.dist-info}/LICENSE +0 -0
  49. {imap_processing-0.19.4.dist-info → imap_processing-1.0.1.dist-info}/WHEEL +0 -0
  50. {imap_processing-0.19.4.dist-info → imap_processing-1.0.1.dist-info}/entry_points.txt +0 -0
@@ -63,17 +63,16 @@ def mag_l1c(
63
63
  )
64
64
 
65
65
  interp_function = InterpolationFunction[configuration.L1C_INTERPOLATION_METHOD]
66
- if normal_mode_dataset and burst_mode_dataset:
67
- full_interpolated_timeline = process_mag_l1c(
68
- normal_mode_dataset, burst_mode_dataset, interp_function
66
+ if burst_mode_dataset is not None:
67
+ # Only use day_to_process if there is no norm data
68
+ day_to_process_arg = day_to_process if normal_mode_dataset is None else None
69
+ full_interpolated_timeline: np.ndarray = process_mag_l1c(
70
+ normal_mode_dataset, burst_mode_dataset, interp_function, day_to_process_arg
69
71
  )
70
72
  elif normal_mode_dataset is not None:
71
- full_interpolated_timeline = fill_normal_data(
72
- normal_mode_dataset, normal_mode_dataset["epoch"].data
73
- )
73
+ full_interpolated_timeline = fill_normal_data(normal_mode_dataset)
74
74
  else:
75
- # TODO: With only burst data, downsample by retrieving the timeline
76
- raise NotImplementedError
75
+ raise ValueError("At least one of norm or burst dataset must be provided.")
77
76
 
78
77
  completed_timeline = remove_missing_data(full_interpolated_timeline)
79
78
 
@@ -127,12 +126,14 @@ def mag_l1c(
127
126
  global_attributes["missing_sequences"] = ""
128
127
 
129
128
  try:
130
- global_attributes["is_mago"] = normal_mode_dataset.attrs["is_mago"]
131
- global_attributes["is_active"] = normal_mode_dataset.attrs["is_active"]
129
+ active_dataset = normal_mode_dataset or burst_mode_dataset
130
+
131
+ global_attributes["is_mago"] = active_dataset.attrs["is_mago"]
132
+ global_attributes["is_active"] = active_dataset.attrs["is_active"]
132
133
 
133
134
  # Check if all vectors are primary in both normal and burst datasets
134
- is_mago = normal_mode_dataset.attrs.get("is_mago", "False") == "True"
135
- normal_all_primary = normal_mode_dataset.attrs.get("all_vectors_primary", False)
135
+ is_mago = active_dataset.attrs.get("is_mago", "False") == "True"
136
+ normal_all_primary = active_dataset.attrs.get("all_vectors_primary", False)
136
137
 
137
138
  # Default for missing burst dataset: 1 if MAGO (expected primary), 0 if MAGI
138
139
  burst_all_primary = is_mago
@@ -146,14 +147,14 @@ def mag_l1c(
146
147
  normal_all_primary and burst_all_primary
147
148
  )
148
149
 
149
- global_attributes["missing_sequences"] = normal_mode_dataset.attrs[
150
+ global_attributes["missing_sequences"] = active_dataset.attrs[
150
151
  "missing_sequences"
151
152
  ]
152
153
  except KeyError as e:
153
154
  logger.info(
154
155
  f"Key error when assigning global attributes, attribute not found in "
155
156
  f"L1B file with logical source "
156
- f"{normal_mode_dataset.attrs['Logical_source']}: {e}"
157
+ f"{active_dataset.attrs['Logical_source']}: {e}"
157
158
  )
158
159
 
159
160
  global_attributes["interpolation_method"] = interp_function.name
@@ -176,16 +177,24 @@ def mag_l1c(
176
177
  attrs=attribute_manager.get_variable_attributes("vector_attrs"),
177
178
  )
178
179
 
179
- output_dataset["vector_magnitude"] = xr.apply_ufunc(
180
- lambda x: np.linalg.norm(x[:4]),
181
- output_dataset["vectors"],
182
- input_core_dims=[["direction"]],
183
- output_core_dims=[[]],
184
- vectorize=True,
185
- )
186
- output_dataset[
187
- "vector_magnitude"
188
- ].attrs = attribute_manager.get_variable_attributes("vector_magnitude_attrs")
180
+ if len(output_dataset["vectors"]) > 0:
181
+ output_dataset["vector_magnitude"] = xr.apply_ufunc(
182
+ lambda x: np.linalg.norm(x[:4]),
183
+ output_dataset["vectors"],
184
+ input_core_dims=[["direction"]],
185
+ output_core_dims=[[]],
186
+ vectorize=True,
187
+ )
188
+ output_dataset[
189
+ "vector_magnitude"
190
+ ].attrs = attribute_manager.get_variable_attributes("vector_magnitude_attrs")
191
+ else:
192
+ output_dataset["vector_magnitude"] = xr.DataArray(
193
+ np.empty((0, 1)),
194
+ name="vector_magnitude",
195
+ dims=["epoch", "vector_magnitude"],
196
+ attrs=attribute_manager.get_variable_attributes("vector_magnitude_attrs"),
197
+ )
189
198
 
190
199
  output_dataset["compression_flags"] = xr.DataArray(
191
200
  completed_timeline[:, 6:8],
@@ -265,7 +274,7 @@ def select_datasets(
265
274
 
266
275
 
267
276
  def process_mag_l1c(
268
- normal_mode_dataset: xr.Dataset,
277
+ normal_mode_dataset: xr.Dataset | None,
269
278
  burst_mode_dataset: xr.Dataset,
270
279
  interpolation_function: InterpolationFunction,
271
280
  day_to_process: np.datetime64 | None = None,
@@ -305,38 +314,51 @@ def process_mag_l1c(
305
314
  np.ndarray
306
315
  An (n, 8) shaped array containing the completed timeline.
307
316
  """
308
- norm_epoch = normal_mode_dataset["epoch"].data
309
- if "vectors_per_second" in normal_mode_dataset.attrs:
310
- normal_vecsec_dict = vectors_per_second_from_string(
311
- normal_mode_dataset.attrs["vectors_per_second"]
312
- )
313
- else:
314
- normal_vecsec_dict = None
315
-
316
- output_dataset = normal_mode_dataset.copy(deep=True)
317
- output_dataset["sample_interpolated"] = xr.DataArray(
318
- np.zeros(len(normal_mode_dataset))
319
- )
320
317
  day_start_ns = None
321
318
  day_end_ns = None
322
319
 
323
320
  if day_to_process is not None:
324
- day_start = day_to_process.astype("datetime64[s]") - np.timedelta64(15, "m")
321
+ day_start = day_to_process.astype("datetime64[s]") - np.timedelta64(30, "m")
325
322
 
326
- # get the end of the day plus 15 minutes
323
+ # get the end of the day plus 30 minutes
327
324
  day_end = (
328
325
  day_to_process.astype("datetime64[s]")
329
326
  + np.timedelta64(1, "D")
330
- + np.timedelta64(15, "m")
327
+ + np.timedelta64(30, "m")
331
328
  )
332
329
 
333
330
  day_start_ns = et_to_ttj2000ns(str_to_et(str(day_start)))
334
331
  day_end_ns = et_to_ttj2000ns(str_to_et(str(day_end)))
335
332
 
336
- gaps = find_all_gaps(norm_epoch, normal_vecsec_dict, day_start_ns, day_end_ns)
333
+ if normal_mode_dataset:
334
+ norm_epoch = normal_mode_dataset["epoch"].data
335
+ if "vectors_per_second" in normal_mode_dataset.attrs:
336
+ normal_vecsec_dict = vectors_per_second_from_string(
337
+ normal_mode_dataset.attrs["vectors_per_second"]
338
+ )
339
+ else:
340
+ normal_vecsec_dict = None
341
+
342
+ gaps = find_all_gaps(norm_epoch, normal_vecsec_dict, day_start_ns, day_end_ns)
343
+ else:
344
+ norm_epoch = [day_start_ns, day_end_ns]
345
+ gaps = np.array(
346
+ [
347
+ [
348
+ day_start_ns,
349
+ day_end_ns,
350
+ VecSec.TWO_VECS_PER_S.value,
351
+ ]
352
+ ]
353
+ )
337
354
 
338
355
  new_timeline = generate_timeline(norm_epoch, gaps)
339
- norm_filled = fill_normal_data(normal_mode_dataset, new_timeline)
356
+
357
+ if normal_mode_dataset:
358
+ norm_filled: np.ndarray = fill_normal_data(normal_mode_dataset, new_timeline)
359
+ else:
360
+ norm_filled = generate_empty_norm_array(new_timeline)
361
+
340
362
  interpolated = interpolate_gaps(
341
363
  burst_mode_dataset, gaps, norm_filled, interpolation_function
342
364
  )
@@ -344,10 +366,32 @@ def process_mag_l1c(
344
366
  return interpolated
345
367
 
346
368
 
369
+ def generate_empty_norm_array(new_timeline: np.ndarray) -> np.ndarray:
370
+ """
371
+ Generate an empty Normal mode array with the new timeline.
372
+
373
+ Parameters
374
+ ----------
375
+ new_timeline : np.ndarray
376
+ A 1D array of timestamps to fill.
377
+
378
+ Returns
379
+ -------
380
+ np.ndarray
381
+ An (n, 8) shaped array containing the timeline filled with `FILLVAL` data.
382
+ """
383
+ # TODO: fill with FILLVAL
384
+ norm_filled: np.ndarray = np.zeros((len(new_timeline), 8))
385
+ norm_filled[:, 0] = new_timeline
386
+ # Flags, will also indicate any missed timestamps
387
+ norm_filled[:, 5] = ModeFlags.MISSING.value
388
+
389
+ return norm_filled
390
+
391
+
347
392
  def fill_normal_data(
348
393
  normal_dataset: xr.Dataset,
349
- new_timeline: np.ndarray,
350
- day_to_process: np.datetime64 | None = None,
394
+ new_timeline: np.ndarray | None = None,
351
395
  ) -> np.ndarray:
352
396
  """
353
397
  Fill the new timeline with the normal mode data.
@@ -358,26 +402,23 @@ def fill_normal_data(
358
402
  ----------
359
403
  normal_dataset : xr.Dataset
360
404
  The normal mode dataset.
361
- new_timeline : np.ndarray
362
- A 1D array of timestamps to fill.
363
- day_to_process : np.datetime64, optional
364
- The day to process, in np.datetime64[D] format. This is used to fill
365
- gaps at the beginning or end of the day if needed. If not included, these
366
- gaps will not be filled.
405
+ new_timeline : np.ndarray, optional
406
+ A 1D array of timestamps to fill. If not provided, the normal mode timestamps
407
+ will be used.
367
408
 
368
409
  Returns
369
410
  -------
370
- np.ndarray
411
+ filled_timeline : np.ndarray
371
412
  An (n, 8) shaped array containing the timeline filled with normal mode data.
372
413
  Gaps are marked as -1 in the generated flag column at index 5.
373
414
  Indices: 0 - epoch, 1-4 - vector x, y, z, and range, 5 - generated flag,
374
415
  6-7 - compression flags.
375
416
  """
376
- # TODO: fill with FILLVAL
377
- filled_timeline: np.ndarray = np.zeros((len(new_timeline), 8))
378
- filled_timeline[:, 0] = new_timeline
379
- # Flags, will also indicate any missed timestamps
380
- filled_timeline[:, 5] = ModeFlags.MISSING.value
417
+ if new_timeline is None:
418
+ new_timeline = normal_dataset["epoch"].data
419
+
420
+ filled_timeline = generate_empty_norm_array(new_timeline)
421
+
381
422
  for index, timestamp in enumerate(normal_dataset["epoch"].data):
382
423
  timeline_index = np.searchsorted(new_timeline, timestamp)
383
424
  filled_timeline[timeline_index, 1:5] = normal_dataset["vectors"].data[index]
@@ -463,20 +504,17 @@ def interpolate_gaps(
463
504
  ]
464
505
 
465
506
  short = (gap_timeline >= burst_epochs[burst_start]) & (
466
- gap_timeline <= burst_epochs[burst_gap_end]
507
+ gap_timeline <= burst_epochs[burst_end]
467
508
  )
468
- if len(gap_timeline) != (short).sum():
469
- print(f"Chopping timeline from {len(gap_timeline)} to {short.sum()}")
509
+ num_short = int(short.sum())
510
+
511
+ if len(gap_timeline) != num_short:
512
+ print(f"Chopping timeline from {len(gap_timeline)} to {num_short}")
470
513
 
471
514
  # Limit timestamps to only include the areas with burst data
472
- gap_timeline = gap_timeline[
473
- (
474
- (gap_timeline >= burst_epochs[burst_start])
475
- & (gap_timeline <= burst_epochs[burst_gap_end])
476
- )
477
- ]
515
+ gap_timeline = gap_timeline[short]
478
516
  # do not include range
479
- gap_fill = interpolation_function(
517
+ adjusted_gap_timeline, gap_fill = interpolation_function(
480
518
  burst_vectors[burst_start:burst_end, :3],
481
519
  burst_epochs[burst_start:burst_end],
482
520
  gap_timeline,
@@ -485,7 +523,7 @@ def interpolate_gaps(
485
523
  )
486
524
 
487
525
  # gaps should not have data in timeline, still check it
488
- for index, timestamp in enumerate(gap_timeline):
526
+ for index, timestamp in enumerate(adjusted_gap_timeline):
489
527
  timeline_index = np.searchsorted(filled_norm_timeline[:, 0], timestamp)
490
528
  if sum(
491
529
  filled_norm_timeline[timeline_index, 1:4]
@@ -500,6 +538,18 @@ def interpolate_gaps(
500
538
  "compression_flags"
501
539
  ].data[burst_gap_start + index]
502
540
 
541
+ # for any timestamp that was not filled and is still missing, remove it
542
+ missing_timeline = np.setdiff1d(gap_timeline, adjusted_gap_timeline)
543
+
544
+ for timestamp in missing_timeline:
545
+ timeline_index = np.searchsorted(filled_norm_timeline[:, 0], timestamp)
546
+ if filled_norm_timeline[timeline_index, 5] != ModeFlags.MISSING.value:
547
+ raise RuntimeError(
548
+ "Self-inconsistent data. "
549
+ "Gaps not included in final timeline should be missing."
550
+ )
551
+ np.delete(filled_norm_timeline, timeline_index)
552
+
503
553
  return filled_norm_timeline
504
554
 
505
555
 
@@ -693,10 +693,12 @@ class MagL1d(MagL2L1dBase): # type: ignore[misc]
693
693
  - gradiometer_offset_magnitude: magnitude of the offset vector
694
694
  - quality_flags: quality flags (1 if magnitude > threshold, 0 otherwise)
695
695
  """
696
- aligned_magi = linear(
696
+ # TODO: should this extrapolate or should non-overlapping data be removed?
697
+ _, aligned_magi = linear(
697
698
  magi_vectors,
698
699
  magi_epoch,
699
700
  mago_epoch,
701
+ extrapolate=True,
700
702
  )
701
703
 
702
704
  diff = aligned_magi - mago_vectors
@@ -21,7 +21,7 @@ from numpy.typing import NDArray
21
21
  class SpiceBody(IntEnum):
22
22
  """Enum containing SPICE IDs for bodies that we use."""
23
23
 
24
- # A subset of IMAP Specific bodies as defined in imap_001.tf
24
+ # A subset of IMAP Specific bodies as defined in imap_xxx.tf
25
25
  IMAP = -43
26
26
  IMAP_SPACECRAFT = -43000
27
27
  # IMAP Pointing Frame (Despun) as defined in imap_science_xxx.tf
@@ -58,7 +58,7 @@ class SpiceFrame(IntEnum):
58
58
  IMAP_CODICE = -43400
59
59
  IMAP_HIT = -43500
60
60
  IMAP_IDEX = -43700
61
- IMAP_GLOWS = -43751
61
+ IMAP_GLOWS = -43750
62
62
 
63
63
  # IMAP Science Frames (new additions from imap_science_xxx.tf)
64
64
  IMAP_OMD = -43900
@@ -85,6 +85,8 @@ class SpiceFrame(IntEnum):
85
85
 
86
86
  BORESIGHT_LOOKUP = {
87
87
  SpiceFrame.IMAP_LO_BASE: np.array([0, -1, 0]),
88
+ SpiceFrame.IMAP_LO: np.array([0, -1, 0]),
89
+ SpiceFrame.IMAP_LO_STAR_SENSOR: np.array([0, -1, 0]),
88
90
  SpiceFrame.IMAP_HI_45: np.array([0, 1, 0]),
89
91
  SpiceFrame.IMAP_HI_90: np.array([0, 1, 0]),
90
92
  SpiceFrame.IMAP_ULTRA_45: np.array([0, 0, 1]),
@@ -127,7 +129,7 @@ def imap_state(
127
129
  -------
128
130
  state : np.ndarray
129
131
  The Cartesian state vector representing the position and velocity of the
130
- IMAP spacecraft.
132
+ IMAP spacecraft. Units are km and km/s.
131
133
  """
132
134
  state, _ = spiceypy.spkezr(
133
135
  SpiceBody.IMAP.name, et, ref_frame.name, abcorr, observer.name
@@ -160,7 +162,7 @@ def get_instrument_mounting_az_el(instrument: SpiceFrame) -> np.ndarray:
160
162
  # frame that is used to compute the s/c to instrument mounting.
161
163
  # Most of these vectors are the same as the instrument boresight vector.
162
164
  mounting_normal_vector = {
163
- SpiceFrame.IMAP_LO_BASE: np.array([0, -1, 0]),
165
+ SpiceFrame.IMAP_LO_BASE: np.array([0, 0, -1]),
164
166
  SpiceFrame.IMAP_HI_45: np.array([0, 1, 0]),
165
167
  SpiceFrame.IMAP_HI_90: np.array([0, 1, 0]),
166
168
  SpiceFrame.IMAP_ULTRA_45: np.array([0, 0, 1]),
@@ -190,11 +192,16 @@ def get_spacecraft_to_instrument_spin_phase_offset(instrument: SpiceFrame) -> fl
190
192
  """
191
193
  Get the spin phase offset from the spacecraft to the instrument.
192
194
 
193
- For now, the offset is a fixed lookup based on `Table 1: Nominal Instrument
195
+ Nominal offset values were determined using `Table 1: Nominal Instrument
194
196
  to S/C CS Transformations` in document `7516-0011_drw.pdf`. That Table
195
- defines the angle from the spacecraft y-axis. We add 90 and take the modulous
196
- with 360 in order to get the angle from the spacecraft x-axis. These fixed
197
- values will need to be updated based on calibration data.
197
+ defines the angle from the spacecraft y-axis. We add 90-degrees and take the
198
+ modulus with 360 to get the angle from the spacecraft x-axis. This math is
199
+ shown in the comments after each key value pair in the dictionary defined
200
+ in code. The true values differ slightly from the nominal values. True
201
+ values are derived from the frame definitions in the IMAP frames kernel
202
+ which uses ground calibration measurements to define the as-built mounting
203
+ of each instrument. The function in this module, `get_instrument_mounting_az_el`,
204
+ was used to retrieve the true azimuth angles from the IMAP frames kernel.
198
205
 
199
206
  Parameters
200
207
  ----------
@@ -207,19 +214,21 @@ def get_spacecraft_to_instrument_spin_phase_offset(instrument: SpiceFrame) -> fl
207
214
  The spin phase offset from the spacecraft to the instrument.
208
215
  """
209
216
  phase_offset_lookup = {
210
- SpiceFrame.IMAP_LO_BASE: 60 / 360, # (330 + 90) % 360 = 60
211
- SpiceFrame.IMAP_HI_45: 345 / 360, # 255 + 90 = 345
212
- SpiceFrame.IMAP_HI_90: 15 / 360, # (285 + 90) % 360 = 15
213
- SpiceFrame.IMAP_ULTRA_45: 123 / 360, # 33 + 90 = 123
214
- SpiceFrame.IMAP_ULTRA_90: 300 / 360, # 210 + 90 = 300
217
+ # Phase offset values based on imap_100.tf frame kernel
218
+ # See docstring notes for details on how these values were determined.
219
+ SpiceFrame.IMAP_LO: 60 / 360, # (330 + 90) % 360 = 60
220
+ SpiceFrame.IMAP_HI_45: 344.8264 / 360, # 255 + 90 = 345
221
+ SpiceFrame.IMAP_HI_90: 15.1649 / 360, # (285 + 90) % 360 = 15
222
+ SpiceFrame.IMAP_ULTRA_45: 122.8642 / 360, # 33 + 90 = 123
223
+ SpiceFrame.IMAP_ULTRA_90: 299.9511 / 360, # 210 + 90 = 300
215
224
  SpiceFrame.IMAP_SWAPI: 258 / 360, # 168 + 90 = 258
216
- SpiceFrame.IMAP_IDEX: 180 / 360, # 90 + 90 = 180
217
- SpiceFrame.IMAP_CODICE: 226 / 360, # 136 + 90 = 226
218
- SpiceFrame.IMAP_HIT: 120 / 360, # 30 + 90 = 120
219
- SpiceFrame.IMAP_SWE: 243 / 360, # 153 + 90 = 243
220
- SpiceFrame.IMAP_GLOWS: 217 / 360, # 127 + 90 = 217
221
- SpiceFrame.IMAP_MAG_I: 90 / 360, # 0 + 90 = 90
222
- SpiceFrame.IMAP_MAG_O: 90 / 360, # 0 + 90 = 90
225
+ SpiceFrame.IMAP_IDEX: 179.9229 / 360, # 90 + 90 = 180
226
+ SpiceFrame.IMAP_CODICE: 225.9086 / 360, # 136 + 90 = 226
227
+ SpiceFrame.IMAP_HIT: 119.6452 / 360, # 30 + 90 = 120
228
+ SpiceFrame.IMAP_SWE: 243.0155 / 360, # 153 + 90 = 243
229
+ SpiceFrame.IMAP_GLOWS: 217.1384 / 360, # 127 + 90 = 217
230
+ SpiceFrame.IMAP_MAG_I: 89.9709 / 360, # 0 + 90 = 90
231
+ SpiceFrame.IMAP_MAG_O: 89.4077 / 360, # 0 + 90 = 90
223
232
  }
224
233
  return phase_offset_lookup[instrument]
225
234
 
@@ -314,6 +323,7 @@ def frame_transform_az_el(
314
323
  Ephemeris time(s) corresponding to position(s).
315
324
  az_el : np.ndarray
316
325
  <azimuth, elevation> vector or array of vectors in reference frame `from_frame`.
326
+ Azimuth and elevation pairs are always the final dimension of the array.
317
327
  There are several possible shapes for the input az_el and et:
318
328
  1. A single az_el vector may be provided for multiple `et` query times
319
329
  2. A single `et` may be provided for multiple az_el vectors,
@@ -331,15 +341,16 @@ def frame_transform_az_el(
331
341
  to_frame_az_el : np.ndarray
332
342
  Azimuth/elevation coordinates in reference frame `to_frame`. This
333
343
  output coordinate vector will have shape (2,) if a single `az_el` position
334
- vector and single `et` time are input. Otherwise, it will have shape (n, 2)
335
- where n is the number of input position vector or ephemeris times. The last
336
- axis of the output vector contains azimuth in the 0th position and elevation
337
- in the 1st position.
344
+ vector and single `et` time are input. Otherwise, it will have shape (..., 2)
345
+ where ... matches the leading dimensions of the input position vector or
346
+ ephemeris times. The last axis of the output vector contains azimuth in
347
+ the 0th position and elevation in the 1st position.
338
348
  """
339
349
  # Convert input az/el to Cartesian vectors
340
- spherical_coords_in = np.array(
341
- [np.ones_like(az_el[..., 0]), az_el[..., 0], az_el[..., 1]]
342
- ).T
350
+ spherical_coords_in = np.stack(
351
+ [np.ones_like(az_el[..., 0]), az_el[..., 0], az_el[..., 1]],
352
+ axis=-1,
353
+ )
343
354
  from_frame_cartesian = spherical_to_cartesian(spherical_coords_in)
344
355
  # Transform to to_frame
345
356
  to_frame_cartesian = frame_transform(et, from_frame_cartesian, from_frame, to_frame)
@@ -522,7 +533,7 @@ def cartesian_to_spherical(
522
533
  az = np.degrees(az)
523
534
  el = np.degrees(el)
524
535
 
525
- spherical_coords = np.stack((np.squeeze(magnitude_v), az, el), axis=-1)
536
+ spherical_coords = np.stack((np.squeeze(magnitude_v, -1), az, el), axis=-1)
526
537
 
527
538
  return spherical_coords
528
539