imap-processing 1.0.1__py3-none-any.whl → 1.0.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (46) hide show
  1. imap_processing/_version.py +2 -2
  2. imap_processing/cdf/config/imap_codice_l1a_variable_attrs.yaml +97 -254
  3. imap_processing/cdf/config/imap_enamaps_l2-common_variable_attrs.yaml +1 -1
  4. imap_processing/cdf/config/imap_swapi_variable_attrs.yaml +2 -13
  5. imap_processing/cdf/utils.py +2 -2
  6. imap_processing/cli.py +4 -16
  7. imap_processing/codice/codice_l1a_lo_angular.py +362 -0
  8. imap_processing/codice/codice_l1a_lo_species.py +282 -0
  9. imap_processing/codice/codice_l1b.py +62 -97
  10. imap_processing/codice/codice_l2.py +210 -96
  11. imap_processing/codice/codice_new_l1a.py +64 -0
  12. imap_processing/codice/constants.py +37 -2
  13. imap_processing/codice/utils.py +270 -0
  14. imap_processing/ena_maps/ena_maps.py +50 -39
  15. imap_processing/ena_maps/utils/corrections.py +196 -14
  16. imap_processing/ena_maps/utils/naming.py +3 -1
  17. imap_processing/hi/hi_l1c.py +34 -12
  18. imap_processing/hi/hi_l2.py +79 -36
  19. imap_processing/ialirt/generate_coverage.py +3 -1
  20. imap_processing/ialirt/l0/parse_mag.py +1 -0
  21. imap_processing/ialirt/l0/process_hit.py +1 -0
  22. imap_processing/ialirt/l0/process_swapi.py +1 -0
  23. imap_processing/ialirt/l0/process_swe.py +2 -0
  24. imap_processing/ialirt/process_ephemeris.py +6 -2
  25. imap_processing/ialirt/utils/create_xarray.py +3 -2
  26. imap_processing/lo/l1c/lo_l1c.py +1 -1
  27. imap_processing/lo/l2/lo_l2.py +6 -4
  28. imap_processing/quality_flags.py +1 -0
  29. imap_processing/swapi/constants.py +4 -0
  30. imap_processing/swapi/l1/swapi_l1.py +47 -20
  31. imap_processing/swapi/l2/swapi_l2.py +17 -3
  32. imap_processing/ultra/l1a/ultra_l1a.py +121 -72
  33. imap_processing/ultra/l1b/de.py +57 -1
  34. imap_processing/ultra/l1b/ultra_l1b_annotated.py +0 -1
  35. imap_processing/ultra/l1b/ultra_l1b_extended.py +24 -11
  36. imap_processing/ultra/l1c/helio_pset.py +28 -5
  37. imap_processing/ultra/l1c/l1c_lookup_utils.py +4 -2
  38. imap_processing/ultra/l1c/spacecraft_pset.py +9 -5
  39. imap_processing/ultra/l1c/ultra_l1c.py +6 -6
  40. imap_processing/ultra/l1c/ultra_l1c_pset_bins.py +82 -20
  41. imap_processing/ultra/l2/ultra_l2.py +2 -2
  42. {imap_processing-1.0.1.dist-info → imap_processing-1.0.2.dist-info}/METADATA +1 -1
  43. {imap_processing-1.0.1.dist-info → imap_processing-1.0.2.dist-info}/RECORD +46 -42
  44. {imap_processing-1.0.1.dist-info → imap_processing-1.0.2.dist-info}/LICENSE +0 -0
  45. {imap_processing-1.0.1.dist-info → imap_processing-1.0.2.dist-info}/WHEEL +0 -0
  46. {imap_processing-1.0.1.dist-info → imap_processing-1.0.2.dist-info}/entry_points.txt +0 -0
@@ -1,6 +1,8 @@
1
1
  """L2 corrections common to multiple IMAP ENA instruments."""
2
2
 
3
+ import logging
3
4
  from pathlib import Path
5
+ from typing import TypeVar
4
6
 
5
7
  import numpy as np
6
8
  import pandas as pd
@@ -8,11 +10,23 @@ import xarray as xr
8
10
  from numpy.polynomial import Polynomial
9
11
  from scipy.constants import electron_volt, erg, proton_mass
10
12
 
11
- from imap_processing.ena_maps.ena_maps import LoHiBasePointingSet
13
+ from imap_processing.ena_maps.ena_maps import (
14
+ LoHiBasePointingSet,
15
+ )
12
16
  from imap_processing.ena_maps.utils.coordinates import CoordNames
13
17
  from imap_processing.spice import geometry
14
18
  from imap_processing.spice.time import ttj2000ns_to_et
15
19
 
20
+ logger = logging.getLogger(__name__)
21
+
22
+ # Tell ruff to ignore ambiguous Greek letters in formulas in this file
23
+ # ruff: noqa: RUF003
24
+
25
+ # Create a TypeVar to represent the specific class being passed in
26
+ # Bound to LoHiBasePointingSet, meaning it must be LoHiBasePointingSet
27
+ # or a subclass of it
28
+ LoHiBasePsetSubclass = TypeVar("LoHiBasePsetSubclass", bound=LoHiBasePointingSet)
29
+
16
30
  # Physical constants for Compton-Getting correction
17
31
  # Units: electron_volt = [J / eV]
18
32
  # erg = [J / erg]
@@ -307,7 +321,9 @@ class PowerLawFluxCorrector:
307
321
  return corrected_flux, corrected_flux_stat_unc
308
322
 
309
323
 
310
- def _add_spacecraft_velocity_to_pset(pset: LoHiBasePointingSet) -> None:
324
+ def _add_spacecraft_velocity_to_pset(
325
+ pset: LoHiBasePsetSubclass,
326
+ ) -> LoHiBasePsetSubclass:
311
327
  """
312
328
  Calculate and add spacecraft velocity data to pointing set.
313
329
 
@@ -316,6 +332,11 @@ def _add_spacecraft_velocity_to_pset(pset: LoHiBasePointingSet) -> None:
316
332
  pset : LoHiBasePointingSet
317
333
  Pointing set object to be updated.
318
334
 
335
+ Returns
336
+ -------
337
+ pset : LoHiBasePointingSet
338
+ Pointing set object with spacecraft velocity data added.
339
+
319
340
  Notes
320
341
  -----
321
342
  Adds the following DataArrays to pset.data:
@@ -342,8 +363,10 @@ def _add_spacecraft_velocity_to_pset(pset: LoHiBasePointingSet) -> None:
342
363
  )
343
364
  pset.data["sc_direction_vector"] = pset.data["sc_velocity"] / sc_velocity_km_per_sec
344
365
 
366
+ return pset
367
+
345
368
 
346
- def _add_cartesian_look_direction(pset: LoHiBasePointingSet) -> None:
369
+ def _add_cartesian_look_direction(pset: LoHiBasePsetSubclass) -> LoHiBasePsetSubclass:
347
370
  """
348
371
  Calculate and add look direction vectors to pointing set.
349
372
 
@@ -352,6 +375,11 @@ def _add_cartesian_look_direction(pset: LoHiBasePointingSet) -> None:
352
375
  pset : LoHiBasePointingSet
353
376
  Pointing set object to be updated.
354
377
 
378
+ Returns
379
+ -------
380
+ pset : LoHiBasePointingSet
381
+ Pointing set object with look direction vectors added.
382
+
355
383
  Notes
356
384
  -----
357
385
  Adds the following DataArray to pset.data:
@@ -376,11 +404,13 @@ def _add_cartesian_look_direction(pset: LoHiBasePointingSet) -> None:
376
404
  dims=[*longitudes.dims, CoordNames.CARTESIAN_VECTOR.value],
377
405
  )
378
406
 
407
+ return pset
408
+
379
409
 
380
410
  def _calculate_compton_getting_transform(
381
- pset: LoHiBasePointingSet,
411
+ pset: LoHiBasePsetSubclass,
382
412
  energy_hf: xr.DataArray,
383
- ) -> None:
413
+ ) -> LoHiBasePsetSubclass:
384
414
  """
385
415
  Apply Compton-Getting transformation to compute ENA source directions.
386
416
 
@@ -400,14 +430,24 @@ def _calculate_compton_getting_transform(
400
430
  energy_hf : xr.DataArray
401
431
  ENA energies in the heliosphere frame in eV.
402
432
 
433
+ Returns
434
+ -------
435
+ pset : LoHiBasePointingSet
436
+ Pointing set object with Compton-Getting related variables added and
437
+ updated az_el_points.
438
+
403
439
  Notes
404
440
  -----
405
441
  The algorithm is based on the "Appendix A. The IMAP-Lo Mapping Algorithms"
406
442
  document.
407
443
  Adds the following DataArrays to pset.data:
408
444
  - "energy_sc": ENA energies in spacecraft frame (eV)
409
- - "ena_source_hae_longitude": ENA source longitudes in heliosphere frame (degrees)
410
- - "ena_source_hae_latitude": ENA source latitudes in heliosphere frame (degrees)
445
+ - "energy_hf": ENA energies in the heliosphere frame (eV)
446
+ - "ram_mask": Mask indicating whether ENA source direction is from the ram
447
+ direction.
448
+ Updates the following DataArrays in pset.data:
449
+ - "hae_longitude": ENA source longitudes in heliosphere frame (degrees)
450
+ - "hae_latitude": ENA source latitudes in heliosphere frame (degrees)
411
451
  """
412
452
  # Store heliosphere frame energies
413
453
  pset.data["energy_hf"] = energy_hf
@@ -455,6 +495,8 @@ def _calculate_compton_getting_transform(
455
495
  # Velocity magnitude factor calculation (Equation 62)
456
496
  # x_k = (êₛ · û_sc) + sqrt(y² + (êₛ · û_sc)² - 1)
457
497
  x = dot_product + np.sqrt(y**2 + dot_product**2 - 1)
498
+ # Get the dimensions in the right order so that spatial is last
499
+ x = x.transpose(dot_product.dims[0], y.dims[0], dot_product.dims[1])
458
500
 
459
501
  # Calculate ENA speed in the spacecraft frame
460
502
  # |v⃗_sc| = x_k * U_sc
@@ -504,11 +546,13 @@ def _calculate_compton_getting_transform(
504
546
  dims=velocity_vector_helio.dims[:-1],
505
547
  )
506
548
 
549
+ return pset
550
+
507
551
 
508
552
  def apply_compton_getting_correction(
509
- pset: LoHiBasePointingSet,
553
+ pset: LoHiBasePsetSubclass,
510
554
  energy_hf: xr.DataArray,
511
- ) -> None:
555
+ ) -> LoHiBasePsetSubclass:
512
556
  """
513
557
  Apply Compton-Getting correction to a pointing set and update coordinates.
514
558
 
@@ -532,6 +576,11 @@ def apply_compton_getting_correction(
532
576
  ENA energies in the heliosphere frame in eV. Must be 1D with an
533
577
  energy dimension.
534
578
 
579
+ Returns
580
+ -------
581
+ pset : LoHiBasePointingSet
582
+ Updated pointing set object with Compton-Getting related variables added.
583
+
535
584
  Notes
536
585
  -----
537
586
  This function adds the following variables to the pointing set dataset:
@@ -540,20 +589,153 @@ def apply_compton_getting_correction(
540
589
  - "look_direction": Cartesian unit vectors of observation directions
541
590
  - "energy_hf": ENA energies in heliosphere frame (eV)
542
591
  - "energy_sc": ENA energies in spacecraft frame (eV)
543
- - "ena_source_hae_longitude": ENA source longitudes in heliosphere frame (degrees)
544
- - "ena_source_hae_latitude": ENA source latitudes in heliosphere frame (degrees)
592
+ This function modifies the following variables in the pointing set dataset:
593
+ - "hae_longitude": ENA source longitudes in heliosphere frame (degrees)
594
+ - "hae_latitude": ENA source latitudes in heliosphere frame (degrees)
545
595
 
546
596
  The az_el_points attribute is updated to use the corrected coordinates,
547
597
  which will be used for subsequent binning operations.
548
598
  """
549
599
  # Step 1: Add spacecraft velocity and direction to pset
550
- _add_spacecraft_velocity_to_pset(pset)
600
+ pset = _add_spacecraft_velocity_to_pset(pset)
551
601
 
552
602
  # Step 2: Calculate and add look direction vectors to pset
553
- _add_cartesian_look_direction(pset)
603
+ pset = _add_cartesian_look_direction(pset)
554
604
 
555
605
  # Step 3: Apply Compton-Getting transformation
556
- _calculate_compton_getting_transform(pset, energy_hf)
606
+ pset = _calculate_compton_getting_transform(pset, energy_hf)
557
607
 
558
608
  # Step 4: Update az_el_points to use the corrected coordinates
559
609
  pset.update_az_el_points()
610
+
611
+ return pset
612
+
613
+
614
+ def interpolate_map_flux_to_helio_frame(
615
+ map_ds: xr.Dataset,
616
+ esa_energies_ev: xr.DataArray,
617
+ helio_energies_ev: xr.DataArray,
618
+ ) -> xr.Dataset:
619
+ """
620
+ Interpolate flux from spacecraft frame to heliocentric frame energies.
621
+
622
+ This implements the Compton-Getting interpolation step that transforms
623
+ flux measurements from the spacecraft frame to the heliocentric frame.
624
+ The algorithm follows these steps:
625
+ 1. For each spatial pixel and energy step, get the spacecraft energy
626
+ 2. Find bounding ESA energy channels for interpolation
627
+ 3. Perform power-law interpolation between bounding channels to spacecraft energy
628
+ 4. Apply energy scaling transformation to heliocentric frame
629
+
630
+ Parameters
631
+ ----------
632
+ map_ds : xarray.Dataset
633
+ Map dataset with `energy_sc` data variable containing the spacecraft
634
+ frame energies for each spatial pixel and ESA energy step.
635
+ esa_energies_ev : xarray.DataArray
636
+ The ESA nominal central energies (in eV).
637
+ helio_energies_ev : xarray.DataArray
638
+ The heliocentric frame energies to interpolate to (in eV).
639
+ In practice, these are the same as esa_energies_ev.
640
+
641
+ Returns
642
+ -------
643
+ map_ds : xarray.Dataset
644
+ Updated map dataset with interpolated heliocentric frame fluxes.
645
+ """
646
+ logger.info("Performing Compton-Getting interpolation to heliocentric frame")
647
+
648
+ # Work with xarray DataArrays to handle arbitrary spatial dimensions
649
+ energy_sc = map_ds["energy_sc"]
650
+ intensity = map_ds["ena_intensity"]
651
+ stat_unc = map_ds["ena_intensity_stat_uncert"]
652
+ sys_err = map_ds["ena_intensity_sys_err"]
653
+
654
+ # Step 1: Find bounding ESA energy indices for each position
655
+ # Use np.searchsorted on flattened array, then reshape back
656
+ esa_energy_vals = esa_energies_ev.values
657
+ energy_sc_flat = energy_sc.values.ravel()
658
+
659
+ # Find right bound index for each element (vectorized)
660
+ right_idx_flat = np.searchsorted(esa_energy_vals, energy_sc_flat, side="right")
661
+ right_idx_flat = np.clip(right_idx_flat, 1, len(esa_energy_vals) - 1)
662
+ left_idx_flat = right_idx_flat - 1
663
+
664
+ # Reshape indices back to match energy_sc shape
665
+ right_idx = right_idx_flat.reshape(energy_sc.shape)
666
+ left_idx = left_idx_flat.reshape(energy_sc.shape)
667
+
668
+ # Create DataArrays for indices with same dims as energy_sc
669
+ # Note: we need to avoid coordinate name conflicts when using isel()
670
+ # The energy dimension should be present in dims but not as a coordinate
671
+ # since we're using these as indices into the energy dimension
672
+ # Create coordinates dict without the energy coordinate
673
+ coords_without_energy = {k: v for k, v in energy_sc.coords.items() if k != "energy"}
674
+
675
+ right_idx_da = xr.DataArray(
676
+ right_idx, dims=energy_sc.dims, coords=coords_without_energy
677
+ )
678
+ left_idx_da = xr.DataArray(
679
+ left_idx, dims=energy_sc.dims, coords=coords_without_energy
680
+ )
681
+
682
+ # Step 2: Extract flux values at bounding energy channels
683
+ # Use xarray's advanced indexing to get fluxes at left and right indices
684
+ flux_left = intensity.isel({"energy": left_idx_da})
685
+ flux_right = intensity.isel({"energy": right_idx_da})
686
+ stat_unc_left = stat_unc.isel({"energy": left_idx_da})
687
+ stat_unc_right = stat_unc.isel({"energy": right_idx_da})
688
+ sys_err_left = sys_err.isel({"energy": left_idx_da})
689
+
690
+ # Get energy values at boundaries - select from esa_energies_ev using indices
691
+ energy_left = esa_energies_ev.isel({"energy": left_idx_da})
692
+ energy_right = esa_energies_ev.isel({"energy": right_idx_da})
693
+
694
+ # Step 3: Perform power-law interpolation to spacecraft energy
695
+ # slope = log(f_right/f_left) / log(e_right/e_left)
696
+ # flux_sc = f_left * (energy_sc / e_left)^slope
697
+ with np.errstate(divide="ignore", invalid="ignore"):
698
+ # Calculate slope for power-law interpolation
699
+ slope = np.log(flux_right / flux_left) / np.log(energy_right / energy_left)
700
+
701
+ # Interpolate flux using power-law
702
+ flux_sc = flux_left * ((energy_sc / energy_left) ** slope)
703
+
704
+ # Interpolation factor for uncertainty propagation (Equations 75 & 76)
705
+ unc_factor = np.log(energy_sc / energy_left) / np.log(
706
+ energy_right / energy_left
707
+ )
708
+
709
+ # Statistical uncertainty propagation (Equation 75):
710
+ # δJ = J * sqrt((δJ_left/J_left)^2 * (1 + unc_factor^2) + (δJ_right/J_right)^2)
711
+ stat_unc_sc = flux_sc * np.sqrt(
712
+ (stat_unc_left / flux_left) ** 2 * (1.0 + unc_factor**2)
713
+ + (stat_unc_right / flux_right) ** 2
714
+ )
715
+
716
+ # Systematic uncertainty propagation (Equation 76):
717
+ # σJ^g = σJ^src_kref * (⟨E^s_kref⟩ / E^ESA_kref)^γ_kref * (E^h / ⟨E^s_kref⟩)
718
+ # Systematic error scales proportionally with flux during power-law
719
+ # interpolation
720
+ sys_err_sc = sys_err_left * ((energy_sc / energy_left) ** slope)
721
+
722
+ # Step 4: Energy scaling transformation (Liouville theorem)
723
+ # flux_helio = flux_sc * (helio_energy / energy_sc)
724
+ # Use xarray broadcasting - helio_energies_ev will broadcast along esa_energy_step
725
+ with np.errstate(divide="ignore", invalid="ignore"):
726
+ energy_ratio = helio_energies_ev / energy_sc
727
+ flux_helio = flux_sc * energy_ratio
728
+ stat_unc_helio = stat_unc_sc * energy_ratio
729
+ sys_err_helio = sys_err_sc * energy_ratio
730
+
731
+ # Set any location where the value is not finite to NaN (converts +/-inf to NaN)
732
+ flux_helio = flux_helio.where(np.isfinite(flux_helio), np.nan)
733
+ stat_unc_helio = stat_unc_helio.where(np.isfinite(stat_unc_helio), np.nan)
734
+ sys_err_helio = sys_err_helio.where(np.isfinite(sys_err_helio), np.nan)
735
+
736
+ # Update the dataset with interpolated values
737
+ map_ds["ena_intensity"] = flux_helio
738
+ map_ds["ena_intensity_stat_uncert"] = stat_unc_helio
739
+ map_ds["ena_intensity_sys_err"] = sys_err_helio
740
+
741
+ return map_ds
@@ -342,7 +342,9 @@ class MapDescriptor:
342
342
  elif frame_str == "gcs":
343
343
  return SpiceFrame.IMAP_GCS
344
344
  else:
345
- raise NotImplementedError("Coordinate frame is not yet implemented.")
345
+ raise NotImplementedError(
346
+ f"Coordinate frame {frame_str} is not yet implemented."
347
+ )
346
348
 
347
349
  def to_empty_map(
348
350
  self,
@@ -102,7 +102,7 @@ def generate_pset_dataset(
102
102
 
103
103
  pset_dataset = empty_pset_dataset(
104
104
  de_dataset.epoch.data[0],
105
- de_dataset.esa_energy_step.data,
105
+ de_dataset.esa_energy_step,
106
106
  config_df.cal_prod_config.number_of_products,
107
107
  logical_source_parts["sensor"],
108
108
  )
@@ -121,7 +121,7 @@ def generate_pset_dataset(
121
121
 
122
122
 
123
123
  def empty_pset_dataset(
124
- epoch_val: int, l1b_energy_steps: np.ndarray, n_cal_prods: int, sensor_str: str
124
+ epoch_val: int, l1b_energy_steps: xr.DataArray, n_cal_prods: int, sensor_str: str
125
125
  ) -> xr.Dataset:
126
126
  """
127
127
  Allocate an empty xarray.Dataset with appropriate pset coordinates.
@@ -130,7 +130,7 @@ def empty_pset_dataset(
130
130
  ----------
131
131
  epoch_val : int
132
132
  The starting epoch in J2000 TT nanoseconds for data in the PSET.
133
- l1b_energy_steps : np.ndarray
133
+ l1b_energy_steps : xarray.DataArray
134
134
  The array of esa_energy_step data from the L1B DE product.
135
135
  n_cal_prods : int
136
136
  Number of calibration products to allocate.
@@ -164,8 +164,12 @@ def empty_pset_dataset(
164
164
  "hi_pset_esa_energy_step", check_schema=False
165
165
  ).copy()
166
166
  dtype = attrs.pop("dtype")
167
- # Find the unique, non-zero esa_energy_steps from the L1B data
168
- esa_energy_steps = np.array(sorted(set(l1b_energy_steps) - {0}), dtype=dtype)
167
+ # Find the unique esa_energy_steps from the L1B data
168
+ # Exclude 0 and FILLVAL
169
+ esa_energy_steps = np.array(
170
+ sorted(set(l1b_energy_steps.values) - {0, l1b_energy_steps.attrs["FILLVAL"]}),
171
+ dtype=dtype,
172
+ )
169
173
  coords["esa_energy_step"] = xr.DataArray(
170
174
  esa_energy_steps,
171
175
  name="esa_energy_step",
@@ -571,11 +575,26 @@ def find_second_de_packet_data(l1b_dataset: xr.Dataset) -> xr.Dataset:
571
575
  # We should get two CCSDS packets per 8-spin ESA step.
572
576
  # Get the indices of the packet before each ESA change.
573
577
  esa_step = epoch_dataset["esa_step"].values
578
+ esa_energy_step = epoch_dataset["esa_energy_step"].values
579
+ # A change in esa_step should indicate the location of the second packet in
580
+ # each pair of DE packets at an esa_energy_step. In practice, during some
581
+ # calibration activities, it was observed that the esa_energy_step can change
582
+ # when the esa_step did not. So, we look for either to change and use the
583
+ # indices of those changes to identify the second packet in each pair. We
584
+ # also need to add the last packet index and assume an energy step change
585
+ # occurs after the last packet.
574
586
  second_esa_packet_idx = np.append(
575
- np.flatnonzero(np.diff(esa_step) != 0), len(esa_step) - 1
587
+ np.flatnonzero((np.diff(esa_step) != 0) | (np.diff(esa_energy_step) != 0)),
588
+ len(esa_step) - 1,
589
+ )
590
+ # Remove esa energy steps at 0 - these are calibrations
591
+ keep_mask = esa_energy_step[second_esa_packet_idx] != 0
592
+ # Remove esa energy steps at FILLVAL - these are unidentified
593
+ keep_mask &= (
594
+ esa_energy_step[second_esa_packet_idx]
595
+ != l1b_dataset["esa_energy_step"].attrs["FILLVAL"]
576
596
  )
577
- # Remove esa steps at 0 - these are calibrations
578
- second_esa_packet_idx = second_esa_packet_idx[esa_step[second_esa_packet_idx] != 0]
597
+ second_esa_packet_idx = second_esa_packet_idx[keep_mask]
579
598
  # Remove indices where we don't have two consecutive packets at the same ESA
580
599
  if second_esa_packet_idx[0] == 0:
581
600
  logger.warning(
@@ -584,7 +603,8 @@ def find_second_de_packet_data(l1b_dataset: xr.Dataset) -> xr.Dataset:
584
603
  )
585
604
  second_esa_packet_idx = second_esa_packet_idx[1:]
586
605
  missing_esa_pair_mask = (
587
- esa_step[second_esa_packet_idx - 1] != esa_step[second_esa_packet_idx]
606
+ esa_energy_step[second_esa_packet_idx - 1]
607
+ != esa_energy_step[second_esa_packet_idx]
588
608
  )
589
609
  if missing_esa_pair_mask.any():
590
610
  logger.warning(
@@ -629,9 +649,11 @@ def get_de_clock_ticks_for_esa_step(
629
649
  # ESA step group so this match is the end time. The start time is
630
650
  # 8-spins earlier.
631
651
  spin_start_mets = spin_df.spin_start_met.to_numpy()
632
- # CCSDS MET has one second resolution, add one to it to make sure it is
633
- # greater than the spin start time it ended on.
634
- end_time_ind = np.flatnonzero(ccsds_met + 1 >= spin_start_mets).max()
652
+ # CCSDS MET has one second resolution, add two to it to make sure it is
653
+ # greater than the spin start time it ended on. Theoretically, adding
654
+ # one second should be sufficient, but in practice, with flight data, adding
655
+ # two seconds was found to be necessary.
656
+ end_time_ind = np.flatnonzero(ccsds_met + 2 >= spin_start_mets).max()
635
657
 
636
658
  # If the minimum absolute difference is greater than 1/2 the spin-phase
637
659
  # we have a problem.
@@ -11,14 +11,26 @@ from imap_processing.ena_maps.ena_maps import (
11
11
  HiPointingSet,
12
12
  RectangularSkyMap,
13
13
  )
14
- from imap_processing.ena_maps.utils.corrections import PowerLawFluxCorrector
14
+ from imap_processing.ena_maps.utils.corrections import (
15
+ PowerLawFluxCorrector,
16
+ apply_compton_getting_correction,
17
+ interpolate_map_flux_to_helio_frame,
18
+ )
15
19
  from imap_processing.ena_maps.utils.naming import MapDescriptor
16
20
  from imap_processing.hi.utils import CalibrationProductConfig
17
21
 
18
22
  logger = logging.getLogger(__name__)
19
23
 
24
+ SC_FRAME_VARS_TO_PROJECT = {
25
+ "counts",
26
+ "exposure_factor",
27
+ "bg_rates",
28
+ "bg_rates_unc",
29
+ "obs_date",
30
+ }
31
+ HELIO_FRAME_VARS_TO_PROJECT = SC_FRAME_VARS_TO_PROJECT | {"energy_sc"}
20
32
  # TODO: is an exposure time weighted average for obs_date appropriate?
21
- VARS_TO_EXPOSURE_TIME_AVERAGE = ["bg_rates", "bg_rates_unc", "obs_date"]
33
+ FULL_EXPOSURE_TIME_AVERAGE_SET = {"bg_rates", "bg_rates_unc", "obs_date", "energy_sc"}
22
34
 
23
35
 
24
36
  def hi_l2(
@@ -98,33 +110,64 @@ def generate_hi_map(
98
110
  The sky map with all the PSET data projected into the map.
99
111
  """
100
112
  output_map = descriptor.to_empty_map()
113
+ vars_to_bin = (
114
+ HELIO_FRAME_VARS_TO_PROJECT
115
+ if descriptor.frame_descriptor == "hf"
116
+ else SC_FRAME_VARS_TO_PROJECT
117
+ )
118
+ vars_to_exposure_time_average = FULL_EXPOSURE_TIME_AVERAGE_SET & vars_to_bin
101
119
 
102
120
  if not isinstance(output_map, RectangularSkyMap):
103
121
  raise NotImplementedError("Healpix map output not supported for Hi")
104
122
 
105
- # TODO: Implement Compton-Getting correction
106
- if descriptor.frame_descriptor != "sf":
107
- raise NotImplementedError("CG correction not implemented for Hi")
123
+ cached_esa_steps = None
108
124
 
109
125
  for pset_path in psets:
110
126
  logger.info(f"Processing {pset_path}")
111
- pset = HiPointingSet(pset_path, spin_phase=descriptor.spin_phase)
112
-
113
- # Background rate and uncertainty are exposure time weighted means in
114
- # the map.
115
- for var in VARS_TO_EXPOSURE_TIME_AVERAGE:
116
- pset.data[var] *= pset.data["exposure_factor"]
127
+ pset = HiPointingSet(pset_path)
128
+
129
+ # Store the first PSET esa_energy_step values and make sure every PSET
130
+ # contains the same set of esa_energy_step values.
131
+ # TODO: Correctly handle PSETs with different esa_energy_step values.
132
+ if cached_esa_steps is None:
133
+ cached_esa_steps = pset.data["esa_energy_step"].values.copy()
134
+ esa_ds = esa_energy_df(
135
+ l2_ancillary_path_dict["esa-energies"],
136
+ pset.data["esa_energy_step"].values,
137
+ ).to_xarray()
138
+ energy_kev = esa_ds["nominal_central_energy"]
139
+ if not np.array_equal(cached_esa_steps, pset.data["esa_energy_step"].values):
140
+ raise ValueError(
141
+ "All PSETs must have the same set of esa_energy_step values."
142
+ )
143
+
144
+ if descriptor.frame_descriptor == "hf":
145
+ # convert esa nominal central energy from keV to eV
146
+ esa_energy_ev = energy_kev * 1000
147
+ pset = apply_compton_getting_correction(pset, esa_energy_ev)
148
+
149
+ # Multiply variables that need to be exposure time weighted average by
150
+ # exposure factor.
151
+ for var in vars_to_exposure_time_average:
152
+ if var in pset.data:
153
+ pset.data[var] *= pset.data["exposure_factor"]
154
+
155
+ # Set the mask used to filter ram/anti-ram pixels
156
+ pset_valid_mask = None # Default to no mask (full spin)
157
+ if descriptor.spin_phase == "ram":
158
+ pset_valid_mask = pset.data["ram_mask"]
159
+ elif descriptor.spin_phase == "anti":
160
+ pset_valid_mask = ~pset.data["ram_mask"]
117
161
 
118
162
  # Project (bin) the PSET variables into the map pixels
119
163
  output_map.project_pset_values_to_map(
120
- pset,
121
- ["counts", "exposure_factor", "bg_rates", "bg_rates_unc", "obs_date"],
164
+ pset, list(vars_to_bin), pset_valid_mask=pset_valid_mask
122
165
  )
123
166
 
124
167
  # Finish the exposure time weighted mean calculation of backgrounds
125
168
  # Allow divide by zero to fill set pixels with zero exposure time to NaN
126
169
  with np.errstate(divide="ignore"):
127
- for var in VARS_TO_EXPOSURE_TIME_AVERAGE:
170
+ for var in vars_to_exposure_time_average:
128
171
  output_map.data_1d[var] /= output_map.data_1d["exposure_factor"]
129
172
 
130
173
  output_map.data_1d.update(calculate_ena_signal_rates(output_map.data_1d))
@@ -138,30 +181,27 @@ def generate_hi_map(
138
181
  # TODO: Figure out how to compute obs_date_range (stddev of obs_date)
139
182
  output_map.data_1d["obs_date_range"] = xr.zeros_like(output_map.data_1d["obs_date"])
140
183
 
184
+ # Set the energy_step_delta values to the energy bandpass half-width-half-max
185
+ energy_delta = esa_ds["bandpass_fwhm"] / 2
186
+ output_map.data_1d["energy_delta_minus"] = energy_delta
187
+ output_map.data_1d["energy_delta_plus"] = energy_delta
188
+
141
189
  # Rename and convert coordinate from esa_energy_step energy
142
- esa_df = esa_energy_df(
143
- l2_ancillary_path_dict["esa-energies"],
144
- output_map.data_1d["esa_energy_step"].data,
145
- )
146
190
  output_map.data_1d = output_map.data_1d.rename({"esa_energy_step": "energy"})
147
- output_map.data_1d = output_map.data_1d.assign_coords(
148
- energy=esa_df["nominal_central_energy"].values
149
- )
150
- # Set the energy_step_delta values to the energy bandpass half-width-half-max
151
- energy_delta = esa_df["bandpass_fwhm"].values / 2
152
- output_map.data_1d["energy_delta_minus"] = xr.DataArray(
153
- energy_delta,
154
- name="energy_delta_minus",
155
- dims=["energy"],
156
- )
157
- output_map.data_1d["energy_delta_plus"] = xr.DataArray(
158
- energy_delta,
159
- name="energy_delta_plus",
160
- dims=["energy"],
161
- )
191
+ output_map.data_1d = output_map.data_1d.assign_coords(energy=energy_kev.values)
162
192
 
163
193
  output_map.data_1d = output_map.data_1d.drop("esa_energy_step_label")
164
194
 
195
+ # Apply Compton-Getting interpolation for heliocentric frame maps
196
+ if descriptor.frame_descriptor == "hf":
197
+ esa_energy_ev = esa_energy_ev.rename({"esa_energy_step": "energy"})
198
+ esa_energy_ev = esa_energy_ev.assign_coords(energy=energy_kev.values)
199
+ output_map.data_1d = interpolate_map_flux_to_helio_frame(
200
+ output_map.data_1d,
201
+ output_map.data_1d["energy"] * 1000, # Convert ESA energies to eV
202
+ esa_energy_ev, # heliocentric energies (same as ESA energies)
203
+ )
204
+
165
205
  return output_map
166
206
 
167
207
 
@@ -420,7 +460,7 @@ def _calculate_improved_stat_variance(
420
460
 
421
461
 
422
462
  def esa_energy_df(
423
- esa_energies_path: str | Path, esa_energy_steps: np.ndarray
463
+ esa_energies_path: str | Path, esa_energy_steps: np.ndarray | slice | None = None
424
464
  ) -> pd.DataFrame:
425
465
  """
426
466
  Lookup the nominal central energy values for given esa energy steps.
@@ -429,8 +469,9 @@ def esa_energy_df(
429
469
  ----------
430
470
  esa_energies_path : str or pathlib.Path
431
471
  Location of the calibration csv file containing the lookup data.
432
- esa_energy_steps : numpy.ndarray
433
- The ESA energy steps to get energies for.
472
+ esa_energy_steps : numpy.ndarray, slice, or None
473
+ The ESA energy steps to get energies for. If not provided (default is None),
474
+ the full dataframe is returned.
434
475
 
435
476
  Returns
436
477
  -------
@@ -438,6 +479,8 @@ def esa_energy_df(
438
479
  Full data frame from the csv file filtered to only include the
439
480
  esa_energy_steps input.
440
481
  """
482
+ if esa_energy_steps is None:
483
+ esa_energy_steps = slice(None)
441
484
  esa_energies_lut = pd.read_csv(
442
485
  esa_energies_path, comment="#", index_col="esa_energy_step"
443
486
  )
@@ -77,7 +77,9 @@ def generate_coverage(
77
77
  dsn_outage_mask |= (time_range >= start_et) & (time_range <= end_et)
78
78
 
79
79
  for station_name, (lon, lat, alt, min_elevation) in stations.items():
80
- _azimuth, elevation = calculate_azimuth_and_elevation(lon, lat, alt, time_range)
80
+ _azimuth, elevation = calculate_azimuth_and_elevation(
81
+ lon, lat, alt, time_range, obsref="IAU_EARTH"
82
+ )
81
83
  visible = elevation > min_elevation
82
84
 
83
85
  outage_mask = np.zeros(time_range.shape, dtype=bool)
@@ -710,6 +710,7 @@ def process_packet(
710
710
  "met": int(met_all[i]),
711
711
  "met_in_utc": met_to_utc(met_all[i]).split(".")[0],
712
712
  "ttj2000ns": int(met_to_ttj2000ns(met_all[i])),
713
+ "instrument": "mag",
713
714
  "mag_epoch": int(mago_times_all[i]),
714
715
  "mag_B_GSE": [Decimal(str(v)) for v in gse_vector[i]],
715
716
  "mag_B_GSM": [Decimal(str(v)) for v in gsm_vector[i]],
@@ -171,6 +171,7 @@ def process_hit(xarray_data: xr.Dataset) -> list[dict]:
171
171
  "met": int(met),
172
172
  "met_in_utc": met_to_utc(met).split(".")[0],
173
173
  "ttj2000ns": int(met_to_ttj2000ns(met)),
174
+ "instrument": "hit",
174
175
  "hit_e_a_side_low_en": int(l1["IALRT_RATE_1"] + l1["IALRT_RATE_2"]),
175
176
  "hit_e_a_side_med_en": int(l1["IALRT_RATE_5"] + l1["IALRT_RATE_6"]),
176
177
  "hit_e_a_side_high_en": int(l1["IALRT_RATE_7"]),
@@ -226,6 +226,7 @@ def process_swapi_ialirt(
226
226
  "met": int(met_values[entry]),
227
227
  "met_in_utc": met_to_utc(met_values[entry]).split(".")[0],
228
228
  "ttj2000ns": int(met_to_ttj2000ns(met_values[entry])),
229
+ "instrument": "swapi",
229
230
  "swapi_pseudo_proton_speed": Decimal(solution["pseudo_speed"][entry]),
230
231
  "swapi_pseudo_proton_density": Decimal(
231
232
  solution["pseudo_density"][entry]
@@ -553,6 +553,7 @@ def process_swe(accumulated_data: xr.Dataset, in_flight_cal_files: list) -> list
553
553
  "met": met_first_half,
554
554
  "met_in_utc": met_to_utc(met_first_half).split(".")[0],
555
555
  "ttj2000ns": int(met_to_ttj2000ns(met_first_half)),
556
+ "instrument": "swe",
556
557
  "swe_normalized_counts": [int(val) for val in summed_first],
557
558
  "swe_counterstreaming_electrons": bde_first_half,
558
559
  },
@@ -563,6 +564,7 @@ def process_swe(accumulated_data: xr.Dataset, in_flight_cal_files: list) -> list
563
564
  "met": met_second_half,
564
565
  "met_in_utc": met_to_utc(met_second_half).split(".")[0],
565
566
  "ttj2000ns": int(met_to_ttj2000ns(met_second_half)),
567
+ "instrument": "swe",
566
568
  "swe_normalized_counts": [int(val) for val in summed_second],
567
569
  "swe_counterstreaming_electrons": bde_second_half,
568
570
  },