imap-processing 0.19.3__py3-none-any.whl → 1.0.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

Files changed (39)
  1. imap_processing/_version.py +2 -2
  2. imap_processing/cdf/config/imap_codice_l1a_variable_attrs.yaml +132 -133
  3. imap_processing/cdf/config/imap_codice_l1b_variable_attrs.yaml +133 -132
  4. imap_processing/cdf/config/imap_constant_attrs.yaml +1 -1
  5. imap_processing/cdf/config/imap_enamaps_l2-common_variable_attrs.yaml +54 -60
  6. imap_processing/cdf/config/imap_enamaps_l2-healpix_variable_attrs.yaml +1 -2
  7. imap_processing/cdf/config/imap_enamaps_l2-rectangular_variable_attrs.yaml +5 -3
  8. imap_processing/cdf/config/imap_hi_global_cdf_attrs.yaml +1 -2
  9. imap_processing/cdf/config/imap_hi_variable_attrs.yaml +1 -0
  10. imap_processing/cdf/config/imap_ultra_l1c_variable_attrs.yaml +8 -6
  11. imap_processing/cdf/utils.py +5 -0
  12. imap_processing/cli.py +72 -54
  13. imap_processing/codice/codice_l1a.py +82 -23
  14. imap_processing/codice/codice_l1b.py +35 -6
  15. imap_processing/codice/constants.py +173 -135
  16. imap_processing/ena_maps/ena_maps.py +15 -17
  17. imap_processing/glows/l1b/glows_l1b.py +29 -21
  18. imap_processing/hi/hi_l1a.py +49 -29
  19. imap_processing/hi/hi_l1b.py +34 -0
  20. imap_processing/hi/hi_l1c.py +23 -17
  21. imap_processing/hi/hi_l2.py +225 -81
  22. imap_processing/ialirt/l0/ialirt_spice.py +1 -1
  23. imap_processing/ialirt/l0/parse_mag.py +33 -0
  24. imap_processing/ialirt/utils/create_xarray.py +12 -1
  25. imap_processing/lo/l1b/lo_l1b.py +111 -77
  26. imap_processing/lo/l1c/lo_l1c.py +10 -11
  27. imap_processing/lo/l2/lo_l2.py +43 -22
  28. imap_processing/mag/l1c/interpolation_methods.py +9 -1
  29. imap_processing/mag/l1c/mag_l1c.py +99 -45
  30. imap_processing/spice/geometry.py +28 -19
  31. imap_processing/ultra/l1c/helio_pset.py +2 -2
  32. imap_processing/ultra/l1c/spacecraft_pset.py +7 -4
  33. imap_processing/ultra/l2/ultra_l2.py +54 -27
  34. imap_processing/ultra/utils/ultra_l1_utils.py +4 -4
  35. {imap_processing-0.19.3.dist-info → imap_processing-1.0.0.dist-info}/METADATA +1 -1
  36. {imap_processing-0.19.3.dist-info → imap_processing-1.0.0.dist-info}/RECORD +39 -39
  37. {imap_processing-0.19.3.dist-info → imap_processing-1.0.0.dist-info}/LICENSE +0 -0
  38. {imap_processing-0.19.3.dist-info → imap_processing-1.0.0.dist-info}/WHEEL +0 -0
  39. {imap_processing-0.19.3.dist-info → imap_processing-1.0.0.dist-info}/entry_points.txt +0 -0
imap_processing/mag/l1c/mag_l1c.py

@@ -9,12 +9,14 @@ from imap_processing.cdf.imap_cdf_manager import ImapCdfAttributes
 from imap_processing.mag import imap_mag_sdc_configuration_v001 as configuration
 from imap_processing.mag.constants import ModeFlags, VecSec
 from imap_processing.mag.l1c.interpolation_methods import InterpolationFunction
+from imap_processing.spice.time import et_to_ttj2000ns, str_to_et
 
 logger = logging.getLogger(__name__)
 
 
 def mag_l1c(
     first_input_dataset: xr.Dataset,
+    day_to_process: np.datetime64,
     second_input_dataset: xr.Dataset = None,
 ) -> xr.Dataset:
     """
@@ -27,6 +29,9 @@ def mag_l1c(
     first_input_dataset : xr.Dataset
         The first input dataset to process. This can be either burst or norm data, for
        mago or magi.
+    day_to_process : np.datetime64
+        The day to process, in np.datetime64[D] format. This is used to fill gaps at
+        the beginning or end of the day if needed.
     second_input_dataset : xr.Dataset, optional
         The second input dataset to process. This should be burst if first_input_dataset
         was norm, or norm if first_input_dataset was burst. It should match the
@@ -263,13 +268,15 @@ def process_mag_l1c(
     normal_mode_dataset: xr.Dataset,
     burst_mode_dataset: xr.Dataset,
     interpolation_function: InterpolationFunction,
+    day_to_process: np.datetime64 | None = None,
 ) -> np.ndarray:
     """
     Create MAG L1C data from L1B datasets.
 
     This function starts from the normal mode dataset and completes the following steps:
     1. find all the gaps in the dataset
-    2. generate a new timeline with the gaps filled
+    2. generate a new timeline with the gaps filled, including new timestamps to fill
+       out the rest of the day to +/- 15 minutes on either side
     3. fill the timeline with normal mode data (so, all the non-gap timestamps)
     4. interpolate the gaps using the burst mode data and the method specified in
        interpolation_function.
@@ -288,6 +295,10 @@ def process_mag_l1c(
         The burst mode dataset, which is used to fill in the gaps in the normal mode.
     interpolation_function : InterpolationFunction
         The interpolation function to use to fill in the gaps.
+    day_to_process : np.datetime64, optional
+        The day to process, in np.datetime64[D] format. This is used to fill
+        gaps at the beginning or end of the day if needed. If not included, these
+        gaps will not be filled.
 
     Returns
     -------
@@ -306,8 +317,23 @@ def process_mag_l1c(
     output_dataset["sample_interpolated"] = xr.DataArray(
         np.zeros(len(normal_mode_dataset))
     )
+    day_start_ns = None
+    day_end_ns = None
 
-    gaps = find_all_gaps(norm_epoch, normal_vecsec_dict)
+    if day_to_process is not None:
+        day_start = day_to_process.astype("datetime64[s]") - np.timedelta64(15, "m")
+
+        # get the end of the day plus 15 minutes
+        day_end = (
+            day_to_process.astype("datetime64[s]")
+            + np.timedelta64(1, "D")
+            + np.timedelta64(15, "m")
+        )
+
+        day_start_ns = et_to_ttj2000ns(str_to_et(str(day_start)))
+        day_end_ns = et_to_ttj2000ns(str_to_et(str(day_end)))
+
+    gaps = find_all_gaps(norm_epoch, normal_vecsec_dict, day_start_ns, day_end_ns)
 
     new_timeline = generate_timeline(norm_epoch, gaps)
     norm_filled = fill_normal_data(normal_mode_dataset, new_timeline)
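
A minimal sketch of the day-padding arithmetic above, in pure NumPy with a hypothetical input day (the real code then converts the boundary times to TTJ2000 nanoseconds with str_to_et and et_to_ttj2000ns, which require loaded SPICE kernels):

```python
import numpy as np

day_to_process = np.datetime64("2025-10-01", "D")  # hypothetical input day

# Pad the day by 15 minutes on each side before gap-finding
day_start = day_to_process.astype("datetime64[s]") - np.timedelta64(15, "m")
day_end = (
    day_to_process.astype("datetime64[s]")
    + np.timedelta64(1, "D")
    + np.timedelta64(15, "m")
)
print(day_start, day_end)  # 2025-09-30T23:45:00 2025-10-02T00:15:00
```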
@@ -319,7 +345,9 @@ def process_mag_l1c(
 
 
 def fill_normal_data(
-    normal_dataset: xr.Dataset, new_timeline: np.ndarray
+    normal_dataset: xr.Dataset,
+    new_timeline: np.ndarray,
+    day_to_process: np.datetime64 | None = None,
 ) -> np.ndarray:
     """
     Fill the new timeline with the normal mode data.
@@ -332,6 +360,10 @@ def fill_normal_data(
         The normal mode dataset.
     new_timeline : np.ndarray
         A 1D array of timestamps to fill.
+    day_to_process : np.datetime64, optional
+        The day to process, in np.datetime64[D] format. This is used to fill
+        gaps at the beginning or end of the day if needed. If not included, these
+        gaps will not be filled.
 
     Returns
     -------
@@ -341,12 +373,11 @@ def fill_normal_data(
         Indices: 0 - epoch, 1-4 - vector x, y, z, and range, 5 - generated flag,
         6-7 - compression flags.
     """
-    # TODO: fill with FILLVAL?
+    # TODO: fill with FILLVAL
     filled_timeline: np.ndarray = np.zeros((len(new_timeline), 8))
     filled_timeline[:, 0] = new_timeline
     # Flags, will also indicate any missed timestamps
     filled_timeline[:, 5] = ModeFlags.MISSING.value
-
     for index, timestamp in enumerate(normal_dataset["epoch"].data):
         timeline_index = np.searchsorted(new_timeline, timestamp)
         filled_timeline[timeline_index, 1:5] = normal_dataset["vectors"].data[index]
@@ -399,9 +430,11 @@ def interpolate_gaps(
     )
 
     for gap in gaps:
-        # TODO: we might need a few inputs before or after start/end
+        # TODO: we need extra data at the beginning and end of the gap
         burst_gap_start = (np.abs(burst_epochs - gap[0])).argmin()
         burst_gap_end = (np.abs(burst_epochs - gap[1])).argmin()
+        # if this gap is too big, we may be missing burst data at the start or end of
+        # the day and shouldn't use it here.
 
         # for the CIC filter, we need 2x normal mode cadence seconds
 
@@ -428,10 +461,6 @@ def interpolate_gaps(
         gap_timeline = filled_norm_timeline[
             (filled_norm_timeline > gap[0]) & (filled_norm_timeline < gap[1])
         ]
-        logger.info(
-            f"difference between gap start and burst start: "
-            f"{gap_timeline[0] - burst_epochs[burst_start]}"
-        )
 
         short = (gap_timeline >= burst_epochs[burst_start]) & (
             gap_timeline <= burst_epochs[burst_gap_end]
@@ -487,40 +516,46 @@ def generate_timeline(epoch_data: np.ndarray, gaps: np.ndarray) -> np.ndarray:
         The existing timeline data, in the shape (n,).
     gaps : numpy.ndarray
         An array of gaps to fill, with shape (n, 2) where n is the number of gaps.
-        The gap is specified as (start, end) where start and end both exist in the
-        timeline already.
+        The gap is specified as (start, end).
 
     Returns
     -------
     numpy.ndarray
         The new timeline, filled with the existing data and the generated gaps.
     """
-    full_timeline: np.ndarray = np.zeros(0)
-
-    # When we have our gaps, generate the full timeline
-    last_gap = 0
+    full_timeline: np.ndarray = np.array([])
+    last_index = 0
     for gap in gaps:
-        gap_start_index = np.where(epoch_data == gap[0])[0]
-        gap_end_index = np.where(epoch_data == gap[1])[0]
-        if gap_start_index.size != 1 or gap_end_index.size != 1:
-            raise ValueError("Gap start or end not found in input timeline")
-
+        epoch_start_index = np.searchsorted(epoch_data, gap[0], side="left")
         full_timeline = np.concatenate(
-            (
-                full_timeline,
-                epoch_data[last_gap : gap_start_index[0]],
-                generate_missing_timestamps(gap),
-            )
+            (full_timeline, epoch_data[last_index:epoch_start_index])
        )
-        last_gap = gap_end_index[0]
+        generated_timestamps = generate_missing_timestamps(gap)
+        if generated_timestamps.size == 0:
+            continue
+
+        # Remove any generated timestamps that are already in the timeline
+        # Use np.isin to check for exact matches
+        mask = ~np.isin(generated_timestamps, full_timeline)
+        generated_timestamps = generated_timestamps[mask]
+
+        if generated_timestamps.size == 0:
+            print("All generated timestamps already exist in timeline")
+            continue
 
-    full_timeline = np.concatenate((full_timeline, epoch_data[last_gap:]))
+        full_timeline = np.concatenate((full_timeline, generated_timestamps))
+        last_index = int(np.searchsorted(epoch_data, gap[1], side="left"))
+
+    full_timeline = np.concatenate((full_timeline, epoch_data[last_index:]))
 
     return full_timeline
 
 
 def find_all_gaps(
-    epoch_data: np.ndarray, vecsec_dict: dict | None = None
+    epoch_data: np.ndarray,
+    vecsec_dict: dict | None = None,
+    start_of_day_ns: float | None = None,
+    end_of_day_ns: float | None = None,
 ) -> np.ndarray:
     """
     Find all the gaps in the epoch data.
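
A self-contained sketch of the new generate_timeline splice logic above, with hypothetical nanosecond epochs at a 0.5 s cadence: np.searchsorted locates the splice points and np.isin drops generated timestamps that already exist in the timeline:

```python
import numpy as np

epoch_data = np.array([0.0, 0.5e9, 1.0e9, 3.0e9, 3.5e9])  # ns; gap after 1.0e9
gap = (1.0e9, 3.0e9)

start = np.searchsorted(epoch_data, gap[0], side="left")
head = epoch_data[:start]                      # epochs before the gap
generated = np.arange(gap[0], gap[1], 0.5e9)   # 0.5 s cadence fill
generated = generated[~np.isin(generated, head)]  # drop exact duplicates
last = np.searchsorted(epoch_data, gap[1], side="left")

timeline = np.concatenate((head, generated, epoch_data[last:]))
print(timeline / 1e9)  # [0.  0.5 1.  1.5 2.  2.5 3.  3.5] seconds
```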
@@ -529,6 +564,9 @@
     it will assume a nominal 1/2 second gap. A gap is defined as missing data from the
     expected sequence as defined by vectors_per_second_attr.
 
+    If start_of_day_ns and end_of_day_ns are provided, gaps at the beginning and end of
+    the day will be added if the epoch_data does not cover the full day.
+
     Parameters
     ----------
     epoch_data : numpy.ndarray
@@ -537,6 +575,12 @@
         A dictionary of the form {start: vecsec, start: vecsec} where start is the time
         in nanoseconds and vecsec is the number of vectors per second. This will be
         used to find the gaps. If not provided, a 1/2 second gap is assumed.
+    start_of_day_ns : float, optional
+        The start of the day in nanoseconds since TTJ2000. If provided, a gap will be
+        added from this time to the first epoch if they don't match.
+    end_of_day_ns : float, optional
+        The end of the day in nanoseconds since TTJ2000. If provided, a gap will be
+        added from the last epoch to this time if they don't match.
 
     Returns
     -------
@@ -546,15 +590,23 @@
         timeline.
     """
     gaps: np.ndarray = np.zeros((0, 3))
-    if vecsec_dict is None:
-        # TODO: when we go back to the previous file, also retrieve expected
-        # vectors per second
-        # If no vecsec is provided, assume 2 vectors per second
-        vecsec_dict = {0: VecSec.TWO_VECS_PER_S.value}
+
+    # TODO: when we go back to the previous file, also retrieve expected
+    # vectors per second
+
+    vecsec_dict = {0: VecSec.TWO_VECS_PER_S.value} | (vecsec_dict or {})
 
     end_index = epoch_data.shape[0]
+
+    if start_of_day_ns is not None and epoch_data[0] > start_of_day_ns:
+        # Add a gap from the start of the day to the first timestamp
+        gaps = np.concatenate(
+            (gaps, np.array([[start_of_day_ns, epoch_data[0], vecsec_dict[0]]]))
+        )
+
     for start_time in reversed(sorted(vecsec_dict.keys())):
-        start_index = np.where(start_time == epoch_data)[0][0]
+        # Find the start index that is equal to or immediately after start_time
+        start_index = np.searchsorted(epoch_data, start_time, side="left")
         gaps = np.concatenate(
             (
                 find_gaps(
@@ -565,6 +617,11 @@
                 )
         end_index = start_index
 
+    if end_of_day_ns is not None and epoch_data[-1] < end_of_day_ns:
+        gaps = np.concatenate(
+            (gaps, np.array([[epoch_data[-1], end_of_day_ns, vecsec_dict[start_time]]]))
+        )
+
     return gaps
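
find_all_gaps now seeds the rate dictionary instead of branching on None. A sketch of the merge semantics, assuming VecSec.TWO_VECS_PER_S.value == 2 (per the "2 vectors per second" comment) and Python 3.9+ for the dict | operator:

```python
def merged_vecsec(vecsec_dict: dict | None) -> dict:
    # Seed time 0 with the nominal rate; caller entries extend or override it.
    return {0: 2} | (vecsec_dict or {})

print(merged_vecsec(None))      # {0: 2}
print(merged_vecsec({100: 4}))  # {0: 2, 100: 4}
print(merged_vecsec({0: 1}))    # {0: 1} -- the default is overridden
```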
@@ -592,11 +649,9 @@ def find_gaps(timeline_data: np.ndarray, vectors_per_second: int) -> np.ndarray:
     # Expected difference between timestamps in nanoseconds.
     expected_gap = 1 / vectors_per_second * 1e9
 
-    # TODO: timestamps can vary by a few ms. Per Alastair, this can be around 7.5% of
-    # cadence without counting as a "gap".
     diffs = abs(np.diff(timeline_data))
-    # 3.5e7 == 7.5% of 0.5s in nanoseconds, a common gap. In the future, this number
-    # will be calculated from the expected gap.
+
+    # Gap can be up to 7.5% larger than expected vectors per second due to clock drift
     gap_index = np.asarray(diffs - expected_gap > expected_gap * 0.075).nonzero()[0]
     output: np.ndarray = np.zeros((len(gap_index), 3))
 
@@ -607,7 +662,6 @@ def find_gaps(timeline_data: np.ndarray, vectors_per_second: int) -> np.ndarray:
             vectors_per_second,
         ]
 
-    # TODO: How should I handle/find gaps at the end?
     return output
 
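A minimal sketch of the gap test, with hypothetical timestamps: consecutive samples count as a gap only when their spacing exceeds the expected cadence by more than 7.5%:

```python
import numpy as np

vectors_per_second = 2
expected_gap = 1 / vectors_per_second * 1e9  # 0.5 s in nanoseconds

timeline_data = np.array([0.0, 0.5e9, 1.0e9, 2.5e9, 3.0e9])  # 1.5 s jump
diffs = abs(np.diff(timeline_data))
gap_index = np.asarray(diffs - expected_gap > expected_gap * 0.075).nonzero()[0]
print(gap_index)  # [2] -> the gap sits between timeline_data[2] and [3]
```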
@@ -622,7 +676,8 @@ def generate_missing_timestamps(gap: np.ndarray) -> np.ndarray:
     ----------
     gap : numpy.ndarray
         Array of timestamps of shape (2,) containing n gaps with start_gap and
-        end_gap. Start_gap and end_gap both correspond to points in timeline_data.
+        end_gap. Start_gap and end_gap both correspond to points in timeline_data and
+        are included in the output timespan.
 
     Returns
     -------
@@ -630,9 +685,7 @@ def generate_missing_timestamps(gap: np.ndarray) -> np.ndarray:
         Completed timeline.
     """
     # Generated timestamps should always be 0.5 seconds apart
-    # TODO: is this in the configuration file?
     difference_ns = 0.5 * 1e9
-
     output: np.ndarray = np.arange(gap[0], gap[1], difference_ns)
     return output
 
@@ -657,8 +710,9 @@ def vectors_per_second_from_string(vecsec_string: str) -> dict:
     vecsec_dict = {}
     vecsec_segments = vecsec_string.split(",")
     for vecsec_segment in vecsec_segments:
-        start_time, vecsec = vecsec_segment.split(":")
-        vecsec_dict[int(start_time)] = int(vecsec)
+        if vecsec_segment:
+            start_time, vecsec = vecsec_segment.split(":")
+            vecsec_dict[int(start_time)] = int(vecsec)
 
     return vecsec_dict
 
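Usage of the guarded parser, with hypothetical start times in nanoseconds; the new `if vecsec_segment` check skips empty segments, so trailing commas or an empty attribute string no longer raise ValueError:

```python
print(vectors_per_second_from_string("0:2,1000000000:4"))
# {0: 2, 1000000000: 4}
print(vectors_per_second_from_string("0:2,"))
# {0: 2} -- the empty trailing segment is skipped instead of raising
```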
imap_processing/spice/geometry.py

@@ -21,7 +21,7 @@ from numpy.typing import NDArray
 class SpiceBody(IntEnum):
     """Enum containing SPICE IDs for bodies that we use."""
 
-    # A subset of IMAP Specific bodies as defined in imap_001.tf
+    # A subset of IMAP Specific bodies as defined in imap_xxx.tf
     IMAP = -43
     IMAP_SPACECRAFT = -43000
     # IMAP Pointing Frame (Despun) as defined in imap_science_xxx.tf
@@ -58,7 +58,7 @@ class SpiceFrame(IntEnum):
     IMAP_CODICE = -43400
     IMAP_HIT = -43500
     IMAP_IDEX = -43700
-    IMAP_GLOWS = -43751
+    IMAP_GLOWS = -43750
 
     # IMAP Science Frames (new additions from imap_science_xxx.tf)
     IMAP_OMD = -43900
@@ -85,6 +85,8 @@ class SpiceFrame(IntEnum):
 
 BORESIGHT_LOOKUP = {
     SpiceFrame.IMAP_LO_BASE: np.array([0, -1, 0]),
+    SpiceFrame.IMAP_LO: np.array([0, -1, 0]),
+    SpiceFrame.IMAP_LO_STAR_SENSOR: np.array([0, -1, 0]),
     SpiceFrame.IMAP_HI_45: np.array([0, 1, 0]),
     SpiceFrame.IMAP_HI_90: np.array([0, 1, 0]),
     SpiceFrame.IMAP_ULTRA_45: np.array([0, 0, 1]),
@@ -160,7 +162,7 @@ def get_instrument_mounting_az_el(instrument: SpiceFrame) -> np.ndarray:
     # frame that is used to compute the s/c to instrument mounting.
     # Most of these vectors are the same as the instrument boresight vector.
     mounting_normal_vector = {
-        SpiceFrame.IMAP_LO_BASE: np.array([0, -1, 0]),
+        SpiceFrame.IMAP_LO_BASE: np.array([0, 0, -1]),
         SpiceFrame.IMAP_HI_45: np.array([0, 1, 0]),
         SpiceFrame.IMAP_HI_90: np.array([0, 1, 0]),
         SpiceFrame.IMAP_ULTRA_45: np.array([0, 0, 1]),
@@ -190,11 +192,16 @@ def get_spacecraft_to_instrument_spin_phase_offset(instrument: SpiceFrame) -> float:
     """
     Get the spin phase offset from the spacecraft to the instrument.
 
-    For now, the offset is a fixed lookup based on `Table 1: Nominal Instrument
+    Nominal offset values were determined using `Table 1: Nominal Instrument
     to S/C CS Transformations` in document `7516-0011_drw.pdf`. That Table
-    defines the angle from the spacecraft y-axis. We add 90 and take the modulous
-    with 360 in order to get the angle from the spacecraft x-axis. These fixed
-    values will need to be updated based on calibration data.
+    defines the angle from the spacecraft y-axis. We add 90-degrees and take the
+    modulus with 360 to get the angle from the spacecraft x-axis. This math is
+    shown in the comments after each key value pair in the dictionary defined
+    in code. The true values differ slightly from the nominal values. True
+    values are derived from the frame definitions in the IMAP frames kernel
+    which uses ground calibration measurements to define the as-built mounting
+    of each instrument. The function in this module, `get_instrument_mounting_az_el`,
+    was used to retrieve the true azimuth angles from the IMAP frames kernel.
 
     Parameters
     ----------
@@ -207,19 +214,21 @@ def get_spacecraft_to_instrument_spin_phase_offset(instrument: SpiceFrame) -> float:
         The spin phase offset from the spacecraft to the instrument.
     """
     phase_offset_lookup = {
-        SpiceFrame.IMAP_LO_BASE: 60 / 360,  # (330 + 90) % 360 = 60
-        SpiceFrame.IMAP_HI_45: 345 / 360,  # 255 + 90 = 345
-        SpiceFrame.IMAP_HI_90: 15 / 360,  # (285 + 90) % 360 = 15
-        SpiceFrame.IMAP_ULTRA_45: 123 / 360,  # 33 + 90 = 123
-        SpiceFrame.IMAP_ULTRA_90: 300 / 360,  # 210 + 90 = 300
+        # Phase offset values based on imap_100.tf frame kernel
+        # See docstring notes for details on how these values were determined.
+        SpiceFrame.IMAP_LO: 60 / 360,  # (330 + 90) % 360 = 60
+        SpiceFrame.IMAP_HI_45: 344.8264 / 360,  # 255 + 90 = 345
+        SpiceFrame.IMAP_HI_90: 15.1649 / 360,  # (285 + 90) % 360 = 15
+        SpiceFrame.IMAP_ULTRA_45: 122.8642 / 360,  # 33 + 90 = 123
+        SpiceFrame.IMAP_ULTRA_90: 299.9511 / 360,  # 210 + 90 = 300
         SpiceFrame.IMAP_SWAPI: 258 / 360,  # 168 + 90 = 258
-        SpiceFrame.IMAP_IDEX: 180 / 360,  # 90 + 90 = 180
-        SpiceFrame.IMAP_CODICE: 226 / 360,  # 136 + 90 = 226
-        SpiceFrame.IMAP_HIT: 120 / 360,  # 30 + 90 = 120
-        SpiceFrame.IMAP_SWE: 243 / 360,  # 153 + 90 = 243
-        SpiceFrame.IMAP_GLOWS: 217 / 360,  # 127 + 90 = 217
-        SpiceFrame.IMAP_MAG_I: 90 / 360,  # 0 + 90 = 90
-        SpiceFrame.IMAP_MAG_O: 90 / 360,  # 0 + 90 = 90
+        SpiceFrame.IMAP_IDEX: 179.9229 / 360,  # 90 + 90 = 180
+        SpiceFrame.IMAP_CODICE: 225.9086 / 360,  # 136 + 90 = 226
+        SpiceFrame.IMAP_HIT: 119.6452 / 360,  # 30 + 90 = 120
+        SpiceFrame.IMAP_SWE: 243.0155 / 360,  # 153 + 90 = 243
+        SpiceFrame.IMAP_GLOWS: 217.1384 / 360,  # 127 + 90 = 217
+        SpiceFrame.IMAP_MAG_I: 89.9709 / 360,  # 0 + 90 = 90
+        SpiceFrame.IMAP_MAG_O: 89.4077 / 360,  # 0 + 90 = 90
     }
     return phase_offset_lookup[instrument]
 
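The nominal arithmetic in the trailing comments reduces to one line; a worked example (in the release, the calibrated kernel values replace these nominals):

```python
def nominal_phase_offset(angle_from_y_deg: float) -> float:
    # Re-reference the mounting angle from the spacecraft y-axis to the
    # x-axis (+90 deg, mod 360), then express it as a spin-phase fraction.
    return ((angle_from_y_deg + 90) % 360) / 360

print(nominal_phase_offset(330))  # 0.1666... = 60/360 (IMAP_LO)
print(nominal_phase_offset(285))  # 0.0416... = 15/360 (IMAP_HI_90)
```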
imap_processing/ultra/l1c/helio_pset.py

@@ -71,7 +71,7 @@
     """
     pset_dict: dict[str, np.ndarray] = {}
     # Select only the species we are interested in.
-    indices = np.where(np.isin(de_dataset["e_bin"].values, species_id))[0]
+    indices = np.where(np.isin(de_dataset["ebin"].values, species_id))[0]
     species_dataset = de_dataset.isel(epoch=indices)
 
     rejected = get_de_rejection_mask(
@@ -176,7 +176,7 @@
     pset_dict["latitude"] = latitude[np.newaxis, ...]
     pset_dict["longitude"] = longitude[np.newaxis, ...]
     pset_dict["energy_bin_geometric_mean"] = energy_bin_geometric_means
-    pset_dict["helio_exposure_factor"] = exposure_time
+    pset_dict["helio_exposure_factor"] = exposure_time[np.newaxis, ...]
     pset_dict["pixel_index"] = healpix
     pset_dict["energy_bin_delta"] = np.diff(intervals, axis=1).squeeze()[
         np.newaxis, ...
imap_processing/ultra/l1c/spacecraft_pset.py

@@ -72,7 +72,7 @@
     pset_dict: dict[str, np.ndarray] = {}
 
     sensor = parse_filename_like(name)["sensor"][0:2]
-    indices = np.where(np.isin(de_dataset["e_bin"].values, species_id))[0]
+    indices = np.where(np.isin(de_dataset["ebin"].values, species_id))[0]
     species_dataset = de_dataset.isel(epoch=indices)
 
     # If there are no species return None.
@@ -184,7 +184,7 @@
     pset_dict["longitude"] = longitude[np.newaxis, ...]
     pset_dict["energy_bin_geometric_mean"] = energy_bin_geometric_means
     pset_dict["background_rates"] = background_rates[np.newaxis, ...]
-    pset_dict["exposure_factor"] = exposure_pointing
+    pset_dict["exposure_factor"] = exposure_pointing[np.newaxis, ...]
     pset_dict["pixel_index"] = healpix
     pset_dict["energy_bin_delta"] = np.diff(intervals, axis=1).squeeze()[
         np.newaxis, ...
@@ -197,8 +197,11 @@
     pset_dict["dead_time_ratio"] = deadtime_ratios
     pset_dict["spin_phase_step"] = np.arange(len(deadtime_ratios))
 
-    pset_dict["scatter_theta"] = scattering_theta
-    pset_dict["scatter_phi"] = scattering_phi
+    # Convert FWHM to gaussian uncertainty by dividing by 2.355
+    # See algorithm documentation (section 3.5.7, third bullet point) for more details
+    pset_dict["scatter_theta"] = scattering_theta / 2.355
+    pset_dict["scatter_phi"] = scattering_phi / 2.355
+
     pset_dict["scatter_threshold"] = scattering_thresholds
 
     # Add the energy delta plus/minus to the dataset
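
The 2.355 divisor is the standard Gaussian FWHM-to-sigma factor, FWHM = 2·sqrt(2·ln 2)·sigma ≈ 2.3548·sigma; a quick check with hypothetical FWHM values:

```python
import numpy as np

print(2 * np.sqrt(2 * np.log(2)))  # 2.3548... (rounded to 2.355 in the code)

scattering_theta = np.array([4.71, 2.355])  # hypothetical FWHM values
print(scattering_theta / 2.355)  # [2. 1.] -> 1-sigma scattering uncertainties
```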
imap_processing/ultra/l2/ultra_l2.py

@@ -56,18 +56,18 @@ REQUIRED_L1C_VARIABLES_PUSH = [
 ]
 REQUIRED_L1C_VARIABLES_PULL = [
     "exposure_factor",
-    "sensitivity",
     "background_rates",
     "obs_date",
 ]
 # These variables are expected but not strictly required. In certain test scenarios,
 # they may be missing, in which case we will raise a warning and continue.
 # All psets must be consistent and either have these variables or not.
-EXPECTED_L1C_VARIABLES_PULL = [
+EXPECTED_L1C_POINTING_INDEPENDENT_VARIABLES_PULL = [
     "geometric_function",
-    "efficiency",
     "scatter_theta",
     "scatter_phi",
+    "sensitivity",
+    "efficiency",
 ]
 # These variables are projected to the map as the mean of pointing set pixels value,
 # weighted by that pointing set pixel's exposure and solid angle
@@ -75,10 +75,10 @@ VARIABLES_TO_WEIGHT_BY_POINTING_SET_EXPOSURE_TIMES_SOLID_ANGLE = [
     "sensitivity",
     "background_rates",
     "obs_date",
-    "geometric_function",
-    "efficiency",
     "scatter_theta",
     "scatter_phi",
+    "geometric_function",
+    "efficiency",
 ]
 
 # These variables are dropped after they are used to
@@ -148,7 +148,7 @@ def get_variable_attributes_optional_energy_dependence(
     return metadata
 
 
-def generate_ultra_healpix_skymap(
+def generate_ultra_healpix_skymap(  # noqa: PLR0912
     ultra_l1c_psets: list[str | xr.Dataset],
     output_map_structure: (
         ena_maps.RectangularSkyMap | ena_maps.HealpixSkyMap
@@ -160,7 +160,7 @@ def generate_ultra_healpix_skymap(
     This function combines IMAP Ultra L1C pointing sets into a single L2 HealpixSkyMap.
     It handles the projection of values from pointing sets to the map, applies necessary
     weighting and background subtraction, and calculates ena_intensity
-    and ena_intensity_stat_unc.
+    and ena_intensity_stat_uncert.
 
     Parameters
     ----------
@@ -251,23 +251,36 @@ def generate_ultra_healpix_skymap(
     # TODO remove this in the future once all test data includes these variables
     # Add expected but not required variables to the pull projection list
     # Log a warning if they are missing from any PSET but continue processing.
-    expected_present_vars = []
+    expected_present_vars_pointing_ind = []
     first_pset = (
         load_cdf(ultra_l1c_psets[0])
         if isinstance(ultra_l1c_psets[0], (str, Path))
         else ultra_l1c_psets[0]
     )
-    for var in EXPECTED_L1C_VARIABLES_PULL:
+
+    for var in EXPECTED_L1C_POINTING_INDEPENDENT_VARIABLES_PULL:
         if var not in first_pset.variables:
             logger.warning(
                 f"Expected variable {var} not found in the first L1C PSET. "
                 "This variable will not be projected to the map."
             )
         else:
-            expected_present_vars.append(var)
+            expected_present_vars_pointing_ind.append(var)
+
+    # Get existing variables that should be weighted by exposure and solid angle
+    existing_vars_to_weight = []
+    pointing_indep_vars = []
+    for var in VARIABLES_TO_WEIGHT_BY_POINTING_SET_EXPOSURE_TIMES_SOLID_ANGLE:
+        if var in first_pset:
+            existing_vars_to_weight.append(var)
+            if "epoch" not in first_pset[var].dims:
+                pointing_indep_vars.append(var)
 
     output_map_structure.values_to_pull_project = list(
-        set(output_map_structure.values_to_pull_project + expected_present_vars)
+        set(
+            output_map_structure.values_to_pull_project
+            + expected_present_vars_pointing_ind
+        )
     )
 
     all_pset_epochs = []
@@ -313,13 +326,12 @@ def generate_ultra_healpix_skymap(
         pointing_set.data["pointing_set_exposure_times_solid_angle"] = (
             pointing_set.data["exposure_factor"] * pointing_set.solid_angle
         )
-
-        # Get variables that should be weighted by exposure and solid angle
-        existing_vars_to_weight = []
-        for var in VARIABLES_TO_WEIGHT_BY_POINTING_SET_EXPOSURE_TIMES_SOLID_ANGLE:
-            if var in pointing_set.data:
-                existing_vars_to_weight.append(var)
-
+        # TODO add generalized code in ena_maps to handle this
+        # if the variable does not have an epoch dimension, add one temporarily
+        # to allow for correct broadcasting during weighting.
+        # Keep track of which variables were modified so we can revert them later.
+        for var in pointing_indep_vars:
+            pointing_set.data[var] = pointing_set.data[var].expand_dims("epoch", axis=0)
         # Initial processing for weighted quantities at PSET level
         # Weight the values by exposure and solid angle
         # Ensure only valid pointing set pixels contribute to the weighted mean.
@@ -348,6 +360,9 @@ def generate_ultra_healpix_skymap(
     skymap.data_1d[existing_vars_to_weight] /= skymap.data_1d[
         "pointing_set_exposure_times_solid_angle"
     ]
+    # Revert any pointing independent variables back to their original dims
+    for var in pointing_indep_vars:
+        skymap.data_1d[var] = skymap.data_1d[var].squeeze("epoch", drop=True)
 
     # Background rates must be scaled by the ratio of the solid angles of the
     # map pixel / pointing set pixel
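
A minimal xarray sketch of the temporary-epoch round trip used above, with hypothetical dims and values: a pointing-independent variable gains a leading epoch dimension so it lines up with the per-epoch weighted variables, and is squeezed back out afterwards:

```python
import numpy as np
import xarray as xr

var = xr.DataArray(np.ones(4), dims=["pixel"])  # pointing-independent
var = var.expand_dims("epoch", axis=0)          # dims: ('epoch', 'pixel')

weights = xr.DataArray(np.array([[1.0, 2.0, 3.0, 4.0]]), dims=["epoch", "pixel"])
weighted = var * weights                        # shapes line up as (1, 4)

restored = weighted.squeeze("epoch", drop=True)
print(restored.dims)  # ('pixel',)
```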
@@ -376,7 +391,7 @@ def generate_ultra_healpix_skymap(
         skymap.data_1d["sensitivity"] * skymap.solid_angle * delta_energy
     )
 
-    skymap.data_1d["ena_intensity_stat_unc"] = (
+    skymap.data_1d["ena_intensity_stat_uncert"] = (
         skymap.data_1d["counts"].astype(float) ** 0.5
     ) / (
         skymap.data_1d["exposure_factor"]
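
The statistical uncertainty is Poisson counting error, sqrt(N), pushed through the same normalization as ena_intensity. The denominator is truncated in this hunk; the sketch below assumes it matches the exposure × sensitivity × solid angle × energy-width normalization shown just above, with hypothetical numbers:

```python
import numpy as np

counts = np.array([100.0, 400.0])
normalization = 2.0e3  # hypothetical exposure * sensitivity * solid_angle * dE

ena_intensity_stat_uncert = np.sqrt(counts) / normalization
print(ena_intensity_stat_uncert)  # [0.005 0.01 ]
```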
@@ -413,7 +428,7 @@ def generate_ultra_healpix_skymap(
     return skymap, np.array(all_pset_epochs)
 
 
-def ultra_l2(  # noqa: PLR0912
+def ultra_l2(
     data_dict: dict[str, xr.Dataset | str | Path],
     output_map_structure: (
         ena_maps.RectangularSkyMap | ena_maps.HealpixSkyMap
@@ -592,9 +607,11 @@ def ultra_l2(
     map_dataset = map_dataset.rename({"energy_bin_geometric_mean": "energy"})
 
     # Rename positional uncertainty variables if present
-    if "scatter_theta" in map_dataset and "scatter_phi" in map_dataset:
-        map_dataset = map_dataset.rename({"scatter_theta": "positional_uncert_theta"})
-        map_dataset = map_dataset.rename({"scatter_phi": "positional_uncert_phi"})
+    map_dataset = map_dataset.rename({"scatter_theta": "positional_uncert_theta"})
+    map_dataset = map_dataset.rename({"scatter_phi": "positional_uncert_phi"})
+
+    # Rename background rates to be compliant with the l2 map definitions
+    map_dataset = map_dataset.rename({"background_rates": "bg_rate"})
 
     # Add the defined attributes to the map's global attrs
     map_dataset.attrs.update(map_attrs)
@@ -613,7 +630,7 @@ def ultra_l2(
     # Add systematic error as all zeros with shape matching statistical unc
     # TODO: update once we have information from the instrument team
     map_dataset["ena_intensity_sys_err"] = xr.zeros_like(
-        map_dataset["ena_intensity_stat_unc"],
+        map_dataset["ena_intensity_stat_uncert"],
     )
 
     # Add epoch_delta
@@ -635,7 +652,6 @@ def ultra_l2(
         energy_delta_plus,
         dims=(CoordNames.ENERGY_L2.value,),
     )
-
     # Add variable specific attributes to the map's data_vars and coords
     for variable in map_dataset.data_vars:
         # Skip the subdivision depth variables, as these will only be
@@ -643,14 +659,25 @@ def ultra_l2(
         if "subdivision_depth" in variable:
             continue
 
+        # Support variables do not have epoch as the first dimension
+        # skip schema check for support variables or coords
+        skip_schema_check = not (
+            "epoch" not in map_dataset[variable].dims  # Support data
+            or variable
+            in [
+                "longitude",
+                "latitude",
+                "longitude_delta",
+                "latitude_delta",
+            ]  # Coordinate vars
+        )
         # The longitude and latitude variables will be present only in Healpix tiled
         # map, and, as support_data, should not have schema validation
         map_dataset[variable].attrs.update(
             get_variable_attributes_optional_energy_dependence(
                 cdf_attrs=cdf_attrs,
                 variable_array=map_dataset[variable],
-                check_schema=variable
-                not in ["longitude", "latitude", "longitude_delta", "latitude_delta"],
+                check_schema=skip_schema_check,
             )
         )
     for coord_variable in map_dataset.coords: