imap-processing 1.0.0-py3-none-any.whl → 1.0.2-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (68)
  1. imap_processing/_version.py +2 -2
  2. imap_processing/cdf/config/imap_codice_global_cdf_attrs.yaml +13 -1
  3. imap_processing/cdf/config/imap_codice_l1a_variable_attrs.yaml +97 -254
  4. imap_processing/cdf/config/imap_codice_l2-hi-omni_variable_attrs.yaml +635 -0
  5. imap_processing/cdf/config/imap_codice_l2-hi-sectored_variable_attrs.yaml +422 -0
  6. imap_processing/cdf/config/imap_enamaps_l2-common_variable_attrs.yaml +29 -22
  7. imap_processing/cdf/config/imap_enamaps_l2-healpix_variable_attrs.yaml +2 -0
  8. imap_processing/cdf/config/imap_enamaps_l2-rectangular_variable_attrs.yaml +12 -2
  9. imap_processing/cdf/config/imap_swapi_variable_attrs.yaml +2 -13
  10. imap_processing/cdf/utils.py +2 -2
  11. imap_processing/cli.py +10 -27
  12. imap_processing/codice/codice_l1a_lo_angular.py +362 -0
  13. imap_processing/codice/codice_l1a_lo_species.py +282 -0
  14. imap_processing/codice/codice_l1b.py +62 -97
  15. imap_processing/codice/codice_l2.py +801 -174
  16. imap_processing/codice/codice_new_l1a.py +64 -0
  17. imap_processing/codice/constants.py +96 -0
  18. imap_processing/codice/utils.py +270 -0
  19. imap_processing/ena_maps/ena_maps.py +157 -95
  20. imap_processing/ena_maps/utils/coordinates.py +5 -0
  21. imap_processing/ena_maps/utils/corrections.py +450 -0
  22. imap_processing/ena_maps/utils/map_utils.py +143 -42
  23. imap_processing/ena_maps/utils/naming.py +3 -1
  24. imap_processing/hi/hi_l1c.py +34 -12
  25. imap_processing/hi/hi_l2.py +82 -44
  26. imap_processing/ialirt/constants.py +7 -1
  27. imap_processing/ialirt/generate_coverage.py +3 -1
  28. imap_processing/ialirt/l0/parse_mag.py +1 -0
  29. imap_processing/ialirt/l0/process_codice.py +66 -0
  30. imap_processing/ialirt/l0/process_hit.py +1 -0
  31. imap_processing/ialirt/l0/process_swapi.py +1 -0
  32. imap_processing/ialirt/l0/process_swe.py +2 -0
  33. imap_processing/ialirt/process_ephemeris.py +6 -2
  34. imap_processing/ialirt/utils/create_xarray.py +4 -2
  35. imap_processing/idex/idex_l2a.py +2 -2
  36. imap_processing/idex/idex_l2b.py +1 -1
  37. imap_processing/lo/l1c/lo_l1c.py +62 -4
  38. imap_processing/lo/l2/lo_l2.py +85 -15
  39. imap_processing/mag/l1a/mag_l1a.py +2 -2
  40. imap_processing/mag/l1a/mag_l1a_data.py +71 -13
  41. imap_processing/mag/l1c/interpolation_methods.py +34 -13
  42. imap_processing/mag/l1c/mag_l1c.py +117 -67
  43. imap_processing/mag/l1d/mag_l1d_data.py +3 -1
  44. imap_processing/quality_flags.py +1 -0
  45. imap_processing/spice/geometry.py +11 -9
  46. imap_processing/spice/pointing_frame.py +77 -50
  47. imap_processing/swapi/constants.py +4 -0
  48. imap_processing/swapi/l1/swapi_l1.py +59 -24
  49. imap_processing/swapi/l2/swapi_l2.py +17 -3
  50. imap_processing/swe/utils/swe_constants.py +7 -7
  51. imap_processing/ultra/l1a/ultra_l1a.py +121 -72
  52. imap_processing/ultra/l1b/de.py +57 -1
  53. imap_processing/ultra/l1b/extendedspin.py +1 -1
  54. imap_processing/ultra/l1b/ultra_l1b_annotated.py +0 -1
  55. imap_processing/ultra/l1b/ultra_l1b_culling.py +2 -2
  56. imap_processing/ultra/l1b/ultra_l1b_extended.py +25 -12
  57. imap_processing/ultra/l1c/helio_pset.py +29 -6
  58. imap_processing/ultra/l1c/l1c_lookup_utils.py +4 -2
  59. imap_processing/ultra/l1c/spacecraft_pset.py +10 -6
  60. imap_processing/ultra/l1c/ultra_l1c.py +6 -6
  61. imap_processing/ultra/l1c/ultra_l1c_pset_bins.py +82 -20
  62. imap_processing/ultra/l2/ultra_l2.py +2 -2
  63. imap_processing-1.0.2.dist-info/METADATA +121 -0
  64. {imap_processing-1.0.0.dist-info → imap_processing-1.0.2.dist-info}/RECORD +67 -61
  65. imap_processing-1.0.0.dist-info/METADATA +0 -120
  66. {imap_processing-1.0.0.dist-info → imap_processing-1.0.2.dist-info}/LICENSE +0 -0
  67. {imap_processing-1.0.0.dist-info → imap_processing-1.0.2.dist-info}/WHEEL +0 -0
  68. {imap_processing-1.0.0.dist-info → imap_processing-1.0.2.dist-info}/entry_points.txt +0 -0

imap_processing/mag/l1c/mag_l1c.py

@@ -63,17 +63,16 @@ def mag_l1c(
     )

     interp_function = InterpolationFunction[configuration.L1C_INTERPOLATION_METHOD]
-    if normal_mode_dataset and burst_mode_dataset:
-        full_interpolated_timeline = process_mag_l1c(
-            normal_mode_dataset, burst_mode_dataset, interp_function
+    if burst_mode_dataset is not None:
+        # Only use day_to_process if there is no norm data
+        day_to_process_arg = day_to_process if normal_mode_dataset is None else None
+        full_interpolated_timeline: np.ndarray = process_mag_l1c(
+            normal_mode_dataset, burst_mode_dataset, interp_function, day_to_process_arg
         )
     elif normal_mode_dataset is not None:
-        full_interpolated_timeline = fill_normal_data(
-            normal_mode_dataset, normal_mode_dataset["epoch"].data
-        )
+        full_interpolated_timeline = fill_normal_data(normal_mode_dataset)
     else:
-        # TODO: With only burst data, downsample by retrieving the timeline
-        raise NotImplementedError
+        raise ValueError("At least one of norm or burst dataset must be provided.")

     completed_timeline = remove_missing_data(full_interpolated_timeline)

@@ -127,12 +126,14 @@ def mag_l1c(
     global_attributes["missing_sequences"] = ""

     try:
-        global_attributes["is_mago"] = normal_mode_dataset.attrs["is_mago"]
-        global_attributes["is_active"] = normal_mode_dataset.attrs["is_active"]
+        active_dataset = normal_mode_dataset or burst_mode_dataset
+
+        global_attributes["is_mago"] = active_dataset.attrs["is_mago"]
+        global_attributes["is_active"] = active_dataset.attrs["is_active"]

         # Check if all vectors are primary in both normal and burst datasets
-        is_mago = normal_mode_dataset.attrs.get("is_mago", "False") == "True"
-        normal_all_primary = normal_mode_dataset.attrs.get("all_vectors_primary", False)
+        is_mago = active_dataset.attrs.get("is_mago", "False") == "True"
+        normal_all_primary = active_dataset.attrs.get("all_vectors_primary", False)

         # Default for missing burst dataset: 1 if MAGO (expected primary), 0 if MAGI
         burst_all_primary = is_mago
@@ -146,14 +147,14 @@ def mag_l1c(
             normal_all_primary and burst_all_primary
         )

-        global_attributes["missing_sequences"] = normal_mode_dataset.attrs[
+        global_attributes["missing_sequences"] = active_dataset.attrs[
             "missing_sequences"
         ]
     except KeyError as e:
         logger.info(
             f"Key error when assigning global attributes, attribute not found in "
             f"L1B file with logical source "
-            f"{normal_mode_dataset.attrs['Logical_source']}: {e}"
+            f"{active_dataset.attrs['Logical_source']}: {e}"
         )

     global_attributes["interpolation_method"] = interp_function.name
@@ -176,16 +177,24 @@ def mag_l1c(
         attrs=attribute_manager.get_variable_attributes("vector_attrs"),
     )

-    output_dataset["vector_magnitude"] = xr.apply_ufunc(
-        lambda x: np.linalg.norm(x[:4]),
-        output_dataset["vectors"],
-        input_core_dims=[["direction"]],
-        output_core_dims=[[]],
-        vectorize=True,
-    )
-    output_dataset[
-        "vector_magnitude"
-    ].attrs = attribute_manager.get_variable_attributes("vector_magnitude_attrs")
+    if len(output_dataset["vectors"]) > 0:
+        output_dataset["vector_magnitude"] = xr.apply_ufunc(
+            lambda x: np.linalg.norm(x[:4]),
+            output_dataset["vectors"],
+            input_core_dims=[["direction"]],
+            output_core_dims=[[]],
+            vectorize=True,
+        )
+        output_dataset[
+            "vector_magnitude"
+        ].attrs = attribute_manager.get_variable_attributes("vector_magnitude_attrs")
+    else:
+        output_dataset["vector_magnitude"] = xr.DataArray(
+            np.empty((0, 1)),
+            name="vector_magnitude",
+            dims=["epoch", "vector_magnitude"],
+            attrs=attribute_manager.get_variable_attributes("vector_magnitude_attrs"),
+        )

     output_dataset["compression_flags"] = xr.DataArray(
         completed_timeline[:, 6:8],
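
For context, the guarded branch above wraps a per-sample norm computed over the `direction` core dimension. A minimal standalone sketch of that `xr.apply_ufunc` pattern, using made-up data rather than anything from the package:

```python
import numpy as np
import xarray as xr

# Toy stand-in for the L1C vectors variable: one epoch, four components (x, y, z, range).
ds = xr.Dataset(
    {"vectors": (("epoch", "direction"), np.array([[3.0, 4.0, 0.0, 1.0]]))}
)

magnitude = xr.apply_ufunc(
    lambda x: np.linalg.norm(x[:4]),  # norm over the per-sample direction axis
    ds["vectors"],
    input_core_dims=[["direction"]],
    output_core_dims=[[]],
    vectorize=True,
)
print(magnitude.values)  # [5.09901951], i.e. sqrt(3**2 + 4**2 + 0**2 + 1**2)
```

With `vectorize=True`, NumPy's `np.vectorize` probes the first element to infer an output dtype and raises on size-0 input unless `otypes` is set, which is presumably why the empty-dataset case now gets its own `xr.DataArray` branch.
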
@@ -265,7 +274,7 @@ def select_datasets(


 def process_mag_l1c(
-    normal_mode_dataset: xr.Dataset,
+    normal_mode_dataset: xr.Dataset | None,
     burst_mode_dataset: xr.Dataset,
     interpolation_function: InterpolationFunction,
     day_to_process: np.datetime64 | None = None,
@@ -305,38 +314,51 @@ def process_mag_l1c(
     np.ndarray
         An (n, 8) shaped array containing the completed timeline.
     """
-    norm_epoch = normal_mode_dataset["epoch"].data
-    if "vectors_per_second" in normal_mode_dataset.attrs:
-        normal_vecsec_dict = vectors_per_second_from_string(
-            normal_mode_dataset.attrs["vectors_per_second"]
-        )
-    else:
-        normal_vecsec_dict = None
-
-    output_dataset = normal_mode_dataset.copy(deep=True)
-    output_dataset["sample_interpolated"] = xr.DataArray(
-        np.zeros(len(normal_mode_dataset))
-    )
     day_start_ns = None
     day_end_ns = None

     if day_to_process is not None:
-        day_start = day_to_process.astype("datetime64[s]") - np.timedelta64(15, "m")
+        day_start = day_to_process.astype("datetime64[s]") - np.timedelta64(30, "m")

-        # get the end of the day plus 15 minutes
+        # get the end of the day plus 30 minutes
         day_end = (
             day_to_process.astype("datetime64[s]")
             + np.timedelta64(1, "D")
-            + np.timedelta64(15, "m")
+            + np.timedelta64(30, "m")
         )

         day_start_ns = et_to_ttj2000ns(str_to_et(str(day_start)))
         day_end_ns = et_to_ttj2000ns(str_to_et(str(day_end)))

-    gaps = find_all_gaps(norm_epoch, normal_vecsec_dict, day_start_ns, day_end_ns)
+    if normal_mode_dataset:
+        norm_epoch = normal_mode_dataset["epoch"].data
+        if "vectors_per_second" in normal_mode_dataset.attrs:
+            normal_vecsec_dict = vectors_per_second_from_string(
+                normal_mode_dataset.attrs["vectors_per_second"]
+            )
+        else:
+            normal_vecsec_dict = None
+
+        gaps = find_all_gaps(norm_epoch, normal_vecsec_dict, day_start_ns, day_end_ns)
+    else:
+        norm_epoch = [day_start_ns, day_end_ns]
+        gaps = np.array(
+            [
+                [
+                    day_start_ns,
+                    day_end_ns,
+                    VecSec.TWO_VECS_PER_S.value,
+                ]
+            ]
+        )

     new_timeline = generate_timeline(norm_epoch, gaps)
-    norm_filled = fill_normal_data(normal_mode_dataset, new_timeline)
+
+    if normal_mode_dataset:
+        norm_filled: np.ndarray = fill_normal_data(normal_mode_dataset, new_timeline)
+    else:
+        norm_filled = generate_empty_norm_array(new_timeline)
+
     interpolated = interpolate_gaps(
         burst_mode_dataset, gaps, norm_filled, interpolation_function
     )
@@ -344,10 +366,32 @@ def process_mag_l1c(
     return interpolated


+def generate_empty_norm_array(new_timeline: np.ndarray) -> np.ndarray:
+    """
+    Generate an empty Normal mode array with the new timeline.
+
+    Parameters
+    ----------
+    new_timeline : np.ndarray
+        A 1D array of timestamps to fill.
+
+    Returns
+    -------
+    np.ndarray
+        An (n, 8) shaped array containing the timeline filled with `FILLVAL` data.
+    """
+    # TODO: fill with FILLVAL
+    norm_filled: np.ndarray = np.zeros((len(new_timeline), 8))
+    norm_filled[:, 0] = new_timeline
+    # Flags, will also indicate any missed timestamps
+    norm_filled[:, 5] = ModeFlags.MISSING.value
+
+    return norm_filled
+
+
 def fill_normal_data(
     normal_dataset: xr.Dataset,
-    new_timeline: np.ndarray,
-    day_to_process: np.datetime64 | None = None,
+    new_timeline: np.ndarray | None = None,
 ) -> np.ndarray:
     """
     Fill the new timeline with the normal mode data.
@@ -358,26 +402,23 @@ def fill_normal_data(
     ----------
     normal_dataset : xr.Dataset
         The normal mode dataset.
-    new_timeline : np.ndarray
-        A 1D array of timestamps to fill.
-    day_to_process : np.datetime64, optional
-        The day to process, in np.datetime64[D] format. This is used to fill
-        gaps at the beginning or end of the day if needed. If not included, these
-        gaps will not be filled.
+    new_timeline : np.ndarray, optional
+        A 1D array of timestamps to fill. If not provided, the normal mode timestamps
+        will be used.

     Returns
     -------
-    np.ndarray
+    filled_timeline : np.ndarray
         An (n, 8) shaped array containing the timeline filled with normal mode data.
         Gaps are marked as -1 in the generated flag column at index 5.
         Indices: 0 - epoch, 1-4 - vector x, y, z, and range, 5 - generated flag,
         6-7 - compression flags.
     """
-    # TODO: fill with FILLVAL
-    filled_timeline: np.ndarray = np.zeros((len(new_timeline), 8))
-    filled_timeline[:, 0] = new_timeline
-    # Flags, will also indicate any missed timestamps
-    filled_timeline[:, 5] = ModeFlags.MISSING.value
+    if new_timeline is None:
+        new_timeline = normal_dataset["epoch"].data
+
+    filled_timeline = generate_empty_norm_array(new_timeline)
+
     for index, timestamp in enumerate(normal_dataset["epoch"].data):
         timeline_index = np.searchsorted(new_timeline, timestamp)
         filled_timeline[timeline_index, 1:5] = normal_dataset["vectors"].data[index]
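
As an illustration of the fill pattern above (not code from the package), `np.searchsorted` places each normal-mode sample into its slot in the new timeline while untouched rows keep the missing flag; the flag value used for filled rows below is hypothetical:

```python
import numpy as np

new_timeline = np.array([0.0, 1.0, 2.0, 3.0])        # regular timeline (toy values)
norm_epochs = np.array([1.0, 3.0])                    # normal-mode sample times
norm_vectors = np.array([[1.0, 1.0, 1.0, 0.0],
                         [3.0, 3.0, 3.0, 0.0]])       # x, y, z, range

filled = np.zeros((len(new_timeline), 8))
filled[:, 0] = new_timeline
filled[:, 5] = -1                                     # gaps flagged as -1, per the docstring

for i, t in enumerate(norm_epochs):
    idx = np.searchsorted(new_timeline, t)            # index of the matching timeline slot
    filled[idx, 1:5] = norm_vectors[i]
    filled[idx, 5] = 0                                 # hypothetical "not missing" flag

print(filled[:, 5])  # [-1.  0. -1.  0.]
```
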
@@ -463,20 +504,17 @@ def interpolate_gaps(
        ]

        short = (gap_timeline >= burst_epochs[burst_start]) & (
-            gap_timeline <= burst_epochs[burst_gap_end]
+            gap_timeline <= burst_epochs[burst_end]
        )
-        if len(gap_timeline) != (short).sum():
-            print(f"Chopping timeline from {len(gap_timeline)} to {short.sum()}")
+        num_short = int(short.sum())
+
+        if len(gap_timeline) != num_short:
+            print(f"Chopping timeline from {len(gap_timeline)} to {num_short}")

        # Limit timestamps to only include the areas with burst data
-        gap_timeline = gap_timeline[
-            (
-                (gap_timeline >= burst_epochs[burst_start])
-                & (gap_timeline <= burst_epochs[burst_gap_end])
-            )
-        ]
+        gap_timeline = gap_timeline[short]
        # do not include range
-        gap_fill = interpolation_function(
+        adjusted_gap_timeline, gap_fill = interpolation_function(
            burst_vectors[burst_start:burst_end, :3],
            burst_epochs[burst_start:burst_end],
            gap_timeline,
@@ -485,7 +523,7 @@
        )

        # gaps should not have data in timeline, still check it
-        for index, timestamp in enumerate(gap_timeline):
+        for index, timestamp in enumerate(adjusted_gap_timeline):
            timeline_index = np.searchsorted(filled_norm_timeline[:, 0], timestamp)
            if sum(
                filled_norm_timeline[timeline_index, 1:4]
@@ -500,6 +538,18 @@
                "compression_flags"
            ].data[burst_gap_start + index]

+        # for any timestamp that was not filled and is still missing, remove it
+        missing_timeline = np.setdiff1d(gap_timeline, adjusted_gap_timeline)
+
+        for timestamp in missing_timeline:
+            timeline_index = np.searchsorted(filled_norm_timeline[:, 0], timestamp)
+            if filled_norm_timeline[timeline_index, 5] != ModeFlags.MISSING.value:
+                raise RuntimeError(
+                    "Self-inconsistent data. "
+                    "Gaps not included in final timeline should be missing."
+                )
+            np.delete(filled_norm_timeline, timeline_index)
+
     return filled_norm_timeline


imap_processing/mag/l1d/mag_l1d_data.py

@@ -693,10 +693,12 @@ class MagL1d(MagL2L1dBase):  # type: ignore[misc]
        - gradiometer_offset_magnitude: magnitude of the offset vector
        - quality_flags: quality flags (1 if magnitude > threshold, 0 otherwise)
        """
-        aligned_magi = linear(
+        # TODO: should this extrapolate or should non-overlapping data be removed?
+        _, aligned_magi = linear(
            magi_vectors,
            magi_epoch,
            mago_epoch,
+            extrapolate=True,
        )

        diff = aligned_magi - mago_vectors
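
The package's `linear` interpolation helper is not shown in this diff; as a rough stand-in, linear alignment of MAGI vectors onto MAGO epochs with extrapolation past the ends can be sketched with SciPy (toy values, hypothetical names):

```python
import numpy as np
from scipy.interpolate import interp1d

magi_epoch = np.array([0.0, 1.0, 2.0])
magi_vectors = np.array([[0.0, 0.0, 1.0],
                         [1.0, 0.0, 1.0],
                         [2.0, 0.0, 1.0]])
mago_epoch = np.array([0.5, 2.5])  # 2.5 falls outside MAGI coverage

# fill_value="extrapolate" continues the end segments instead of raising.
aligned = interp1d(magi_epoch, magi_vectors, axis=0, fill_value="extrapolate")(mago_epoch)
print(aligned)  # [[0.5 0.  1. ]
                #  [2.5 0.  1. ]]
```

The TODO in the hunk flags the open design question: extrapolating keeps every MAGO epoch but invents MAGI values beyond its coverage, whereas trimming would drop the non-overlapping samples.
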

imap_processing/quality_flags.py

@@ -64,6 +64,7 @@ class ImapAttitudeUltraFlags(FlagNameMixin):
    AUXMISMATCH = 2**1  # bit 1 # aux packet does not match Universal Spin Table
    SPINPHASE = 2**2  # bit 2 # spin phase flagged by Universal Spin Table
    SPINPERIOD = 2**3  # bit 3 # spin period flagged by Universal Spin Table
+    DURINGREPOINT = 2**4  # bit 4 # spin during a repointing


class ImapRatesUltraFlags(FlagNameMixin):
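
Each flag is a distinct power of two, so flags combine with bitwise OR and are tested with bitwise AND; a tiny illustration using the bit values from the enum above (the composite value is made up):

```python
SPINPHASE = 2**2
DURINGREPOINT = 2**4

quality = SPINPHASE | DURINGREPOINT   # 0b10100 == 20
if quality & DURINGREPOINT:
    print("spin occurred during a repointing")
```
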

imap_processing/spice/geometry.py

@@ -129,7 +129,7 @@ def imap_state(
     -------
     state : np.ndarray
         The Cartesian state vector representing the position and velocity of the
-        IMAP spacecraft.
+        IMAP spacecraft. Units are km and km/s.
     """
     state, _ = spiceypy.spkezr(
         SpiceBody.IMAP.name, et, ref_frame.name, abcorr, observer.name
@@ -323,6 +323,7 @@ def frame_transform_az_el(
        Ephemeris time(s) corresponding to position(s).
    az_el : np.ndarray
        <azimuth, elevation> vector or array of vectors in reference frame `from_frame`.
+        Azimuth and elevation pairs are always the final dimension of the array.
        There are several possible shapes for the input az_el and et:
        1. A single az_el vector may be provided for multiple `et` query times
        2. A single `et` may be provided for multiple az_el vectors,
@@ -340,15 +341,16 @@ def frame_transform_az_el(
    to_frame_az_el : np.ndarray
        Azimuth/elevation coordinates in reference frame `to_frame`. This
        output coordinate vector will have shape (2,) if a single `az_el` position
-        vector and single `et` time are input. Otherwise, it will have shape (n, 2)
-        where n is the number of input position vector or ephemeris times. The last
-        axis of the output vector contains azimuth in the 0th position and elevation
-        in the 1st position.
+        vector and single `et` time are input. Otherwise, it will have shape (..., 2)
+        where ... matches the leading dimensions of the input position vector or
+        ephemeris times. The last axis of the output vector contains azimuth in
+        the 0th position and elevation in the 1st position.
    """
    # Convert input az/el to Cartesian vectors
-    spherical_coords_in = np.array(
-        [np.ones_like(az_el[..., 0]), az_el[..., 0], az_el[..., 1]]
-    ).T
+    spherical_coords_in = np.stack(
+        [np.ones_like(az_el[..., 0]), az_el[..., 0], az_el[..., 1]],
+        axis=-1,
+    )
    from_frame_cartesian = spherical_to_cartesian(spherical_coords_in)
    # Transform to to_frame
    to_frame_cartesian = frame_transform(et, from_frame_cartesian, from_frame, to_frame)
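
The motivation for swapping `np.array(...).T` for `np.stack(..., axis=-1)` follows from the new `(..., 2)` contract above: `.T` reverses every axis, so for inputs with more than one leading dimension the components land in the wrong place, while `np.stack` keeps the leading dimensions and appends the component axis. A quick shape check with a made-up array (independent of the package):

```python
import numpy as np

az_el = np.arange(16, dtype=float).reshape(2, 4, 2)   # two groups of four az/el pairs

parts = [np.ones_like(az_el[..., 0]), az_el[..., 0], az_el[..., 1]]
print(np.array(parts).T.shape)         # (4, 2, 3) -- leading dims reversed
print(np.stack(parts, axis=-1).shape)  # (2, 4, 3) -- leading dims preserved
```

For a single pair or a flat (n, 2) array the two forms happen to agree, which is presumably why the old code worked until higher-dimensional inputs were allowed.
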
@@ -531,7 +533,7 @@ def cartesian_to_spherical(
    az = np.degrees(az)
    el = np.degrees(el)

-    spherical_coords = np.stack((np.squeeze(magnitude_v), az, el), axis=-1)
+    spherical_coords = np.stack((np.squeeze(magnitude_v, -1), az, el), axis=-1)

    return spherical_coords

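Similarly, `np.squeeze` with no axis argument removes every length-1 dimension, including a leading single-sample batch dimension, whereas `np.squeeze(magnitude_v, -1)` drops only the trailing axis so the result still lines up with `az` and `el`; for example (shapes are illustrative only):

```python
import numpy as np

magnitude_v = np.ones((1, 5, 1))              # e.g. one batch of five magnitudes, keepdims-style
print(np.squeeze(magnitude_v).shape)          # (5,)   -- batch dimension lost
print(np.squeeze(magnitude_v, -1).shape)      # (1, 5) -- only the trailing axis removed
```
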
imap_processing/spice/pointing_frame.py

@@ -34,14 +34,14 @@ POINTING_SEGMENT_DTYPE = np.dtype(
 )


-def generate_pointing_attitude_kernel(imap_attitude_ck: Path) -> list[Path]:
+def generate_pointing_attitude_kernel(imap_attitude_cks: list[Path]) -> list[Path]:
    """
    Generate pointing attitude kernel from input IMAP CK kernel.

    Parameters
    ----------
-    imap_attitude_ck : Path
-        Location of the IMAP attitude kernel from which to generate pointing
+    imap_attitude_cks : list[Path]
+        List of the IMAP attitude kernels from which to generate pointing
        attitude.

    Returns
@@ -49,20 +49,29 @@ def generate_pointing_attitude_kernel(imap_attitude_ck: Path) -> list[Path]:
    pointing_kernel_path : list[Path]
        Location of the new pointing kernels.
    """
-    pointing_segments = calculate_pointing_attitude_segments(imap_attitude_ck)
+    pointing_segments = calculate_pointing_attitude_segments(imap_attitude_cks)
+    if len(pointing_segments) == 0:
+        raise ValueError("No Pointings covered by input dependencies.")
+
    # get the start and end yyyy_doy strings
-    # TODO: For now just use the input CK start/end dates. It is possible that
-    # the end date is incorrect b/c the repoint table determines the last
-    # segment in the pointing kernel.
-    spice_file = SPICEFilePath(imap_attitude_ck.name)
+    start_datetime = spiceypy.et2datetime(
+        sct_to_et(pointing_segments[0]["start_sclk_ticks"])
+    )
+    end_datetime = spiceypy.et2datetime(
+        sct_to_et(pointing_segments[-1]["end_sclk_ticks"])
+    )
+    # Use the last ck from sorted list to get the version number. I
+    # don't think this will be anything but 1.
+    sorted_ck_paths = list(sorted(imap_attitude_cks, key=lambda x: x.name))
+    spice_file = SPICEFilePath(sorted_ck_paths[-1].name)
    pointing_kernel_path = (
-        imap_attitude_ck.parent / f"imap_dps_"
-        f"{spice_file.spice_metadata['start_date'].strftime('%Y_%j')}_"
-        f"{spice_file.spice_metadata['end_date'].strftime('%Y_%j')}_"
+        sorted_ck_paths[-1].parent / f"imap_dps_"
+        f"{start_datetime.strftime('%Y_%j')}_"
+        f"{end_datetime.strftime('%Y_%j')}_"
        f"{spice_file.spice_metadata['version']}.ah.bc"
    )
    write_pointing_frame_ck(
-        pointing_kernel_path, pointing_segments, imap_attitude_ck.name
+        pointing_kernel_path, pointing_segments, [p.name for p in imap_attitude_cks]
    )
    return [pointing_kernel_path]

@@ -93,7 +102,7 @@ def open_spice_ck_file(pointing_frame_path: Path) -> Generator[int, None, None]:


 def write_pointing_frame_ck(
-    pointing_kernel_path: Path, segment_data: np.ndarray, parent_ck: str
+    pointing_kernel_path: Path, segment_data: np.ndarray, parent_cks: list[str]
 ) -> None:
    """
    Write a Pointing Frame attitude kernel.
@@ -108,8 +117,8 @@ def write_pointing_frame_ck(
        ("end_sclk_ticks", np.float64),
        ("quaternion", np.float64, (4,)),
        ("pointing_id", np.uint32),
-    parent_ck : str
-        Filename of the CK kernel that the quaternion was derived from.
+    parent_cks : list[str]
+        Filenames of the CK kernels that the quaternions were derived from.
    """
    id_imap_dps = spiceypy.gipool("FRAME_IMAP_DPS", 0, 1)

@@ -119,10 +128,12 @@ def write_pointing_frame_ck(
        "",
        f"Original file name: {pointing_kernel_path.name}",
        f"Creation date: {datetime.now(timezone.utc).strftime('%Y-%m-%d')}",
-        f"Parent file: {parent_ck}",
+        f"Parent files: {parent_cks}",
        "",
    ]

+    logger.debug(f"Writing pointing attitude kernel: {pointing_kernel_path}")
+
    with open_spice_ck_file(pointing_kernel_path) as handle:
        # Write the comments to the file
        spiceypy.dafac(handle, comments)
@@ -161,9 +172,11 @@ def write_pointing_frame_ck(
            np.array([TICK_DURATION]),
        )

+    logger.debug(f"Finished writing pointing attitude kernel: {pointing_kernel_path}")
+

 def calculate_pointing_attitude_segments(
-    ck_path: Path,
+    ck_paths: list[Path],
 ) -> NDArray:
    """
    Calculate the data for each segment of the DPS_FRAME attitude kernel.
@@ -177,8 +190,8 @@ def calculate_pointing_attitude_segments(


    Parameters
-    ck_path : pathlib.Path
-        Location of the CK kernel.
+    ck_paths : list[pathlib.Path]
+        List of CK kernels to use to generate the pointing attitude kernel.

    Returns
    -------
@@ -200,36 +213,41 @@ def calculate_pointing_attitude_segments(
        - IMAP historical attitude kernel from which the pointing frame kernel will
          be generated.
    """
-    logger.info(f"Extracting mean spin axes from CK kernel {ck_path.name}")
+    logger.info(
+        f"Extracting mean spin axes for all Pointings that are"
+        f" fully covered by the CK files: {[p.name for p in ck_paths]}"
+    )
    # Get IDs.
    # https://spiceypy.readthedocs.io/en/main/documentation.html#spiceypy.spiceypy.gipool
    id_imap_sclk = spiceypy.gipool("CK_-43000_SCLK", 0, 1)
+    id_imap_spacecraft = spiceypy.gipool("FRAME_IMAP_SPACECRAFT", 0, 1)

-    # Check that the last loaded kernel matches it input kernel name. This ensures
-    # that this CK take priority when computing attitude for it's time coverage.
-    count = spiceypy.ktotal("ck")
-    loaded_ck_kernel, _, _, _ = spiceypy.kdata(count - 1, "ck")
-    if str(ck_path) != loaded_ck_kernel:
-        raise ValueError(
-            f"Error: Expected CK kernel {ck_path} but loaded {loaded_ck_kernel}"
+    # This job relies on the batch starter to provide all the correct CK kernels
+    # to cover the time range of the new repoint table.
+    # Get the coverage of the CK files storing the earliest start time and
+    # latest end time.
+    et_start = np.inf
+    et_end = -np.inf
+    for ck_path in ck_paths:
+        ck_cover = spiceypy.ckcov(
+            str(ck_path), int(id_imap_spacecraft), True, "INTERVAL", 0, "TDB"
        )
+        num_intervals = spiceypy.wncard(ck_cover)
+        individual_ck_start, _ = spiceypy.wnfetd(ck_cover, 0)
+        _, individual_ck_end = spiceypy.wnfetd(ck_cover, num_intervals - 1)
+        logger.debug(
+            f"{ck_path.name} covers time range: ({et_to_utc(individual_ck_start)}, "
+            f"{et_to_utc(individual_ck_end)}) in {num_intervals} intervals."
+        )
+        et_start = min(et_start, individual_ck_start)
+        et_end = max(et_end, individual_ck_end)

-    id_imap_spacecraft = spiceypy.gipool("FRAME_IMAP_SPACECRAFT", 0, 1)
-
-    # Select only the pointings within the attitude coverage.
-    ck_cover = spiceypy.ckcov(
-        str(ck_path), int(id_imap_spacecraft), True, "INTERVAL", 0, "TDB"
-    )
-    num_intervals = spiceypy.wncard(ck_cover)
-    et_start, _ = spiceypy.wnfetd(ck_cover, 0)
-    _, et_end = spiceypy.wnfetd(ck_cover, num_intervals - 1)
    logger.info(
-        f"{ck_path.name} contains {num_intervals} intervals with "
-        f"start time: {et_to_utc(et_start)}, and end time: {et_to_utc(et_end)}"
+        f"CK kernels combined coverage range: "
+        f"{(et_to_utc(et_start), et_to_utc(et_end))}, "
    )

-    # Get data from the repoint table and filter to only the pointings fully
-    # covered by this attitude kernel
+    # Get data from the repoint table and convert to Pointings
    repoint_df = get_repoint_data()
    repoint_df["repoint_start_et"] = sct_to_et(
        met_to_sclkticks(repoint_df["repoint_start_met"].values)
@@ -237,20 +255,29 @@ def calculate_pointing_attitude_segments(
    repoint_df["repoint_end_et"] = sct_to_et(
        met_to_sclkticks(repoint_df["repoint_end_met"].values)
    )
-    repoint_df = repoint_df[
-        (repoint_df["repoint_end_et"] >= et_start)
-        & (repoint_df["repoint_start_et"] <= et_end)
-    ]
-    n_pointings = len(repoint_df) - 1
+    pointing_ids = repoint_df["repoint_id"].values[:-1]
+    pointing_start_ets = repoint_df["repoint_end_et"].values[:-1]
+    pointing_end_ets = repoint_df["repoint_start_et"].values[1:]
+
+    # Keep only the pointings that are fully covered by the attitude kernels.
+    keep_mask = (pointing_start_ets >= et_start) & (pointing_end_ets <= et_end)
+    # Filter the pointing data.
+    pointing_ids = pointing_ids[keep_mask]
+    pointing_start_ets = pointing_start_ets[keep_mask]
+    pointing_end_ets = pointing_end_ets[keep_mask]
+
+    n_pointings = len(pointing_ids)
+    if n_pointings == 0:
+        logger.warning(
+            "No Pointings identified based on coverage of CK files. Skipping."
+        )

    pointing_segments = np.zeros(n_pointings, dtype=POINTING_SEGMENT_DTYPE)

    for i_pointing in range(n_pointings):
-        pointing_segments[i_pointing]["pointing_id"] = repoint_df.iloc[i_pointing][
-            "repoint_id"
-        ]
-        pointing_start_et = repoint_df.iloc[i_pointing]["repoint_end_et"]
-        pointing_end_et = repoint_df.iloc[i_pointing + 1]["repoint_start_et"]
+        pointing_segments[i_pointing]["pointing_id"] = pointing_ids[i_pointing]
+        pointing_start_et = pointing_start_ets[i_pointing]
+        pointing_end_et = pointing_end_ets[i_pointing]
        logger.debug(
            f"Calculating pointing attitude for pointing "
            f"{pointing_segments[i_pointing]['pointing_id']} with time "
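
The reworked block above pairs each repoint's end with the next repoint's start to define a Pointing, then masks out Pointings that are not fully inside the combined CK coverage; a toy walk-through of that slicing (all values invented):

```python
import numpy as np

repoint_id = np.array([10, 11, 12, 13])
repoint_start_et = np.array([0.0, 100.0, 200.0, 300.0])
repoint_end_et = np.array([5.0, 105.0, 205.0, 305.0])

pointing_ids = repoint_id[:-1]              # Pointing i is labelled by repoint i...
pointing_start_ets = repoint_end_et[:-1]    # ...starts when repoint i ends...
pointing_end_ets = repoint_start_et[1:]     # ...and ends when repoint i+1 begins

et_start, et_end = 90.0, 250.0              # pretend combined CK coverage
keep_mask = (pointing_start_ets >= et_start) & (pointing_end_ets <= et_end)
print(pointing_ids[keep_mask])              # [11]
```
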
imap_processing/swapi/constants.py (new file)

@@ -0,0 +1,4 @@
+"""Constants for SWAPI processing."""
+
+NUM_PACKETS_PER_SWEEP = 12
+NUM_ENERGY_STEPS = 72