imap-processing 0.17.0__py3-none-any.whl → 0.18.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of imap-processing might be problematic.
Files changed (89)
  1. imap_processing/_version.py +2 -2
  2. imap_processing/ccsds/excel_to_xtce.py +12 -0
  3. imap_processing/cdf/config/imap_codice_global_cdf_attrs.yaml +6 -6
  4. imap_processing/cdf/config/imap_codice_l1a_variable_attrs.yaml +11 -0
  5. imap_processing/cdf/config/imap_codice_l1b_variable_attrs.yaml +11 -0
  6. imap_processing/cdf/config/imap_codice_l2_variable_attrs.yaml +24 -0
  7. imap_processing/cdf/config/imap_hit_l1a_variable_attrs.yaml +163 -100
  8. imap_processing/cdf/config/imap_hit_l2_variable_attrs.yaml +4 -4
  9. imap_processing/cdf/config/imap_ialirt_l1_variable_attrs.yaml +97 -54
  10. imap_processing/cdf/config/imap_idex_l2b_variable_attrs.yaml +119 -36
  11. imap_processing/cdf/config/imap_idex_l2c_variable_attrs.yaml +16 -90
  12. imap_processing/cdf/config/imap_lo_global_cdf_attrs.yaml +30 -0
  13. imap_processing/cdf/config/imap_mag_global_cdf_attrs.yaml +15 -1
  14. imap_processing/cdf/config/imap_ultra_global_cdf_attrs.yaml +60 -0
  15. imap_processing/cdf/config/imap_ultra_l1b_variable_attrs.yaml +91 -11
  16. imap_processing/cli.py +28 -5
  17. imap_processing/codice/codice_l1a.py +36 -48
  18. imap_processing/codice/codice_l1b.py +1 -1
  19. imap_processing/codice/codice_l2.py +0 -9
  20. imap_processing/codice/constants.py +481 -498
  21. imap_processing/hit/l0/decom_hit.py +2 -2
  22. imap_processing/hit/l1a/hit_l1a.py +64 -24
  23. imap_processing/hit/l1b/constants.py +5 -0
  24. imap_processing/hit/l1b/hit_l1b.py +18 -16
  25. imap_processing/hit/l2/constants.py +1 -1
  26. imap_processing/hit/l2/hit_l2.py +4 -5
  27. imap_processing/ialirt/constants.py +21 -0
  28. imap_processing/ialirt/generate_coverage.py +188 -0
  29. imap_processing/ialirt/l0/parse_mag.py +62 -5
  30. imap_processing/ialirt/l0/process_swapi.py +1 -1
  31. imap_processing/ialirt/l0/process_swe.py +23 -7
  32. imap_processing/ialirt/utils/constants.py +22 -16
  33. imap_processing/ialirt/utils/create_xarray.py +42 -19
  34. imap_processing/idex/idex_constants.py +1 -5
  35. imap_processing/idex/idex_l2b.py +246 -67
  36. imap_processing/idex/idex_l2c.py +30 -196
  37. imap_processing/lo/l0/lo_apid.py +1 -0
  38. imap_processing/lo/l1a/lo_l1a.py +44 -0
  39. imap_processing/lo/packet_definitions/lo_xtce.xml +5359 -106
  40. imap_processing/mag/constants.py +1 -0
  41. imap_processing/mag/l1d/__init__.py +0 -0
  42. imap_processing/mag/l1d/mag_l1d.py +133 -0
  43. imap_processing/mag/l1d/mag_l1d_data.py +588 -0
  44. imap_processing/mag/l2/__init__.py +0 -0
  45. imap_processing/mag/l2/mag_l2.py +25 -20
  46. imap_processing/mag/l2/mag_l2_data.py +191 -130
  47. imap_processing/quality_flags.py +20 -2
  48. imap_processing/spice/geometry.py +25 -3
  49. imap_processing/spice/pointing_frame.py +1 -1
  50. imap_processing/spice/spin.py +4 -0
  51. imap_processing/spice/time.py +51 -0
  52. imap_processing/swapi/l2/swapi_l2.py +52 -8
  53. imap_processing/swapi/swapi_utils.py +1 -1
  54. imap_processing/swe/l1b/swe_l1b.py +2 -4
  55. imap_processing/ultra/constants.py +49 -1
  56. imap_processing/ultra/l0/decom_tools.py +15 -8
  57. imap_processing/ultra/l0/decom_ultra.py +35 -11
  58. imap_processing/ultra/l0/ultra_utils.py +97 -5
  59. imap_processing/ultra/l1a/ultra_l1a.py +25 -4
  60. imap_processing/ultra/l1b/cullingmask.py +3 -3
  61. imap_processing/ultra/l1b/de.py +53 -15
  62. imap_processing/ultra/l1b/extendedspin.py +26 -2
  63. imap_processing/ultra/l1b/lookup_utils.py +171 -50
  64. imap_processing/ultra/l1b/quality_flag_filters.py +14 -0
  65. imap_processing/ultra/l1b/ultra_l1b_culling.py +198 -5
  66. imap_processing/ultra/l1b/ultra_l1b_extended.py +304 -66
  67. imap_processing/ultra/l1c/helio_pset.py +54 -7
  68. imap_processing/ultra/l1c/spacecraft_pset.py +9 -1
  69. imap_processing/ultra/l1c/ultra_l1c.py +2 -0
  70. imap_processing/ultra/l1c/ultra_l1c_pset_bins.py +106 -109
  71. imap_processing/ultra/utils/ultra_l1_utils.py +13 -1
  72. {imap_processing-0.17.0.dist-info → imap_processing-0.18.0.dist-info}/METADATA +2 -2
  73. {imap_processing-0.17.0.dist-info → imap_processing-0.18.0.dist-info}/RECORD +76 -83
  74. imap_processing/ultra/lookup_tables/Angular_Profiles_FM45_LeftSlit.csv +0 -526
  75. imap_processing/ultra/lookup_tables/Angular_Profiles_FM45_RightSlit.csv +0 -526
  76. imap_processing/ultra/lookup_tables/Angular_Profiles_FM90_LeftSlit.csv +0 -526
  77. imap_processing/ultra/lookup_tables/Angular_Profiles_FM90_RightSlit.csv +0 -524
  78. imap_processing/ultra/lookup_tables/EgyNorm.mem.csv +0 -32769
  79. imap_processing/ultra/lookup_tables/FM45_Startup1_ULTRA_IMGPARAMS_20240719.csv +0 -2
  80. imap_processing/ultra/lookup_tables/FM90_Startup1_ULTRA_IMGPARAMS_20240719.csv +0 -2
  81. imap_processing/ultra/lookup_tables/dps_grid45_compressed.cdf +0 -0
  82. imap_processing/ultra/lookup_tables/ultra45_back-pos-luts.csv +0 -4097
  83. imap_processing/ultra/lookup_tables/ultra45_tdc_norm.csv +0 -2050
  84. imap_processing/ultra/lookup_tables/ultra90_back-pos-luts.csv +0 -4097
  85. imap_processing/ultra/lookup_tables/ultra90_tdc_norm.csv +0 -2050
  86. imap_processing/ultra/lookup_tables/yadjust.csv +0 -257
  87. {imap_processing-0.17.0.dist-info → imap_processing-0.18.0.dist-info}/LICENSE +0 -0
  88. {imap_processing-0.17.0.dist-info → imap_processing-0.18.0.dist-info}/WHEEL +0 -0
  89. {imap_processing-0.17.0.dist-info → imap_processing-0.18.0.dist-info}/entry_points.txt +0 -0
@@ -124,9 +124,9 @@ def parse_count_rates(sci_dataset: xr.Dataset) -> None:
     for dim in dims:
         if dim not in sci_dataset.coords:
             sci_dataset.coords[dim] = xr.DataArray(
-                np.arange(sci_dataset.sizes[dim], dtype=np.int16)
+                np.arange(sci_dataset.sizes[dim], dtype=np.uint16)
                 if dim == "gain"
-                else np.arange(sci_dataset.sizes[dim], dtype=np.int32),
+                else np.arange(sci_dataset.sizes[dim], dtype=np.uint32),
                 dims=[dim],
                 name=dim,
             )
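
For orientation, here is a minimal standalone sketch (plain xarray/NumPy, not taken from the package) of the pattern these lines implement: integer index coordinates are generated for any unlabeled dimension, and since indices are never negative the dtypes are now unsigned.

import numpy as np
import xarray as xr

# Stand-in dataset with two unlabeled dimensions, mirroring the pattern above.
ds = xr.Dataset({"counts": (("gain", "energy"), np.zeros((2, 64)))})

for dim in ds.dims:
    if dim not in ds.coords:
        # Indices are never negative, so unsigned dtypes suffice:
        # uint16 for the small "gain" axis, uint32 for everything else.
        dtype = np.uint16 if dim == "gain" else np.uint32
        ds.coords[dim] = xr.DataArray(
            np.arange(ds.sizes[dim], dtype=dtype), dims=[dim], name=dim
        )

print(ds.coords["gain"].dtype, ds.coords["energy"].dtype)  # uint16 uint32
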
@@ -245,6 +245,69 @@ def calculate_uncertainties(dataset: xr.Dataset) -> xr.Dataset:
     return dataset


+def add_cdf_attributes(
+    dataset: xr.Dataset, logical_source: str, attr_mgr: ImapCdfAttributes
+) -> xr.Dataset:
+    """
+    Add attributes to the dataset.
+
+    This function adds attributes to the dataset variables and dimensions.
+    It also adds dimension labels as coordinates to the dataset. The attributes
+    are defined in a YAML file and retrieved by the attribute manager.
+
+    Parameters
+    ----------
+    dataset : xarray.Dataset
+        The dataset to update.
+    logical_source : str
+        The logical source of the dataset.
+    attr_mgr : ImapCdfAttributes
+        The attribute manager to retrieve attributes.
+
+    Returns
+    -------
+    xarray.Dataset
+        The updated dataset with attributes and dimension labels.
+    """
+    dataset.attrs = attr_mgr.get_global_attributes(logical_source)
+
+    # Assign attributes and dimensions to each data array in the Dataset
+    for var in dataset.data_vars.keys():
+        try:
+            if "energy_delta" in var or var in {
+                "pkt_len",
+                "version",
+                "type",
+                "src_seq_ctr",
+                "seq_flgs",
+                "pkt_apid",
+                "sec_hdr_flg",
+            }:
+                # Skip schema check to avoid DEPEND_0 being added unnecessarily
+                dataset[var].attrs = attr_mgr.get_variable_attributes(
+                    var, check_schema=False
+                )
+            else:
+                dataset[var].attrs = attr_mgr.get_variable_attributes(var)
+        except KeyError:
+            logger.warning(f"Field {var} not found in attribute manager.")
+
+    # check_schema=False to avoid attr_mgr adding stuff dimensions don't need
+    for dim in dataset.dims:
+        dataset[dim].attrs = attr_mgr.get_variable_attributes(dim, check_schema=False)
+        if dim != "epoch":
+            label_array = xr.DataArray(
+                dataset[dim].values.astype(str),
+                name=f"{dim}_label",
+                dims=[dim],
+                attrs=attr_mgr.get_variable_attributes(
+                    f"{dim}_label", check_schema=False
+                ),
+            )
+            dataset.coords[f"{dim}_label"] = label_array
+    return dataset
+
+
 def process_science(
     dataset: xr.Dataset, attr_mgr: ImapCdfAttributes
 ) -> list[xr.Dataset]:
@@ -294,30 +357,7 @@ def process_science(

     # Update attributes and dimensions
     for logical_source, ds in l1a_datasets.items():
-        ds.attrs = attr_mgr.get_global_attributes(logical_source)
-
-        # Assign attributes and dimensions to each data array in the Dataset
-        for field in ds.data_vars.keys():
-            try:
-                ds[field].attrs = attr_mgr.get_variable_attributes(field)
-            except KeyError:
-                print(f"Field {field} not found in attribute manager.")
-                logger.warning(f"Field {field} not found in attribute manager.")
-
-        # check_schema=False to avoid attr_mgr adding stuff dimensions don't need
-        for dim in ds.dims:
-            ds[dim].attrs = attr_mgr.get_variable_attributes(dim, check_schema=False)
-            # TODO: should labels be added as coordinates? Check with SPDF
-            if dim != "epoch":
-                label_array = xr.DataArray(
-                    ds[dim].values.astype(str),
-                    name=f"{dim}_label",
-                    dims=[dim],
-                    attrs=attr_mgr.get_variable_attributes(
-                        f"{dim}_label", check_schema=False
-                    ),
-                )
-                ds.coords[f"{dim}_label"] = label_array
+        l1a_datasets[logical_source] = add_cdf_attributes(ds, logical_source, attr_mgr)

         logger.info(f"HIT L1A dataset created for {logical_source}")

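One subtlety in the refactored loop: it reassigns values for existing keys while iterating over items(). That is safe in Python because the set of keys never changes; only adding or removing keys during iteration raises. A standalone sketch:

# Reassigning values for existing keys during iteration is allowed;
# inserting or deleting keys mid-iteration would raise RuntimeError.
datasets = {"a": 1, "b": 2}
for key, value in datasets.items():
    datasets[key] = value * 10
print(datasets)  # {'a': 10, 'b': 20}
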
@@ -4,6 +4,11 @@
 # This is used to calculate the fractional livetime
 LIVESTIM_PULSES = 270

+# A factor used to find the count rate for sectored data that
+# accounts for the fact that a single spacecraft rotation is
+# split into 15 inclination ranges.
+SECTORS = 15
+
 # Fill values for missing data
 FILLVAL_FLOAT32 = -1.00e31
 FILLVAL_INT64 = -9223372036854775808
@@ -17,6 +17,7 @@ from imap_processing.hit.l1b.constants import (
     FILLVAL_FLOAT32,
     FILLVAL_INT64,
     LIVESTIM_PULSES,
+    SECTORS,
     SUMMED_PARTICLE_ENERGY_RANGE_MAPPING,
 )

@@ -106,16 +107,17 @@ def process_science_data(
     livetime = livetime.rename("livetime")

     # Process counts data to L1B datasets
-    l1b_datasets: dict = {}
-    l1b_datasets["imap_hit_l1b_standard-rates"] = process_standard_rates_data(
-        l1a_counts_dataset, livetime
-    )
-    l1b_datasets["imap_hit_l1b_summed-rates"] = process_summed_rates_data(
-        l1a_counts_dataset, livetime
-    )
-    l1b_datasets["imap_hit_l1b_sectored-rates"] = process_sectored_rates_data(
-        l1a_counts_dataset, livetime
-    )
+    l1b_datasets: dict = {
+        "imap_hit_l1b_standard-rates": process_standard_rates_data(
+            l1a_counts_dataset, livetime
+        ),
+        "imap_hit_l1b_summed-rates": process_summed_rates_data(
+            l1a_counts_dataset, livetime
+        ),
+        "imap_hit_l1b_sectored-rates": process_sectored_rates_data(
+            l1a_counts_dataset, livetime
+        ),
+    }

     # Update attributes and dimensions
     for logical_source, dataset in l1b_datasets.items():
@@ -444,7 +446,10 @@ def process_sectored_rates_data(

     Sectored counts data is transmitted 10 minutes after they are collected.
     To calculate rates, the sectored counts over 10 minutes need to be divided by
-    the sum of livetime values from the previous 10 minutes.
+    the sum of livetime values from the previous 10 minutes, multiplied by a factor
+    of 15 to account for the different inclination sectors (a single spacecraft
+    rotation is split into 15 inclination ranges). See equation 11 in the algorithm
+    document.

     Parameters
     ----------
@@ -460,10 +465,7 @@ def process_sectored_rates_data(
     xr.Dataset
         The processed L1B sectored rates dataset.
     """
-    # TODO
-    # -filter by epoch values in day being processed.
-    #  middle epoch (or mod 5 value for 6th frame)
-    # -consider refactoring calculate_rates function to handle sectored rates
+    # TODO - consider refactoring calculate_rates function to handle sectored rates

     # Define particles and coordinates
     particles = ["h", "he4", "cno", "nemgsi", "fe"]
@@ -520,7 +522,7 @@ def process_sectored_rates_data(
     rates = xr.DataArray(
         np.where(
             counts != FILLVAL_INT64,
-            (counts / livetime_10min_reshaped).astype(np.float32),
+            (counts / (SECTORS * livetime_10min_reshaped)).astype(np.float32),
             FILLVAL_FLOAT32,
         ),
         dims=l1a_counts_dataset[var].dims,
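
A toy worked example of the corrected formula (counts divided by SECTORS × summed livetime, with fill values passed through); the numbers here are invented:

import numpy as np

SECTORS = 15
FILLVAL_INT64 = -9223372036854775808
FILLVAL_FLOAT32 = np.float32(-1.00e31)

counts = np.array([300, FILLVAL_INT64, 450])   # sectored counts per bin
livetime_10min = np.array([60.0, 60.0, 60.0])  # summed livetime, seconds

# rate = counts / (15 * livetime); fill values are carried through unchanged
rates = np.where(
    counts != FILLVAL_INT64,
    (counts / (SECTORS * livetime_10min)).astype(np.float32),
    FILLVAL_FLOAT32,
)
print(rates)  # [0.33333334, -1e+31, 0.5]
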
@@ -169,7 +169,7 @@ STANDARD_PARTICLE_ENERGY_RANGE_MAPPING = {
     {"energy_min": 5.0, "energy_max": 6.0, "R2": [59], "R3": [], "R4": []},
     {"energy_min": 6.0, "energy_max": 8.0, "R2": [60], "R3": [63], "R4": []},
     {"energy_min": 8.0, "energy_max": 10.0, "R2": [61], "R3": [64], "R4": []},
-    {"energy_min": 10.0, "energy_max": 12.0, "R2": [], "R3": [65], "R4": []},
+    {"energy_min": 10.0, "energy_max": 12.0, "R2": [62], "R3": [65], "R4": []},
     {"energy_min": 12.0, "energy_max": 15.0, "R2": [], "R3": [66], "R4": []},
     {"energy_min": 15.0, "energy_max": 21.0, "R2": [], "R3": [67], "R4": []},
     {"energy_min": 21.0, "energy_max": 27.0, "R2": [], "R3": [68], "R4": []},
@@ -95,11 +95,11 @@ def add_cdf_attributes(

     Parameters
     ----------
-    dataset : xr.Dataset
+    dataset : xarray.Dataset
         The dataset to update.
     logical_source : str
         The logical source of the dataset.
-    attr_mgr : AttributeManager
+    attr_mgr : ImapCdfAttributes
         The attribute manager to retrieve attributes.

     Returns
@@ -132,12 +132,11 @@ def add_cdf_attributes(
     # check_schema=False to avoid attr_mgr adding stuff dimensions don't need
     for dim in dataset.dims:
         dataset[dim].attrs = attr_mgr.get_variable_attributes(dim, check_schema=False)
-        # TODO: should labels be added as coordinates? Check with SPDF
         if dim != "epoch":
             label_array = xr.DataArray(
                 dataset[dim].values.astype(str),
                 name=f"{dim}_label",
-                dims=[f"{dim}_label"],
+                dims=[dim],
                 attrs=attr_mgr.get_variable_attributes(
                     f"{dim}_label", check_schema=False
                 ),
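
Why the dims change matters: a label array must live on the dimension it annotates in order to align as a coordinate. A minimal reproduction with a toy dataset:

import numpy as np
import xarray as xr

ds = xr.Dataset({"counts": ("energy", np.zeros(3))})

# Correct: the label shares the "energy" dimension, so it aligns element-for-element.
ds.coords["energy_label"] = xr.DataArray(
    np.array(["0", "1", "2"]), name="energy_label", dims=["energy"]
)
print(ds["energy_label"].dims)  # ('energy',)

# With dims=["energy_label"], the labels would land on a brand-new dimension
# and no longer line up one-to-one with "energy".
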
@@ -633,7 +632,7 @@ def process_summed_intensity(
     summed_intensity_dataset = add_total_uncertainties(
         summed_intensity_dataset, var
     )
-    # Expand the variable name to include standard intensity
+    # Expand the variable name to include summed intensity
     summed_intensity_dataset = summed_intensity_dataset.rename(
         {var: f"{var}_summed_intensity"}
     )
@@ -1,6 +1,7 @@
 """Module for constants and useful shared classes used in I-ALiRT processing."""

 from dataclasses import dataclass
+from typing import NamedTuple

 import numpy as np

@@ -36,3 +37,23 @@ class IalirtSwapiConstants:
     az_fov = np.deg2rad(30)  # azimuthal width of the field of view, radians
     fwhm_width = 0.085  # FWHM of energy width
     speed_ew = 0.5 * fwhm_width  # speed width of energy passband
+
+
+class StationProperties(NamedTuple):
+    """Class that represents properties of ground stations."""
+
+    longitude: float  # longitude in degrees
+    latitude: float  # latitude in degrees
+    altitude: float  # altitude in kilometers
+    min_elevation_deg: float  # minimum elevation angle in degrees
+
+
+# Verified by Kiel Observatory staff.
+STATIONS = {
+    "Kiel": StationProperties(
+        longitude=10.1808,  # degrees East
+        latitude=54.2632,  # degrees North
+        altitude=0.1,  # approx 100 meters
+        min_elevation_deg=5,  # 5 degrees is the requirement
+    )
+}
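
A short sketch of how the NamedTuple is consumed: by field name, or by positional unpacking as the coverage loop in generate_coverage (below) relies on.

from typing import NamedTuple

class StationProperties(NamedTuple):
    longitude: float
    latitude: float
    altitude: float
    min_elevation_deg: float

kiel = StationProperties(10.1808, 54.2632, 0.1, 5)

# Access by field name...
print(kiel.min_elevation_deg)  # 5

# ...or by positional unpacking:
lon, lat, alt, min_elevation = kiel
print(lon, lat)  # 10.1808 54.2632
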
@@ -0,0 +1,188 @@
+"""Coverage time for each station."""
+
+import logging
+
+import numpy as np
+
+from imap_processing.ialirt.constants import STATIONS
+from imap_processing.ialirt.process_ephemeris import calculate_azimuth_and_elevation
+from imap_processing.spice.time import et_to_utc, str_to_et
+
+# Logger setup
+logger = logging.getLogger(__name__)
+
+# TODO: get a list of all potential DSN stations.
+ALL_STATIONS = [*STATIONS.keys(), "DSS-55", "DSS-56", "DSS-74", "DSS-75"]
+
+
+def generate_coverage(
+    start_time: str,
+    outages: dict | None = None,
+    dsn: dict | None = None,
+) -> tuple[dict, dict]:
+    """
+    Build the output dictionary containing coverage and outage time for each station.
+
+    Parameters
+    ----------
+    start_time : str
+        Start time in UTC.
+    outages : dict, optional
+        Dictionary of outages for each station.
+    dsn : dict, optional
+        Dictionary of Deep Space Network (DSN) stations.
+
+    Returns
+    -------
+    coverage_dict : dict
+        Visibility times per station.
+    outage_dict : dict
+        Outage times per station.
+    """
+    duration_seconds = 24 * 60 * 60  # 86400 seconds in 24 hours
+    time_step = 3600  # 1 hr in seconds
+
+    stations = {
+        "Kiel": STATIONS["Kiel"],
+    }
+    coverage_dict = {}
+    outage_dict = {}
+
+    start_et_input = str_to_et(start_time)
+    stop_et_input = start_et_input + duration_seconds
+
+    time_range = np.arange(start_et_input, stop_et_input, time_step)
+    total_visible_mask = np.zeros(time_range.shape, dtype=bool)
+
+    # Precompute DSN outage mask for non-DSN stations
+    dsn_outage_mask = np.zeros(time_range.shape, dtype=bool)
+    if dsn:
+        for dsn_contacts in dsn.values():
+            for start, end in dsn_contacts:
+                start_et = str_to_et(start)
+                end_et = str_to_et(end)
+                dsn_outage_mask |= (time_range >= start_et) & (time_range <= end_et)
+
+    for station_name, (lon, lat, alt, min_elevation) in stations.items():
+        azimuth, elevation = calculate_azimuth_and_elevation(lon, lat, alt, time_range)
+        visible = elevation > min_elevation
+
+        outage_mask = np.zeros(time_range.shape, dtype=bool)
+        if outages and station_name in outages:
+            for start, end in outages[station_name]:
+                start_et = str_to_et(start)
+                end_et = str_to_et(end)
+                outage_mask |= (time_range >= start_et) & (time_range <= end_et)
+
+        visible[outage_mask] = False
+        # DSN contacts block other stations
+        visible[dsn_outage_mask] = False
+        total_visible_mask |= visible
+
+        coverage_dict[station_name] = et_to_utc(time_range[visible], format_str="ISOC")
+        outage_dict[station_name] = et_to_utc(
+            time_range[outage_mask], format_str="ISOC"
+        )
+
+    # --- DSN Stations ---
+    if dsn:
+        for dsn_station, contacts in dsn.items():
+            dsn_visible_mask = np.zeros(time_range.shape, dtype=bool)
+            for start, end in contacts:
+                start_et = str_to_et(start)
+                end_et = str_to_et(end)
+                dsn_visible_mask |= (time_range >= start_et) & (time_range <= end_et)
+
+            # Apply DSN outages if present
+            outage_mask = np.zeros(time_range.shape, dtype=bool)
+            if outages and dsn_station in outages:
+                for start, end in outages[dsn_station]:
+                    start_et = str_to_et(start)
+                    end_et = str_to_et(end)
+                    outage_mask |= (time_range >= start_et) & (time_range <= end_et)
+
+            dsn_visible_mask[outage_mask] = False
+            total_visible_mask |= dsn_visible_mask
+
+            coverage_dict[f"{dsn_station}"] = et_to_utc(
+                time_range[dsn_visible_mask], format_str="ISOC"
+            )
+            outage_dict[f"{dsn_station}"] = et_to_utc(
+                time_range[outage_mask], format_str="ISOC"
+            )
+
+    # Total coverage percentage
+    total_coverage_percent = (
+        np.count_nonzero(total_visible_mask) / time_range.size
+    ) * 100
+    coverage_dict["total_coverage_percent"] = total_coverage_percent
+
+    # Ensure all stations are present in both dicts
+    for station in ALL_STATIONS:
+        coverage_dict.setdefault(station, np.array([], dtype="<U23"))
+        outage_dict.setdefault(station, np.array([], dtype="<U23"))
+
+    return coverage_dict, outage_dict
+
+
+def format_coverage_summary(
+    coverage_dict: dict, outage_dict: dict, start_time: str
+) -> dict:
+    """
+    Build the output dictionary containing coverage time for each station.
+
+    Parameters
+    ----------
+    coverage_dict : dict
+        Coverage for each station, keyed by station name with arrays of UTC times.
+    outage_dict : dict
+        Outage times for each station, keyed by station name with arrays of UTC times.
+    start_time : str
+        Start time in UTC.
+
+    Returns
+    -------
+    output_dict : dict
+        Formatted coverage summary.
+    """
+    # Include all known stations,
+    # plus any new ones that appear in coverage_dict.
+    all_stations = ALL_STATIONS + [
+        station
+        for station in coverage_dict.keys()
+        if station not in ALL_STATIONS and station != "total_coverage_percent"
+    ]
+
+    duration_seconds = 24 * 60 * 60  # 86400 seconds in 24 hours
+    time_step = 3600  # 1 hr in seconds
+
+    start_et_input = str_to_et(start_time)
+    stop_et_input = start_et_input + duration_seconds
+
+    time_range = np.arange(start_et_input, stop_et_input, time_step)
+    all_times = et_to_utc(time_range, format_str="ISOC")
+
+    data_rows = []
+    for time in all_times:
+        row = {"time": time}
+        for station in all_stations:
+            visible_times = coverage_dict.get(station, [])
+            outage_times = outage_dict.get(station, [])
+            if time in outage_times:
+                row[station] = "X"
+            elif time in visible_times:
+                row[station] = "1"
+            else:
+                row[station] = "0"
+        data_rows.append(row)
+
+    output_dict = {
+        "summary": "I-ALiRT Coverage Summary",
+        "generated": start_time,
+        "time_format": "UTC (ISOC)",
+        "stations": all_stations,
+        "total_coverage_percent": round(coverage_dict["total_coverage_percent"], 1),
+        "data": data_rows,
+    }
+
+    return output_dict
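
A hypothetical usage sketch for the new module; the station outage windows and DSN contact times below are invented for illustration, and the underlying SPICE time conversions (str_to_et, et_to_utc) require the appropriate kernels to be loaded.

from imap_processing.ialirt.generate_coverage import (
    format_coverage_summary,
    generate_coverage,
)

# Invented outage/contact windows, for illustration only.
outages = {"Kiel": [("2026-01-01T02:00:00", "2026-01-01T04:00:00")]}
dsn = {"DSS-55": [("2026-01-01T10:00:00", "2026-01-01T12:00:00")]}

coverage, outage = generate_coverage("2026-01-01T00:00:00", outages=outages, dsn=dsn)
summary = format_coverage_summary(coverage, outage, "2026-01-01T00:00:00")

# Each hourly row marks every station "1" (visible), "X" (outage), or "0" (not visible).
print(summary["total_coverage_percent"])
for row in summary["data"][:3]:
    print(row)
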
@@ -20,6 +20,8 @@ from imap_processing.mag.l1b.mag_l1b import (
     calibrate_vector,
     shift_time,
 )
+from imap_processing.mag.l1d.mag_l1d_data import MagL1d
+from imap_processing.mag.l2.mag_l2_data import MagL2L1dBase
 from imap_processing.spice.time import met_to_ttj2000ns, met_to_utc

 logger = logging.getLogger(__name__)
@@ -286,6 +288,56 @@ def calculate_l1b(
     return updated_vector_mago, updated_vector_magi, time_data


+def calibrate_and_offset_vectors(
+    vectors: np.ndarray,
+    range_vals: np.ndarray,
+    calibration: np.ndarray,
+    offsets: np.ndarray,
+    is_magi: bool = False,
+) -> np.ndarray:
+    """
+    Apply calibration and offsets to magnetic vectors.
+
+    Parameters
+    ----------
+    vectors : np.ndarray
+        Raw magnetic vectors, shape (n, 3).
+    range_vals : np.ndarray
+        Range indices for each vector, shape (n). Values 0–3.
+    calibration : np.ndarray
+        Calibration matrix, shape (3, 3, 4).
+    offsets : np.ndarray
+        Offsets array, shape (2, 4, 3) where:
+        - index 0 = MAGo, 1 = MAGi
+        - second index = range (0–3)
+        - third index = axis (x, y, z)
+    is_magi : bool, optional
+        True if applying to MAGi data, False for MAGo.
+
+    Returns
+    -------
+    calibrated_and_offset_vectors : np.ndarray
+        Calibrated and offset vectors, shape (n, 3).
+    """
+    # Append range as 4th column
+    vec_plus_range = np.concatenate((vectors, range_vals[:, np.newaxis]), axis=1)
+
+    # Apply calibration matrix -> (n, 4)
+    calibrated = MagL2L1dBase.apply_calibration(vec_plus_range, calibration)
+
+    # Apply offsets per vector
+    # vec shape (4)
+    # offsets shape (2, 4, 3) where first index is 0 for MAGo and 1 for MAGi
+    calibrated = np.array(
+        [
+            MagL1d.apply_calibration_offset_single_vector(vec, offsets, is_magi=is_magi)
+            for vec in calibrated
+        ]
+    )
+
+    return calibrated[:, :3]
+
+
 def process_packet(
     accumulated_data: xr.Dataset, calibration_dataset: xr.Dataset
 ) -> tuple[list[dict], list[dict]]:
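
As a sanity check on the shape bookkeeping, here is a small pure-NumPy sketch; the calibration and offset math itself lives in MagL2L1dBase and MagL1d and is not reproduced here. It shows the range index appended as a fourth column and per-vector offsets selected from the (2, 4, 3) offsets array.

import numpy as np

n = 5
vectors = np.zeros((n, 3))              # raw (x, y, z) vectors
range_vals = np.array([0, 1, 2, 3, 0])  # one range index (0-3) per vector

# Append the range as a 4th column: (n, 3) + (n, 1) -> (n, 4)
vec_plus_range = np.concatenate((vectors, range_vals[:, np.newaxis]), axis=1)
print(vec_plus_range.shape)  # (5, 4)

# Offsets are indexed offsets[sensor, range, axis]:
# sensor 0 = MAGo, 1 = MAGi; range 0-3; axis x/y/z.
offsets = np.zeros((2, 4, 3))
is_magi = True
per_vector_offsets = offsets[int(is_magi), range_vals]  # shape (5, 3)
print(per_vector_offsets.shape)
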
@@ -392,11 +444,16 @@ def process_packet(
             "met": int(met.values.min()),
             "met_in_utc": met_to_utc(met.values.min()).split(".")[0],
             "ttj2000ns": int(met_to_ttj2000ns(met.values.min())),
-            "mag_4s_b_gse": [Decimal("0.0") for _ in range(3)],
-            "mag_4s_b_gsm": [Decimal("0.0") for _ in range(3)],
-            "mag_4s_b_rtn": [Decimal("0.0") for _ in range(3)],
-            "mag_phi_4s_b_gsm": Decimal("0.0"),
-            "mag_theta_4s_b_gsm": Decimal("0.0"),
+            # TODO: Placeholder for mag_epoch
+            "mag_epoch": int(met.values.min()),
+            "mag_B_GSE": [Decimal("0.0") for _ in range(3)],
+            "mag_B_GSM": [Decimal("0.0") for _ in range(3)],
+            "mag_B_RTN": [Decimal("0.0") for _ in range(3)],
+            "mag_B_magnitude": Decimal("0.0"),
+            "mag_phi_B_GSM": Decimal("0.0"),
+            "mag_theta_B_GSM": Decimal("0.0"),
+            "mag_phi_B_GSE": Decimal("0.0"),
+            "mag_theta_B_GSE": Decimal("0.0"),
         }
     )

@@ -96,7 +96,7 @@ def optimize_pseudo_parameters(
     # Read in energy passbands
     energy_data = pd.read_csv(
         f"{imap_module_directory}/tests/swapi/lut/imap_swapi_esa-unit"
-        f"-conversion_20250211_v000.csv"
+        f"-conversion_20250626_v001.csv"
     )
     energy_passbands = (
         energy_data["Energy"][0:63]
@@ -542,16 +542,32 @@ def process_swe(accumulated_data: xr.Dataset, in_flight_cal_files: list) -> list
     summed_first = normalized_first_half.sum(axis=(1, 2))
     summed_second = normalized_second_half.sum(axis=(1, 2))

+    met_first_half = int(
+        grouped["met"].where(grouped["swe_seq"] == 0, drop=True).values[0]
+    )
+    met_second_half = int(
+        grouped["met"].where(grouped["swe_seq"] == 30, drop=True).values[0]
+    )
+
+    swe_data.append(
+        {
+            "apid": 478,
+            "met": met_first_half,
+            "met_in_utc": met_to_utc(met_first_half).split(".")[0],
+            "ttj2000ns": int(met_to_ttj2000ns(met_first_half)),
+            "swe_normalized_counts": [int(val) for val in summed_first],
+            "swe_counterstreaming_electrons": bde_first_half,
+        },
+    )
     swe_data.append(
         {
             "apid": 478,
-            "met": int(grouped["met"].min()),
-            "met_in_utc": met_to_utc(grouped["met"].min()).split(".")[0],
-            "ttj2000ns": int(met_to_ttj2000ns(grouped["met"].min())),
-            "swe_normalized_counts_half_1_esa": [int(val) for val in summed_first],
-            "swe_normalized_counts_half_2_esa": [int(val) for val in summed_second],
-            "swe_counterstreaming_electrons": max(bde_first_half, bde_second_half),
-        }
+            "met": met_second_half,
+            "met_in_utc": met_to_utc(met_second_half).split(".")[0],
+            "ttj2000ns": int(met_to_ttj2000ns(met_second_half)),
+            "swe_normalized_counts": [int(val) for val in summed_second],
+            "swe_counterstreaming_electrons": bde_second_half,
+        },
     )

     return swe_data
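
The per-half MET selection above uses xarray's where/drop idiom; a self-contained sketch with toy data (field names mirror the diff, values invented):

import numpy as np
import xarray as xr

grouped = xr.Dataset(
    {
        "met": ("records", np.array([1000, 1032, 1064])),
        "swe_seq": ("records", np.array([0, 30, 60])),
    }
)

# Select the MET where the sequence counter marks the start of each half cycle.
met_first_half = int(grouped["met"].where(grouped["swe_seq"] == 0, drop=True).values[0])
met_second_half = int(grouped["met"].where(grouped["swe_seq"] == 30, drop=True).values[0])
print(met_first_half, met_second_half)  # 1000 1032
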
@@ -2,19 +2,19 @@

 IALIRT_KEYS = [
     # H intensities in 15 energy ranges and binned into 4 azimuths and 4 spin angle bins
-    "codicehi_h",
+    "codice_hi_h",
     # C/O abundance ratio
-    "codicelo_c_over_o_abundance",
+    "codice_lo_c_over_o_abundance",
     # Mg/O abundance ratio
-    "codicelo_mg_over_o_abundance",
+    "codice_lo_mg_over_o_abundance",
     # Fe/O abundance ratio
-    "codicelo_fe_over_o_abundance",
+    "codice_lo_fe_over_o_abundance",
     # C+6/C+5 charge state ratio
-    "codicelo_c_plus_6_over_c_plus_5_ratio",
+    "codice_lo_c_plus_6_over_c_plus_5_ratio",
     # O+7/O+6 charge state ratio
-    "codicelo_o_plus_7_over_o_plus_6_ratio",
+    "codice_lo_o_plus_7_over_o_plus_6_ratio",
     # Fe low/Fe high charge state ratio
-    "codicelo_fe_low_over_fe_high_ratio",
+    "codice_lo_fe_low_over_fe_high_ratio",
     # Low energy (~300 keV) electrons (A-side)
     "hit_e_a_side_low_en",
     # Medium energy (~3 MeV) electrons (A-side)
@@ -37,26 +37,32 @@ IALIRT_KEYS = [
     "hit_he_omni_low_en",
     # High energy (15 to 70 MeV/nuc) He (Omnidirectional)
     "hit_he_omni_high_en",
+    # MAG instrument epoch
+    "mag_epoch",
     # Magnetic field vector in GSE coordinates
-    "mag_4s_b_gse",
+    "mag_B_GSE",
     # Magnetic field vector in GSM coordinates
-    "mag_4s_b_gsm",
+    "mag_B_GSM",
     # Magnetic field vector in RTN coordinates
-    "mag_4s_b_rtn",
+    "mag_B_RTN",
+    # Magnitude of the magnetic field vector
+    "mag_B_magnitude",
     # Azimuth angle (φ) of the magnetic field in GSM coordinates
-    "mag_phi_4s_b_gsm",
+    "mag_phi_B_GSM",
     # Elevation angle (θ) of the magnetic field in GSM coordinates
-    "mag_theta_4s_b_gsm",
+    "mag_theta_B_GSM",
+    # Azimuth angle (φ) of the magnetic field in GSE coordinates
+    "mag_phi_B_GSE",
+    # Elevation angle (θ) of the magnetic field in GSE coordinates
+    "mag_theta_B_GSE",
     # Pseudo density of solar wind protons
     "swapi_pseudo_proton_density",
     # Pseudo speed of solar wind protons in solar inertial frame
     "swapi_pseudo_proton_speed",
     # Pseudo temperature of solar wind protons in plasma frame
     "swapi_pseudo_proton_temperature",
-    # SWE Normalized Counts - Half Cycle 1
-    "swe_normalized_counts_half_1",
-    # SWE Normalized Counts - Half Cycle 2
-    "swe_normalized_counts_half_2",
+    # SWE Normalized Counts
+    "swe_normalized_counts",
     # SWE Counterstreaming flag
     "swe_counterstreaming_electrons",
 ]