imap-processing 0.18.0__py3-none-any.whl → 0.19.0__py3-none-any.whl

This diff compares the contents of two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.

Files changed (104)
  1. imap_processing/_version.py +2 -2
  2. imap_processing/ancillary/ancillary_dataset_combiner.py +161 -1
  3. imap_processing/cdf/config/imap_codice_l1a_variable_attrs.yaml +301 -274
  4. imap_processing/cdf/config/imap_codice_l1b_variable_attrs.yaml +28 -28
  5. imap_processing/cdf/config/imap_codice_l2_variable_attrs.yaml +1044 -203
  6. imap_processing/cdf/config/imap_constant_attrs.yaml +4 -2
  7. imap_processing/cdf/config/imap_glows_l1b_variable_attrs.yaml +12 -0
  8. imap_processing/cdf/config/imap_hi_global_cdf_attrs.yaml +5 -0
  9. imap_processing/cdf/config/imap_hit_global_cdf_attrs.yaml +10 -4
  10. imap_processing/cdf/config/imap_idex_l2a_variable_attrs.yaml +33 -4
  11. imap_processing/cdf/config/imap_idex_l2b_variable_attrs.yaml +8 -91
  12. imap_processing/cdf/config/imap_idex_l2c_variable_attrs.yaml +106 -16
  13. imap_processing/cdf/config/imap_lo_l1a_variable_attrs.yaml +4 -15
  14. imap_processing/cdf/config/imap_lo_l1c_variable_attrs.yaml +189 -98
  15. imap_processing/cdf/config/imap_mag_global_cdf_attrs.yaml +85 -2
  16. imap_processing/cdf/config/imap_mag_l1c_variable_attrs.yaml +24 -1
  17. imap_processing/cdf/config/imap_ultra_l1b_variable_attrs.yaml +12 -4
  18. imap_processing/cdf/config/imap_ultra_l1c_variable_attrs.yaml +50 -7
  19. imap_processing/cli.py +95 -41
  20. imap_processing/codice/codice_l1a.py +131 -31
  21. imap_processing/codice/codice_l2.py +118 -10
  22. imap_processing/codice/constants.py +740 -595
  23. imap_processing/decom.py +1 -4
  24. imap_processing/ena_maps/ena_maps.py +32 -25
  25. imap_processing/ena_maps/utils/naming.py +8 -2
  26. imap_processing/glows/ancillary/imap_glows_exclusions-by-instr-team_20250923_v002.dat +10 -0
  27. imap_processing/glows/ancillary/imap_glows_map-of-excluded-regions_20250923_v002.dat +393 -0
  28. imap_processing/glows/ancillary/imap_glows_map-of-uv-sources_20250923_v002.dat +593 -0
  29. imap_processing/glows/ancillary/imap_glows_pipeline_settings_20250923_v002.json +54 -0
  30. imap_processing/glows/ancillary/imap_glows_suspected-transients_20250923_v002.dat +10 -0
  31. imap_processing/glows/l1b/glows_l1b.py +99 -9
  32. imap_processing/glows/l1b/glows_l1b_data.py +350 -38
  33. imap_processing/glows/l2/glows_l2.py +11 -0
  34. imap_processing/hi/hi_l1a.py +124 -3
  35. imap_processing/hi/hi_l1b.py +154 -71
  36. imap_processing/hi/hi_l2.py +84 -51
  37. imap_processing/hi/utils.py +153 -8
  38. imap_processing/hit/l0/constants.py +3 -0
  39. imap_processing/hit/l0/decom_hit.py +3 -6
  40. imap_processing/hit/l1a/hit_l1a.py +311 -21
  41. imap_processing/hit/l1b/hit_l1b.py +54 -126
  42. imap_processing/hit/l2/hit_l2.py +6 -6
  43. imap_processing/ialirt/calculate_ingest.py +219 -0
  44. imap_processing/ialirt/constants.py +12 -2
  45. imap_processing/ialirt/generate_coverage.py +15 -2
  46. imap_processing/ialirt/l0/ialirt_spice.py +5 -2
  47. imap_processing/ialirt/l0/parse_mag.py +293 -42
  48. imap_processing/ialirt/l0/process_hit.py +5 -3
  49. imap_processing/ialirt/l0/process_swapi.py +41 -25
  50. imap_processing/ialirt/process_ephemeris.py +70 -14
  51. imap_processing/idex/idex_l0.py +2 -2
  52. imap_processing/idex/idex_l1a.py +2 -3
  53. imap_processing/idex/idex_l1b.py +2 -3
  54. imap_processing/idex/idex_l2a.py +130 -4
  55. imap_processing/idex/idex_l2b.py +158 -143
  56. imap_processing/idex/idex_utils.py +1 -3
  57. imap_processing/lo/l0/lo_science.py +25 -24
  58. imap_processing/lo/l1b/lo_l1b.py +3 -3
  59. imap_processing/lo/l1c/lo_l1c.py +116 -50
  60. imap_processing/lo/l2/lo_l2.py +29 -29
  61. imap_processing/lo/lo_ancillary.py +55 -0
  62. imap_processing/mag/l1a/mag_l1a.py +1 -0
  63. imap_processing/mag/l1a/mag_l1a_data.py +26 -0
  64. imap_processing/mag/l1b/mag_l1b.py +3 -2
  65. imap_processing/mag/l1c/interpolation_methods.py +14 -15
  66. imap_processing/mag/l1c/mag_l1c.py +23 -6
  67. imap_processing/mag/l1d/mag_l1d.py +57 -14
  68. imap_processing/mag/l1d/mag_l1d_data.py +167 -30
  69. imap_processing/mag/l2/mag_l2_data.py +10 -2
  70. imap_processing/quality_flags.py +9 -1
  71. imap_processing/spice/geometry.py +76 -33
  72. imap_processing/spice/pointing_frame.py +0 -6
  73. imap_processing/spice/repoint.py +29 -2
  74. imap_processing/spice/spin.py +28 -8
  75. imap_processing/spice/time.py +12 -22
  76. imap_processing/swapi/l1/swapi_l1.py +10 -4
  77. imap_processing/swapi/l2/swapi_l2.py +15 -17
  78. imap_processing/swe/l1b/swe_l1b.py +1 -2
  79. imap_processing/ultra/constants.py +1 -24
  80. imap_processing/ultra/l0/ultra_utils.py +9 -11
  81. imap_processing/ultra/l1a/ultra_l1a.py +1 -2
  82. imap_processing/ultra/l1b/cullingmask.py +6 -3
  83. imap_processing/ultra/l1b/de.py +81 -23
  84. imap_processing/ultra/l1b/extendedspin.py +13 -10
  85. imap_processing/ultra/l1b/lookup_utils.py +281 -28
  86. imap_processing/ultra/l1b/quality_flag_filters.py +10 -1
  87. imap_processing/ultra/l1b/ultra_l1b_culling.py +161 -3
  88. imap_processing/ultra/l1b/ultra_l1b_extended.py +253 -47
  89. imap_processing/ultra/l1c/helio_pset.py +97 -24
  90. imap_processing/ultra/l1c/l1c_lookup_utils.py +256 -0
  91. imap_processing/ultra/l1c/spacecraft_pset.py +83 -16
  92. imap_processing/ultra/l1c/ultra_l1c.py +6 -2
  93. imap_processing/ultra/l1c/ultra_l1c_culling.py +85 -0
  94. imap_processing/ultra/l1c/ultra_l1c_pset_bins.py +385 -277
  95. imap_processing/ultra/l2/ultra_l2.py +0 -1
  96. imap_processing/ultra/utils/ultra_l1_utils.py +28 -3
  97. imap_processing/utils.py +3 -4
  98. {imap_processing-0.18.0.dist-info → imap_processing-0.19.0.dist-info}/METADATA +2 -2
  99. {imap_processing-0.18.0.dist-info → imap_processing-0.19.0.dist-info}/RECORD +102 -95
  100. imap_processing/idex/idex_l2c.py +0 -84
  101. imap_processing/spice/kernels.py +0 -187
  102. {imap_processing-0.18.0.dist-info → imap_processing-0.19.0.dist-info}/LICENSE +0 -0
  103. {imap_processing-0.18.0.dist-info → imap_processing-0.19.0.dist-info}/WHEEL +0 -0
  104. {imap_processing-0.18.0.dist-info → imap_processing-0.19.0.dist-info}/entry_points.txt +0 -0
imap_processing/ultra/l0/ultra_utils.py

@@ -1,6 +1,6 @@
 """Contains data classes to support Ultra L0 processing."""
 
-from typing import NamedTuple, Union
+from typing import NamedTuple
 
 
 class PacketProperties(NamedTuple):
@@ -9,23 +9,21 @@ class PacketProperties(NamedTuple):
     apid: list  # List of APIDs
     logical_source: list  # List of logical sources
     addition_to_logical_desc: str  # Description of the logical source
-    width: Union[int, None]  # Width of binary data (could be None).
+    width: int | None  # Width of binary data (could be None).
     # Block, image_planes, pixel_window_rows, and pixel_window_columns are important for
     # decompressing the images and a description is available on page 171 of IMAP-Ultra
     # Flight Software Specification document (7523-9009_Rev_-.pdf).
-    block: Union[int, None]  # Number of values in each block (could be None).
-    len_array: Union[
-        int, None
-    ]  # Length of the array to be decompressed (could be None).
-    mantissa_bit_length: Union[int, None]  # used to determine the level of
+    block: int | None  # Number of values in each block (could be None).
+    len_array: int | None  # Length of the array to be decompressed (could be None).
+    mantissa_bit_length: int | None  # used to determine the level of
     # precision that can be recovered from compressed data (could be None).
-    image_planes: Union[int, None] = None
+    image_planes: int | None = None
     # number of images. See table 11 in the FSSD.
-    pixel_window_rows: Union[int, None] = None
+    pixel_window_rows: int | None = None
     # number of rows in each image. See table 49 in the FSSD.
-    pixel_window_columns: Union[int, None] = None
+    pixel_window_columns: int | None = None
     # number of columns in each image. See table 49 in the FSSD.
-    image_planes_per_packet: Union[int, None] = None
+    image_planes_per_packet: int | None = None
     # number of image planes in each packet. See table 52 in the FSSD.
imap_processing/ultra/l1a/ultra_l1a.py

@@ -1,7 +1,6 @@
 """Generate ULTRA L1a CDFs."""
 
 import logging
-from typing import Optional
 
 import xarray as xr
 
@@ -44,7 +43,7 @@ logger = logging.getLogger(__name__)
 
 
 def ultra_l1a(  # noqa: PLR0912
-    packet_file: str, apid_input: Optional[int] = None
+    packet_file: str, apid_input: int | None = None
 ) -> list[xr.Dataset]:
     """
     Will process ULTRA L0 data into L1A CDF files at output_filepath.
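The two hunks above are part of a release-wide cleanup that drops `typing.Union` and `typing.Optional` in favour of PEP 604 `X | None` unions. A minimal sketch of the modernized style, with fields and the function body abbreviated for illustration (not the full definitions from the package):

```python
from typing import NamedTuple


class PacketProperties(NamedTuple):
    """Abbreviated sketch of the new annotation style."""

    apid: list
    width: int | None                # was Union[int, None]
    image_planes: int | None = None  # defaults behave exactly as before


def ultra_l1a(packet_file: str, apid_input: int | None = None) -> list:
    """Signature sketch only; the real function returns xarray datasets."""
    return []


props = PacketProperties(apid=[1], width=None)
print(props.width, ultra_l1a("packets.pkts"))  # None []
```

This assumes the package's minimum supported Python is 3.10 or newer, which the `X | Y` union syntax requires at runtime.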
imap_processing/ultra/l1b/cullingmask.py

@@ -3,7 +3,7 @@
 import numpy as np
 import xarray as xr
 
-from imap_processing.ultra.l1b.quality_flag_filters import QUALITY_FLAG_FILTERS
+from imap_processing.ultra.l1b.quality_flag_filters import SPIN_QUALITY_FLAG_FILTERS
 from imap_processing.ultra.utils.ultra_l1_utils import create_dataset, extract_data_dict
 
 FILLVAL_UINT16 = 65535
@@ -32,14 +32,17 @@ def calculate_cullingmask(extendedspin_dataset: xr.Dataset, name: str) -> xr.Dat
     good_mask = (
         (
             extendedspin_dataset["quality_attitude"]
-            & sum(flag.value for flag in QUALITY_FLAG_FILTERS["quality_attitude"])
+            & sum(flag.value for flag in SPIN_QUALITY_FLAG_FILTERS["quality_attitude"])
         )
         == 0
     ) & (
         (
             (
                 extendedspin_dataset["quality_ena_rates"]
-                & sum(flag.value for flag in QUALITY_FLAG_FILTERS["quality_ena_rates"])
+                & sum(
+                    flag.value
+                    for flag in SPIN_QUALITY_FLAG_FILTERS["quality_ena_rates"]
+                )
             )
             == 0
         ).all(dim="energy_bin_geometric_mean")
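The `good_mask` expression keeps only spins whose quality bitmasks contain none of the filtered flag bits: summing the filter flags builds a combined bit mask, and `value & mask == 0` means the spin tripped none of them. A self-contained sketch of that pattern, using a hypothetical flag enum and values rather than the real SPIN_QUALITY_FLAG_FILTERS contents:

```python
from enum import IntFlag

import numpy as np


class AttitudeFlags(IntFlag):
    """Hypothetical stand-in for the real quality-flag enum."""

    NONE = 0
    SPIN_RATE = 1
    POINTING = 2
    NUTATION = 4


# Flags that should cause a spin to be culled (hypothetical filter list).
filters = [AttitudeFlags.SPIN_RATE, AttitudeFlags.NUTATION]

quality_attitude = np.array([0, 1, 2, 4, 5])  # per-spin bitmask values
# A spin is "good" when none of the filtered bits are set.
good_mask = (quality_attitude & sum(flag.value for flag in filters)) == 0
print(good_mask)  # [ True False  True False False]
```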
imap_processing/ultra/l1b/de.py

@@ -4,12 +4,16 @@ import numpy as np
 import xarray as xr
 
 from imap_processing.cdf.utils import parse_filename_like
-from imap_processing.quality_flags import ImapDEUltraFlags
+from imap_processing.quality_flags import (
+    ImapDEOutliersUltraFlags,
+    ImapDEScatteringUltraFlags,
+)
 from imap_processing.spice.geometry import SpiceFrame
 from imap_processing.ultra.l1b.lookup_utils import get_geometric_factor
 from imap_processing.ultra.l1b.ultra_l1b_annotated import (
     get_annotated_particle_velocity,
 )
+from imap_processing.ultra.l1b.ultra_l1b_culling import flag_scattering
 from imap_processing.ultra.l1b.ultra_l1b_extended import (
     StopType,
     determine_ebin_pulse_height,
@@ -32,6 +36,8 @@ from imap_processing.ultra.l1b.ultra_l1b_extended import (
     get_spin_number,
     get_ssd_back_position_and_tof_offset,
     get_ssd_tof,
+    is_back_tof_valid,
+    is_coin_ph_valid,
 )
 from imap_processing.ultra.utils.ultra_l1_utils import create_dataset
 
@@ -88,7 +94,10 @@ def calculate_de(
     ]
 
     de_dict.update(
-        {key: de_dataset[dataset_key] for key, dataset_key in zip(keys, dataset_keys)}
+        {
+            key: de_dataset[dataset_key]
+            for key, dataset_key in zip(keys, dataset_keys, strict=False)
+        }
     )
 
     valid_mask = de_dataset["start_type"].data != FILLVAL_UINT8
@@ -114,6 +123,7 @@ def calculate_de(
     tof = np.full(len(de_dataset["epoch"]), FILLVAL_FLOAT32, dtype=np.float32)
     etof = np.full(len(de_dataset["epoch"]), FILLVAL_FLOAT32, dtype=np.float32)
     ctof = np.full(len(de_dataset["epoch"]), FILLVAL_FLOAT32, dtype=np.float32)
+    tof_energy = np.full(len(de_dataset["epoch"]), FILLVAL_FLOAT32, dtype=np.float32)
     magnitude_v = np.full(len(de_dataset["epoch"]), FILLVAL_FLOAT32, dtype=np.float32)
     energy = np.full(len(de_dataset["epoch"]), FILLVAL_FLOAT32, dtype=np.float32)
     e_bin = np.full(len(de_dataset["epoch"]), FILLVAL_UINT8, dtype=np.uint8)
@@ -125,10 +135,18 @@ def calculate_de(
     sc_dps_velocity = np.full(shape, FILLVAL_FLOAT32, dtype=np.float32)
     helio_velocity = np.full(shape, FILLVAL_FLOAT32, dtype=np.float32)
     spin_starts = np.full(len(de_dataset["epoch"]), FILLVAL_FLOAT32, dtype=np.float64)
+    velocities = np.full(shape, FILLVAL_FLOAT32, dtype=np.float32)
+    v_hat = np.full(shape, FILLVAL_FLOAT32, dtype=np.float32)
+    r_hat = np.full(shape, FILLVAL_FLOAT32, dtype=np.float32)
 
     start_type = np.full(len(de_dataset["epoch"]), FILLVAL_UINT8, dtype=np.uint8)
     quality_flags = np.full(
-        de_dataset["epoch"].shape, ImapDEUltraFlags.NONE.value, dtype=np.uint16
+        de_dataset["epoch"].shape, ImapDEOutliersUltraFlags.NONE.value, dtype=np.uint16
+    )
+    scattering_quality_flags = np.full(
+        de_dataset["epoch"].shape,
+        ImapDEScatteringUltraFlags.NONE.value,
+        dtype=np.uint16,
     )
 
     xf[valid_indices] = get_front_x_position(
@@ -149,9 +167,13 @@ def calculate_de(
     )
 
     # Pulse height
-    tof[ph_indices], t2[ph_indices], xb[ph_indices], yb[ph_indices] = (
-        get_ph_tof_and_back_positions(de_dataset, xf, f"ultra{sensor}", ancillary_files)
+    ph_result = get_ph_tof_and_back_positions(
+        de_dataset, xf, f"ultra{sensor}", ancillary_files
     )
+    tof[ph_indices] = ph_result.tof
+    t2[ph_indices] = ph_result.t2
+    xb[ph_indices] = ph_result.xb
+    yb[ph_indices] = ph_result.yb
     d[ph_indices], yf[ph_indices] = get_front_y_position(
         de_dataset["start_type"].data[ph_indices], yb[ph_indices], ancillary_files
     )
@@ -174,9 +196,6 @@ def calculate_de(
         (xb[ph_indices], yb[ph_indices]),
         d[ph_indices],
     )
-    e_bin[ph_indices] = determine_ebin_pulse_height(
-        energy[ph_indices], tof[ph_indices], r[ph_indices]
-    )
     species_bin[ph_indices] = determine_species(tof[ph_indices], r[ph_indices], "PH")
     etof[ph_indices], xc[ph_indices] = get_coincidence_positions(
         de_dataset.isel(epoch=ph_indices),
@@ -184,6 +203,27 @@ def calculate_de(
         f"ultra{sensor}",
         ancillary_files,
     )
+    backtofvalid = is_back_tof_valid(
+        de_dataset,
+        xf,
+        f"ultra{sensor}",
+        ancillary_files,
+    )
+    coinphvalid = is_coin_ph_valid(
+        etof[ph_indices],
+        xc[ph_indices],
+        xb[ph_indices],
+        f"ultra{sensor}",
+        ancillary_files,
+    )
+    e_bin[ph_indices] = determine_ebin_pulse_height(
+        energy[ph_indices],
+        tof[ph_indices],
+        r[ph_indices],
+        backtofvalid,
+        coinphvalid,
+        ancillary_files,
+    )
     ctof[ph_indices], magnitude_v[ph_indices] = get_ctof(
         tof[ph_indices], r[ph_indices], "PH"
     )
@@ -211,7 +251,11 @@ def calculate_de(
         d[ssd_indices],
     )
     e_bin[ssd_indices] = determine_ebin_ssd(
-        energy[ssd_indices], tof[ssd_indices], r[ssd_indices]
+        energy[ssd_indices],
+        tof[ssd_indices],
+        r[ssd_indices],
+        f"ultra{sensor}",
+        ancillary_files,
     )
     species_bin[ssd_indices] = determine_species(
         tof[ssd_indices], r[ssd_indices], "SSD"
@@ -237,17 +281,22 @@ def calculate_de(
     de_dict["phi"] = phi
     de_dict["theta"] = theta
 
-    v, vhat, r = get_de_velocity(
-        (de_dict["x_front"], de_dict["y_front"]),
-        (de_dict["x_back"], de_dict["y_back"]),
-        de_dict["front_back_distance"],
-        de_dict["tof_start_stop"],
+    velocities[valid_indices], v_hat[valid_indices], r_hat[valid_indices] = (
+        get_de_velocity(
+            (de_dict["x_front"][valid_indices], de_dict["y_front"][valid_indices]),
+            (de_dict["x_back"][valid_indices], de_dict["y_back"][valid_indices]),
+            de_dict["front_back_distance"][valid_indices],
+            de_dict["tof_start_stop"][valid_indices],
+        )
     )
-    de_dict["direct_event_velocity"] = v.astype(np.float32)
-    de_dict["direct_event_unit_velocity"] = vhat.astype(np.float32)
-    de_dict["direct_event_unit_position"] = r.astype(np.float32)
+    de_dict["direct_event_velocity"] = velocities.astype(np.float32)
+    de_dict["direct_event_unit_velocity"] = v_hat.astype(np.float32)
+    de_dict["direct_event_unit_position"] = r_hat.astype(np.float32)
 
-    de_dict["tof_energy"] = get_de_energy_kev(v, species_bin)
+    tof_energy[valid_indices] = get_de_energy_kev(
+        velocities[valid_indices], species_bin[valid_indices]
+    )
+    de_dict["tof_energy"] = tof_energy
     de_dict["energy"] = energy
     de_dict["ebin"] = e_bin
     de_dict["species"] = species_bin
@@ -289,20 +338,29 @@ def calculate_de(
         de_dict["tof_energy"], de_dict["phi"], de_dict["theta"], ancillary_files
     )
     de_dict["geometric_factor_blades"] = get_geometric_factor(
-        ancillary_files,
-        "l1b-sensor-gf-blades",
         de_dict["phi"],
         de_dict["theta"],
         quality_flags,
+        ancillary_files,
+        "l1b-sensor-gf-blades",
     )
     de_dict["geometric_factor_noblades"] = get_geometric_factor(
-        ancillary_files,
-        "l1b-sensor-gf-noblades",
         de_dict["phi"],
         de_dict["theta"],
         quality_flags,
+        ancillary_files,
+        "l1b-sensor-gf-noblades",
+    )
+    de_dict["quality_outliers"] = quality_flags
+    flag_scattering(
+        de_dict["tof_energy"],
+        de_dict["theta"],
+        de_dict["phi"],
+        ancillary_files,
+        sensor,
+        scattering_quality_flags,
     )
-    de_dict["quality_fov"] = quality_flags
+    de_dict["quality_scattering"] = scattering_quality_flags
 
     dataset = create_dataset(de_dict, name, "l1b")
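A recurring pattern in the reworked `calculate_de` is to pre-allocate every output at its fill value and write results only at `valid_indices`, so events that fail the validity checks keep the fill value rather than a garbage number. A minimal sketch of that masked-assignment pattern (the fill value and numbers are illustrative, not the package's real constants):

```python
import numpy as np

FILLVAL_FLOAT32 = np.float32(-1.0e31)  # stand-in for the package's fill constant

n_events = 6
valid_mask = np.array([True, False, True, True, False, True])
valid_indices = np.nonzero(valid_mask)[0]

# Pre-fill, then write only the events that passed validation.
tof_energy = np.full(n_events, FILLVAL_FLOAT32, dtype=np.float32)
computed = np.array([12.5, 48.0, 103.2, 7.9], dtype=np.float32)  # one per valid event
tof_energy[valid_indices] = computed

print(tof_energy)  # invalid events keep the fill value
```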
imap_processing/ultra/l1b/extendedspin.py

@@ -4,6 +4,7 @@ import numpy as np
 import xarray as xr
 
 from imap_processing.ultra.l1b.ultra_l1b_culling import (
+    count_rejected_events_per_spin,
     flag_attitude,
     flag_hk,
     flag_imap_instruments,
@@ -64,8 +65,15 @@ def calculate_extendedspin(
     first_epochs = filtered_dataset["epoch"].values[first_indices]
 
     # Get the number of pulses per spin.
-    start_per_spin, stop_per_spin, coin_per_spin = get_pulses_per_spin(rates_dataset)
+    pulses = get_pulses_per_spin(rates_dataset)
 
+    # Track rejected events in each spin based on
+    # quality flags in de l1b data.
+    rejected_counts = count_rejected_events_per_spin(
+        de_dataset["spin"].values,
+        de_dataset["quality_scattering"].values,
+        de_dataset["quality_outliers"].values,
+    )
     # These will be the coordinates.
     extendedspin_dict["epoch"] = first_epochs
     extendedspin_dict["spin_number"] = spin
@@ -76,15 +84,10 @@ def calculate_extendedspin(
     extendedspin_dict["spin_start_time"] = spin_starttime
     extendedspin_dict["spin_period"] = spin_period
     extendedspin_dict["spin_rate"] = spin_rates
-    extendedspin_dict["start_pulses_per_spin"] = start_per_spin
-    extendedspin_dict["stop_pulses_per_spin"] = stop_per_spin
-    extendedspin_dict["coin_pulses_per_spin"] = coin_per_spin
-    # TODO: this will be used to track rejected events in each
-    # spin based on quality flags in de l1b data.
-    extendedspin_dict["rejected_events_per_spin"] = np.full_like(
-        spin, FILLVAL_UINT16, dtype=np.uint16
-    )
-
+    extendedspin_dict["start_pulses_per_spin"] = pulses.start_per_spin
+    extendedspin_dict["stop_pulses_per_spin"] = pulses.stop_per_spin
+    extendedspin_dict["coin_pulses_per_spin"] = pulses.coin_per_spin
+    extendedspin_dict["rejected_events_per_spin"] = rejected_counts
     extendedspin_dict["quality_attitude"] = attitude_qf
     extendedspin_dict["quality_ena_rates"] = rates_qf
     extendedspin_dict["quality_hk"] = hk_qf
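`count_rejected_events_per_spin` replaces the old placeholder fill array and tallies, per spin number, how many direct events were flagged by the scattering or outlier quality masks. Its implementation is not shown in this diff; the sketch below is one plausible way such a tally could be computed, using a hypothetical helper name rather than the library function:

```python
import numpy as np


def count_rejected_per_spin_sketch(
    spins: np.ndarray, quality_scattering: np.ndarray, quality_outliers: np.ndarray
) -> tuple[np.ndarray, np.ndarray]:
    """Hypothetical per-spin tally of flagged events (not the library function)."""
    rejected = (quality_scattering != 0) | (quality_outliers != 0)
    unique_spins = np.unique(spins)
    counts = np.array(
        [np.count_nonzero(rejected[spins == s]) for s in unique_spins]
    )
    return unique_spins, counts


spins = np.array([5, 5, 5, 6, 6, 7])
scatter = np.array([0, 2, 0, 0, 1, 0])
outlier = np.array([0, 0, 4, 0, 0, 0])
print(count_rejected_per_spin_sketch(spins, scatter, outlier))
# (array([5, 6, 7]), array([2, 1, 0]))
```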
imap_processing/ultra/l1b/lookup_utils.py

@@ -6,7 +6,7 @@ import pandas as pd
 import xarray as xr
 from numpy.typing import NDArray
 
-from imap_processing.quality_flags import ImapDEUltraFlags
+from imap_processing.quality_flags import ImapDEOutliersUltraFlags
 
 
 def get_y_adjust(dy_lut: np.ndarray, ancillary_files: dict) -> npt.NDArray:
@@ -232,15 +232,12 @@ def get_energy_efficiencies(ancillary_files: dict) -> pd.DataFrame:
     return lookup_table
 
 
-def get_geometric_factor(
+def load_geometric_factor_tables(
     ancillary_files: dict,
     filename: str,
-    phi: NDArray,
-    theta: NDArray,
-    quality_flag: NDArray,
-) -> tuple[NDArray, NDArray]:
+) -> dict:
     """
-    Lookup table for geometric factor using nearest neighbor.
+    Lookup tables for geometric factor.
 
     Parameters
     ----------
@@ -248,17 +245,11 @@
         Ancillary files.
     filename : str
         Name of the file in ancillary_files to use.
-    phi : NDArray
-        Azimuth angles in degrees.
-    theta : NDArray
-        Elevation angles in degrees.
-    quality_flag : NDArray
-        Quality flag to set when geometric factor is zero.
 
     Returns
     -------
-    geometric_factor : NDArray
-        Geometric factor.
+    geometric_factor_tables : dict
+        Geometric factor lookup tables.
     """
     gf_table = pd.read_csv(
         ancillary_files[filename], header=None, skiprows=6, nrows=301
@@ -270,30 +261,207 @@
         ancillary_files[filename], header=None, skiprows=610, nrows=301
     ).to_numpy(dtype=float)
 
+    return {
+        "gf_table": gf_table,
+        "theta_table": theta_table,
+        "phi_table": phi_table,
+    }
+
+
+def get_geometric_factor(
+    phi: NDArray,
+    theta: NDArray,
+    quality_flag: NDArray,
+    ancillary_files: dict | None = None,
+    filename: str | None = None,
+    geometric_factor_tables: dict | None = None,
+) -> tuple[NDArray, NDArray]:
+    """
+    Lookup table for geometric factor using nearest neighbor.
+
+    Parameters
+    ----------
+    phi : NDArray
+        Azimuth angles in degrees.
+    theta : NDArray
+        Elevation angles in degrees.
+    quality_flag : NDArray
+        Quality flag to set when geometric factor is zero.
+    ancillary_files : dict[Path], optional
+        Ancillary files.
+    filename : str, optional
+        Name of the file in ancillary_files to use.
+    geometric_factor_tables : dict, optional
+        Preloaded geometric factor lookup tables. If not provided, will load.
+
+    Returns
+    -------
+    geometric_factor : NDArray
+        Geometric factor.
+    """
+    if geometric_factor_tables is None:
+        if ancillary_files is None or filename is None:
+            raise ValueError(
+                "ancillary_files and filename must be provided if "
+                "geometric_factor_tables is not supplied."
+            )
+        geometric_factor_tables = load_geometric_factor_tables(
+            ancillary_files, filename
+        )
     # Assume uniform grids: extract 1D arrays from first row/col
-    theta_vals = theta_table[0, :]  # columns represent theta
-    phi_vals = phi_table[:, 0]  # rows represent phi
+    theta_vals = geometric_factor_tables["theta_table"][0, :]  # columns represent theta
+    phi_vals = geometric_factor_tables["phi_table"][:, 0]  # rows represent phi
 
     # Find nearest index in table for each input value
     phi_idx = np.abs(phi_vals[:, None] - phi).argmin(axis=0)
     theta_idx = np.abs(theta_vals[:, None] - theta).argmin(axis=0)
 
     # Fetch geometric factor values at nearest (phi, theta) pairs
-    geometric_factor = gf_table[phi_idx, theta_idx]
-
-    phi_rad = np.deg2rad(phi)
-    numerator = 5.0 * np.cos(phi_rad)
-    denominator = 1 + 2.80 * np.cos(phi_rad)
-    # Equation 19 in the Ultra Algorithm Document.
-    theta_nom = np.arctan(numerator / denominator)
-    theta_nom = np.rad2deg(theta_nom)
+    geometric_factor = geometric_factor_tables["gf_table"][phi_idx, theta_idx]
 
-    outside_fov = np.abs(theta) > theta_nom
-    quality_flag[outside_fov] |= ImapDEUltraFlags.FOV.value
+    outside_fov = ~is_inside_fov(np.deg2rad(phi), np.deg2rad(theta))
+    quality_flag[outside_fov] |= ImapDEOutliersUltraFlags.FOV.value
 
     return geometric_factor
 
 
+def load_scattering_lookup_tables(ancillary_files: dict, instrument_id: int) -> dict:
+    """
+    Load scattering coefficient lookup tables for the specified instrument.
+
+    Parameters
+    ----------
+    ancillary_files : dict
+        Ancillary files.
+    instrument_id : int
+        Instrument ID, either 45 or 90.
+
+    Returns
+    -------
+    dict
+        Dictionary containing arrays for theta_grid, phi_grid, a_theta, g_theta,
+        a_phi, g_phi.
+    """
+    # TODO remove the line below when the 45 sensor scattering coefficients are
+    # delivered.
+    instrument_id = 90
+    descriptor = f"l1b-{instrument_id}sensor-scattering-calibration"
+    theta_grid = pd.read_csv(
+        ancillary_files[descriptor], header=None, skiprows=7, nrows=241
+    ).to_numpy(dtype=float)
+    phi_grid = pd.read_csv(
+        ancillary_files[descriptor], header=None, skiprows=249, nrows=241
+    ).to_numpy(dtype=float)
+    a_theta = pd.read_csv(
+        ancillary_files[descriptor], header=None, skiprows=491, nrows=241
+    ).to_numpy(dtype=float)
+    g_theta = pd.read_csv(
+        ancillary_files[descriptor], header=None, skiprows=733, nrows=241
+    ).to_numpy(dtype=float)
+    a_phi = pd.read_csv(
+        ancillary_files[descriptor], header=None, skiprows=975, nrows=241
+    ).to_numpy(dtype=float)
+    g_phi = pd.read_csv(
+        ancillary_files[descriptor], header=None, skiprows=1217, nrows=241
+    ).to_numpy(dtype=float)
+    return {
+        "theta_grid": theta_grid,
+        "phi_grid": phi_grid,
+        "a_theta": a_theta,
+        "g_theta": g_theta,
+        "a_phi": a_phi,
+        "g_phi": g_phi,
+    }
+
+
+def get_scattering_coefficients(
+    theta: NDArray,
+    phi: NDArray,
+    lookup_tables: dict | None = None,
+    ancillary_files: dict | None = None,
+    instrument_id: int | None = None,
+) -> tuple[NDArray, NDArray]:
+    """
+    Get a and g coefficients for theta and phi to compute scattering FWHM.
+
+    Parameters
+    ----------
+    theta : NDArray
+        Elevation angles in degrees.
+    phi : NDArray
+        Azimuth angles in degrees.
+    lookup_tables : dict, optional
+        Preloaded lookup tables. If not provided, will load using ancillary_files and
+        instrument_id.
+    ancillary_files : dict, optional
+        Ancillary files, required if lookup_tables is not provided.
+    instrument_id : int, optional
+        Instrument ID, required if lookup_tables is not provided.
+
+    Returns
+    -------
+    tuple
+        Scattering a and g values corresponding to the given theta and phi values.
+    """
+    if lookup_tables is None:
+        if ancillary_files is None or instrument_id is None:
+            raise ValueError(
+                "ancillary_files and instrument_id must be provided if lookup_tables "
+                "is not supplied."
+            )
+        lookup_tables = load_scattering_lookup_tables(ancillary_files, instrument_id)
+
+    theta_grid = lookup_tables["theta_grid"]
+    phi_grid = lookup_tables["phi_grid"]
+    a_theta = lookup_tables["a_theta"]
+    g_theta = lookup_tables["g_theta"]
+    a_phi = lookup_tables["a_phi"]
+    g_phi = lookup_tables["g_phi"]
+
+    theta_vals = theta_grid[0, :]  # columns represent theta
+    phi_vals = phi_grid[:, 0]  # rows represent phi
+
+    phi_idx = np.abs(phi_vals[:, None] - phi).argmin(axis=0)
+    theta_idx = np.abs(theta_vals[:, None] - theta).argmin(axis=0)
+
+    a_theta_val = a_theta[phi_idx, theta_idx]
+    g_theta_val = g_theta[phi_idx, theta_idx]
+    a_phi_val = a_phi[phi_idx, theta_idx]
+    g_phi_val = g_phi[phi_idx, theta_idx]
+
+    return np.column_stack([a_theta_val, g_theta_val]), np.column_stack(
+        [a_phi_val, g_phi_val]
+    )
+
+
+def is_inside_fov(phi: np.ndarray, theta: np.ndarray) -> np.ndarray:
+    """
+    Determine angles in the field of view (FOV).
+
+    This function is used in the deadtime correction to determine whether a given
+    (theta, phi) angle is within the instrument's Field of View (FOV).
+    Only pixels inside the FOV are considered for time accumulation. The FOV boundary
+    is defined by equation 19 in the Ultra Algorithm Document.
+
+    Parameters
+    ----------
+    phi : np.ndarray
+        Azimuth angles in radians.
+    theta : np.ndarray
+        Elevation angles in radians.
+
+    Returns
+    -------
+    numpy.ndarray
+        Boolean array indicating if the angle is in the FOV, False otherwise.
+    """
+    numerator = 5.0 * np.cos(phi)
+    denominator = 1 + 2.80 * np.cos(phi)
+    # Equation 19 in the Ultra Algorithm Document.
+    theta_nom = np.arctan(numerator / denominator)
+    return np.abs(theta) <= theta_nom
+
+
 def get_ph_corrected(
     sensor: str,
     location: str,
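Both `get_geometric_factor` and the new `get_scattering_coefficients` use the same vectorised nearest-neighbour lookup: broadcast the absolute difference between a 1-D grid axis and the query angles, then take `argmin` along the grid axis. The FOV check is now factored into `is_inside_fov`, which keeps only angles with |theta| <= arctan(5 cos(phi) / (1 + 2.8 cos(phi))). A self-contained sketch of the nearest-neighbour idiom with a toy grid (the numbers are illustrative, not the delivered calibration tables):

```python
import numpy as np

# Toy 1-D grids standing in for the calibration table axes.
phi_vals = np.array([-60.0, -30.0, 0.0, 30.0, 60.0])     # rows of the table
theta_vals = np.array([-40.0, 0.0, 40.0])                 # columns of the table
gf_table = np.arange(15, dtype=float).reshape(5, 3)       # fake geometric factors

phi = np.array([-28.0, 12.0, 55.0])    # query azimuths (degrees)
theta = np.array([-35.0, 5.0, 38.0])   # query elevations (degrees)

# Broadcast |grid - query| and take the closest grid index per query.
phi_idx = np.abs(phi_vals[:, None] - phi).argmin(axis=0)        # -> [1, 2, 4]
theta_idx = np.abs(theta_vals[:, None] - theta).argmin(axis=0)  # -> [0, 1, 2]

print(gf_table[phi_idx, theta_idx])  # nearest-neighbour values: [ 3.  7. 14.]
```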
@@ -343,8 +511,93 @@ def get_ph_corrected(
 
     # Flag where clamping occurred
     flagged_mask = (xlut != xlut_clamped) | (ylut != ylut_clamped)
-    quality_flag[flagged_mask] |= ImapDEUltraFlags.PHCORR.value
+    quality_flag[flagged_mask] |= ImapDEOutliersUltraFlags.PHCORR.value
 
     ph_correction = ph_correct_array[xlut_clamped, ylut_clamped]
 
     return ph_correction, quality_flag
+
+
+def get_ebins(
+    lut: str,
+    energy: NDArray,
+    ctof: NDArray,
+    ebins: NDArray,
+    ancillary_files: dict,
+) -> NDArray:
+    """
+    Get energy bins from the lookup table.
+
+    Parameters
+    ----------
+    lut : str
+        Lookup table name, e.g., "l1b-tofxpht".
+    energy : NDArray
+        Energy from the event (keV).
+    ctof : NDArray
+        Corrected TOF (tenths of a ns).
+    ebins : NDArray
+        Energy bins to fill with values.
+    ancillary_files : dict[Path]
+        Ancillary files.
+
+    Returns
+    -------
+    ebins : NDArray
+        Energy bins from the lookup table.
+    """
+    with open(ancillary_files[lut]) as f:
+        all_lines = f.readlines()
+    pixel_text = "".join(all_lines[4:])
+
+    lut_array = np.fromstring(pixel_text, sep=" ", dtype=int).reshape((2048, 4096))
+    # Note that the LUT is indexed [energy, ctof] for l1b-tofxph
+    # and [ctof, energy] for everything else.
+    if lut == "l1b-tofxph":
+        energy_lookup = (2048 - np.floor(energy)).astype(int)
+        ctof_lookup = np.floor(ctof).astype(int)
+        valid = (
+            (energy_lookup >= 0)
+            & (energy_lookup < 2048)
+            & (ctof_lookup >= 0)
+            & (ctof_lookup < 4096)
+        )
+        ebins[valid] = lut_array[energy_lookup[valid], ctof_lookup[valid]]
+    else:
+        energy_lookup = np.floor(energy).astype(int)
+        ctof_lookup = (2048 - np.floor(ctof)).astype(int)
+        valid = (
+            (energy_lookup >= 0)
+            & (energy_lookup < 4096)
+            & (ctof_lookup >= 0)
+            & (ctof_lookup < 2048)
+        )
+        ebins[valid] = lut_array[ctof_lookup[valid], energy_lookup[valid]]
+
+    return ebins
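The bounds-checked indexing in `get_ebins` floors each event's energy and corrected TOF into integer table coordinates, flips one axis with the `2048 - ...` offset, and reads the table only where both coordinates are in range, leaving the caller-supplied fill value elsewhere. A toy sketch of that pattern (a tiny table and arbitrary numbers, not the real 2048 x 4096 LUT):

```python
import numpy as np

# Toy stand-in for the energy-bin lookup table.
lut_array = np.arange(12).reshape(3, 4)

energy = np.array([0.4, 1.7, 9.0])   # indexes rows after flooring
ctof = np.array([2.2, 3.9, 1.0])     # indexes columns after flooring

energy_lookup = np.floor(energy).astype(int)
ctof_lookup = np.floor(ctof).astype(int)

# Only index the table where both coordinates fall inside its bounds,
# mirroring the validity check in get_ebins.
ebins = np.full(energy.shape, 255, dtype=int)  # 255 as an illustrative fill value
valid = (
    (energy_lookup >= 0)
    & (energy_lookup < lut_array.shape[0])
    & (ctof_lookup >= 0)
    & (ctof_lookup < lut_array.shape[1])
)
ebins[valid] = lut_array[energy_lookup[valid], ctof_lookup[valid]]
print(ebins)  # [  2   7 255] - the out-of-range event keeps the fill value
```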
+def get_scattering_thresholds(ancillary_files: dict) -> dict:
+    """
+    Load scattering culling thresholds as a function of energy from a lookup table.
+
+    Parameters
+    ----------
+    ancillary_files : dict[Path]
+        Ancillary files.
+
+    Returns
+    -------
+    threshold_dict
+        Dictionary containing energy ranges and the corresponding scattering culling
+        threshold.
+    """
+    # Culling FWHM Scattering values as a function of energy.
+    thresholds = pd.read_csv(
+        ancillary_files["l1b-scattering-thresholds-per-energy"], header=None, skiprows=1
+    ).to_numpy(dtype=np.float64)
+    # The first two columns represent the energy range (min, max) in keV, and the
+    # value is the FWHM scattering threshold in degrees
+    threshold_dict = {(row[0], row[1]): row[2] for row in thresholds}
+
+    return threshold_dict
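`get_scattering_thresholds` returns a dict keyed by (energy_min, energy_max) tuples in keV, with the FWHM scattering threshold in degrees as the value. How the scattering culling consumes it is not shown in this diff; the sketch below shows one plausible way a caller might pick the threshold for an event energy, using a hypothetical helper and illustrative numbers rather than the delivered calibration values:

```python
import numpy as np

# Shape of the structure returned by get_scattering_thresholds:
# {(energy_min_keV, energy_max_keV): fwhm_threshold_degrees, ...}
threshold_dict = {
    (0.0, 10.0): 12.0,
    (10.0, 50.0): 8.0,
    (50.0, 300.0): 5.0,
}  # illustrative numbers only


def threshold_for_energy(energy_kev: float, thresholds: dict) -> float:
    """Hypothetical helper: pick the threshold whose energy range contains energy_kev."""
    for (e_min, e_max), fwhm in thresholds.items():
        if e_min <= energy_kev < e_max:
            return fwhm
    return np.nan  # outside all tabulated ranges


print(threshold_for_energy(25.0, threshold_dict))  # 8.0
```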