imap-processing 0.18.0__py3-none-any.whl → 0.19.2__py3-none-any.whl

This diff shows the contents of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between these versions as they appear in their respective public registries.

Potentially problematic release.
Files changed (122)
  1. imap_processing/_version.py +2 -2
  2. imap_processing/ancillary/ancillary_dataset_combiner.py +161 -1
  3. imap_processing/cdf/config/imap_codice_global_cdf_attrs.yaml +6 -0
  4. imap_processing/cdf/config/imap_codice_l1a_variable_attrs.yaml +221 -1057
  5. imap_processing/cdf/config/imap_codice_l1b_variable_attrs.yaml +307 -283
  6. imap_processing/cdf/config/imap_codice_l2_variable_attrs.yaml +1044 -203
  7. imap_processing/cdf/config/imap_constant_attrs.yaml +4 -2
  8. imap_processing/cdf/config/imap_enamaps_l2-common_variable_attrs.yaml +11 -0
  9. imap_processing/cdf/config/imap_glows_l1b_variable_attrs.yaml +15 -1
  10. imap_processing/cdf/config/imap_hi_global_cdf_attrs.yaml +5 -0
  11. imap_processing/cdf/config/imap_hit_global_cdf_attrs.yaml +10 -4
  12. imap_processing/cdf/config/imap_idex_l2a_variable_attrs.yaml +33 -4
  13. imap_processing/cdf/config/imap_idex_l2b_variable_attrs.yaml +8 -91
  14. imap_processing/cdf/config/imap_idex_l2c_variable_attrs.yaml +106 -16
  15. imap_processing/cdf/config/imap_lo_global_cdf_attrs.yaml +5 -4
  16. imap_processing/cdf/config/imap_lo_l1a_variable_attrs.yaml +4 -15
  17. imap_processing/cdf/config/imap_lo_l1c_variable_attrs.yaml +189 -98
  18. imap_processing/cdf/config/imap_mag_global_cdf_attrs.yaml +85 -2
  19. imap_processing/cdf/config/imap_mag_l1c_variable_attrs.yaml +24 -1
  20. imap_processing/cdf/config/imap_ultra_global_cdf_attrs.yaml +20 -8
  21. imap_processing/cdf/config/imap_ultra_l1b_variable_attrs.yaml +45 -35
  22. imap_processing/cdf/config/imap_ultra_l1c_variable_attrs.yaml +110 -7
  23. imap_processing/cli.py +138 -93
  24. imap_processing/codice/codice_l0.py +2 -1
  25. imap_processing/codice/codice_l1a.py +167 -69
  26. imap_processing/codice/codice_l1b.py +42 -32
  27. imap_processing/codice/codice_l2.py +215 -9
  28. imap_processing/codice/constants.py +790 -603
  29. imap_processing/codice/data/lo_stepping_values.csv +1 -1
  30. imap_processing/decom.py +1 -4
  31. imap_processing/ena_maps/ena_maps.py +71 -43
  32. imap_processing/ena_maps/utils/corrections.py +291 -0
  33. imap_processing/ena_maps/utils/map_utils.py +20 -4
  34. imap_processing/ena_maps/utils/naming.py +8 -2
  35. imap_processing/glows/ancillary/imap_glows_exclusions-by-instr-team_20250923_v002.dat +10 -0
  36. imap_processing/glows/ancillary/imap_glows_map-of-excluded-regions_20250923_v002.dat +393 -0
  37. imap_processing/glows/ancillary/imap_glows_map-of-uv-sources_20250923_v002.dat +593 -0
  38. imap_processing/glows/ancillary/imap_glows_pipeline-settings_20250923_v002.json +54 -0
  39. imap_processing/glows/ancillary/imap_glows_suspected-transients_20250923_v002.dat +10 -0
  40. imap_processing/glows/l1b/glows_l1b.py +123 -18
  41. imap_processing/glows/l1b/glows_l1b_data.py +358 -47
  42. imap_processing/glows/l2/glows_l2.py +11 -0
  43. imap_processing/hi/hi_l1a.py +124 -3
  44. imap_processing/hi/hi_l1b.py +154 -71
  45. imap_processing/hi/hi_l1c.py +4 -109
  46. imap_processing/hi/hi_l2.py +104 -60
  47. imap_processing/hi/utils.py +262 -8
  48. imap_processing/hit/l0/constants.py +3 -0
  49. imap_processing/hit/l0/decom_hit.py +3 -6
  50. imap_processing/hit/l1a/hit_l1a.py +311 -21
  51. imap_processing/hit/l1b/hit_l1b.py +54 -126
  52. imap_processing/hit/l2/hit_l2.py +6 -6
  53. imap_processing/ialirt/calculate_ingest.py +219 -0
  54. imap_processing/ialirt/constants.py +12 -2
  55. imap_processing/ialirt/generate_coverage.py +15 -2
  56. imap_processing/ialirt/l0/ialirt_spice.py +6 -2
  57. imap_processing/ialirt/l0/parse_mag.py +293 -42
  58. imap_processing/ialirt/l0/process_hit.py +5 -3
  59. imap_processing/ialirt/l0/process_swapi.py +41 -25
  60. imap_processing/ialirt/process_ephemeris.py +70 -14
  61. imap_processing/ialirt/utils/create_xarray.py +1 -1
  62. imap_processing/idex/idex_l0.py +2 -2
  63. imap_processing/idex/idex_l1a.py +2 -3
  64. imap_processing/idex/idex_l1b.py +2 -3
  65. imap_processing/idex/idex_l2a.py +130 -4
  66. imap_processing/idex/idex_l2b.py +158 -143
  67. imap_processing/idex/idex_utils.py +1 -3
  68. imap_processing/lo/ancillary_data/imap_lo_hydrogen-geometric-factor_v001.csv +75 -0
  69. imap_processing/lo/ancillary_data/imap_lo_oxygen-geometric-factor_v001.csv +75 -0
  70. imap_processing/lo/l0/lo_science.py +25 -24
  71. imap_processing/lo/l1b/lo_l1b.py +93 -19
  72. imap_processing/lo/l1c/lo_l1c.py +273 -93
  73. imap_processing/lo/l2/lo_l2.py +949 -135
  74. imap_processing/lo/lo_ancillary.py +55 -0
  75. imap_processing/mag/l1a/mag_l1a.py +1 -0
  76. imap_processing/mag/l1a/mag_l1a_data.py +26 -0
  77. imap_processing/mag/l1b/mag_l1b.py +3 -2
  78. imap_processing/mag/l1c/interpolation_methods.py +14 -15
  79. imap_processing/mag/l1c/mag_l1c.py +23 -6
  80. imap_processing/mag/l1d/mag_l1d.py +57 -14
  81. imap_processing/mag/l1d/mag_l1d_data.py +202 -32
  82. imap_processing/mag/l2/mag_l2.py +2 -0
  83. imap_processing/mag/l2/mag_l2_data.py +14 -5
  84. imap_processing/quality_flags.py +23 -1
  85. imap_processing/spice/geometry.py +89 -39
  86. imap_processing/spice/pointing_frame.py +4 -8
  87. imap_processing/spice/repoint.py +78 -2
  88. imap_processing/spice/spin.py +28 -8
  89. imap_processing/spice/time.py +12 -22
  90. imap_processing/swapi/l1/swapi_l1.py +10 -4
  91. imap_processing/swapi/l2/swapi_l2.py +15 -17
  92. imap_processing/swe/l1b/swe_l1b.py +1 -2
  93. imap_processing/ultra/constants.py +30 -24
  94. imap_processing/ultra/l0/ultra_utils.py +9 -11
  95. imap_processing/ultra/l1a/ultra_l1a.py +1 -2
  96. imap_processing/ultra/l1b/badtimes.py +35 -11
  97. imap_processing/ultra/l1b/de.py +95 -31
  98. imap_processing/ultra/l1b/extendedspin.py +31 -16
  99. imap_processing/ultra/l1b/goodtimes.py +112 -0
  100. imap_processing/ultra/l1b/lookup_utils.py +281 -28
  101. imap_processing/ultra/l1b/quality_flag_filters.py +10 -1
  102. imap_processing/ultra/l1b/ultra_l1b.py +7 -7
  103. imap_processing/ultra/l1b/ultra_l1b_culling.py +169 -7
  104. imap_processing/ultra/l1b/ultra_l1b_extended.py +311 -69
  105. imap_processing/ultra/l1c/helio_pset.py +139 -37
  106. imap_processing/ultra/l1c/l1c_lookup_utils.py +289 -0
  107. imap_processing/ultra/l1c/spacecraft_pset.py +140 -29
  108. imap_processing/ultra/l1c/ultra_l1c.py +33 -24
  109. imap_processing/ultra/l1c/ultra_l1c_culling.py +92 -0
  110. imap_processing/ultra/l1c/ultra_l1c_pset_bins.py +400 -292
  111. imap_processing/ultra/l2/ultra_l2.py +54 -11
  112. imap_processing/ultra/utils/ultra_l1_utils.py +37 -7
  113. imap_processing/utils.py +3 -4
  114. {imap_processing-0.18.0.dist-info → imap_processing-0.19.2.dist-info}/METADATA +2 -2
  115. {imap_processing-0.18.0.dist-info → imap_processing-0.19.2.dist-info}/RECORD +118 -109
  116. imap_processing/idex/idex_l2c.py +0 -84
  117. imap_processing/spice/kernels.py +0 -187
  118. imap_processing/ultra/l1b/cullingmask.py +0 -87
  119. imap_processing/ultra/l1c/histogram.py +0 -36
  120. {imap_processing-0.18.0.dist-info → imap_processing-0.19.2.dist-info}/LICENSE +0 -0
  121. {imap_processing-0.18.0.dist-info → imap_processing-0.19.2.dist-info}/WHEEL +0 -0
  122. {imap_processing-0.18.0.dist-info → imap_processing-0.19.2.dist-info}/entry_points.txt +0 -0
imap_processing/ultra/l1b/goodtimes.py (new file)
@@ -0,0 +1,112 @@
+"""Calculate Goodtimes."""
+
+import numpy as np
+import xarray as xr
+
+from imap_processing.ultra.l1b.quality_flag_filters import SPIN_QUALITY_FLAG_FILTERS
+from imap_processing.ultra.utils.ultra_l1_utils import create_dataset, extract_data_dict
+
+FILLVAL_UINT16 = 65535
+FILLVAL_FLOAT32 = -1.0e31
+FILLVAL_FLOAT64 = -1.0e31
+FILLVAL_UINT32 = 4294967295
+
+
+def calculate_goodtimes(extendedspin_dataset: xr.Dataset, name: str) -> xr.Dataset:
+    """
+    Create dataset with defined datatype for Goodtimes Data.
+
+    Parameters
+    ----------
+    extendedspin_dataset : xarray.Dataset
+        Dataset containing the data.
+    name : str
+        Name of the dataset.
+
+    Returns
+    -------
+    goodtimes_dataset : xarray.Dataset
+        Dataset containing the extendedspin data that remains after culling.
+    """
+    n_bins = extendedspin_dataset.dims["energy_bin_geometric_mean"]
+    # If the spin rate was too high or low then the spin should be thrown out.
+    # If the rates at any energy level are too high then throw out the entire spin.
+    good_mask = (
+        (
+            extendedspin_dataset["quality_attitude"]
+            & sum(flag.value for flag in SPIN_QUALITY_FLAG_FILTERS["quality_attitude"])
+        )
+        == 0
+    ) & (
+        (
+            (
+                extendedspin_dataset["quality_ena_rates"]
+                & sum(
+                    flag.value
+                    for flag in SPIN_QUALITY_FLAG_FILTERS["quality_ena_rates"]
+                )
+            )
+            == 0
+        ).all(dim="energy_bin_geometric_mean")
+    )
+    filtered_dataset = extendedspin_dataset.sel(
+        spin_number=extendedspin_dataset["spin_number"][good_mask]
+    )
+
+    data_dict = extract_data_dict(filtered_dataset)
+
+    goodtimes_dataset = create_dataset(data_dict, name, "l1b")
+
+    if goodtimes_dataset["spin_number"].size == 0:
+        goodtimes_dataset = goodtimes_dataset.drop_dims("spin_number")
+        goodtimes_dataset = goodtimes_dataset.expand_dims(spin_number=[FILLVAL_UINT32])
+        goodtimes_dataset["spin_start_time"] = xr.DataArray(
+            np.array([FILLVAL_FLOAT64], dtype="float64"), dims=["spin_number"]
+        )
+        goodtimes_dataset["spin_period"] = xr.DataArray(
+            np.array([FILLVAL_FLOAT64], dtype="float64"), dims=["spin_number"]
+        )
+        goodtimes_dataset["spin_rate"] = xr.DataArray(
+            np.array([FILLVAL_FLOAT64], dtype="float64"), dims=["spin_number"]
+        )
+        goodtimes_dataset["start_pulses_per_spin"] = xr.DataArray(
+            np.array([FILLVAL_FLOAT32], dtype="float32"),
+            dims=["spin_number"],
+        )
+        goodtimes_dataset["stop_pulses_per_spin"] = xr.DataArray(
+            np.array([FILLVAL_FLOAT32], dtype="float32"),
+            dims=["spin_number"],
+        )
+        goodtimes_dataset["coin_pulses_per_spin"] = xr.DataArray(
+            np.array([FILLVAL_FLOAT32], dtype="float32"),
+            dims=["spin_number"],
+        )
+        goodtimes_dataset["rejected_events_per_spin"] = xr.DataArray(
+            np.array([FILLVAL_UINT32], dtype="uint32"),
+            dims=["spin_number"],
+        )
+        goodtimes_dataset["quality_attitude"] = xr.DataArray(
+            np.array([FILLVAL_UINT16], dtype="uint16"), dims=["spin_number"]
+        )
+        goodtimes_dataset["quality_hk"] = xr.DataArray(
+            np.array([FILLVAL_UINT16], dtype="uint16"),
+            dims=["spin_number"],
+        )
+        goodtimes_dataset["quality_instruments"] = xr.DataArray(
+            np.array([FILLVAL_UINT16], dtype="uint16"),
+            dims=["spin_number"],
+        )
+        goodtimes_dataset["quality_ena_rates"] = (
+            ("energy_bin_geometric_mean", "spin_number"),
+            np.full((n_bins, 1), FILLVAL_UINT16, dtype="uint16"),
+        )
+        goodtimes_dataset["ena_rates"] = (
+            ("energy_bin_geometric_mean", "spin_number"),
+            np.full((n_bins, 1), FILLVAL_FLOAT64, dtype="float64"),
+        )
+        goodtimes_dataset["ena_rates_threshold"] = (
+            ("energy_bin_geometric_mean", "spin_number"),
+            np.full((n_bins, 1), FILLVAL_FLOAT32, dtype="float32"),
+        )
+
+    return goodtimes_dataset
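
For orientation, the good-spin selection above is plain bitmask arithmetic: each filter list is collapsed to an integer mask and ANDed against the per-spin quality flags. Below is a minimal, self-contained sketch of that masking logic with toy flag values and a toy extendedspin dataset; only calculate_goodtimes itself is the real API, everything else here is illustrative.

import numpy as np
import xarray as xr

# Toy flag masks standing in for the summed SPIN_QUALITY_FLAG_FILTERS values.
ATTITUDE_FILTER_MASK = 0b00   # an empty filter list sums to 0
ENA_RATES_FILTER_MASK = 0b11  # e.g. FIRSTSPIN | LASTSPIN

extendedspin = xr.Dataset(
    {
        "quality_attitude": ("spin_number", np.array([0, 0, 4], dtype=np.uint16)),
        "quality_ena_rates": (
            ("energy_bin_geometric_mean", "spin_number"),
            np.array([[0, 1, 0], [0, 0, 0]], dtype=np.uint16),
        ),
    },
    coords={"spin_number": [10, 11, 12]},
)

# A spin survives only if no filtered attitude bits are set and no filtered
# ENA-rate bits are set at any energy bin.
good_mask = ((extendedspin["quality_attitude"] & ATTITUDE_FILTER_MASK) == 0) & (
    ((extendedspin["quality_ena_rates"] & ENA_RATES_FILTER_MASK) == 0).all(
        dim="energy_bin_geometric_mean"
    )
)

goodtimes = extendedspin.sel(spin_number=extendedspin["spin_number"][good_mask])
print(goodtimes["spin_number"].values)  # [10 12]; spin 11 is culled by its rate flag
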
imap_processing/ultra/l1b/lookup_utils.py
@@ -6,7 +6,7 @@ import pandas as pd
 import xarray as xr
 from numpy.typing import NDArray
 
-from imap_processing.quality_flags import ImapDEUltraFlags
+from imap_processing.quality_flags import ImapDEOutliersUltraFlags
 
 
 def get_y_adjust(dy_lut: np.ndarray, ancillary_files: dict) -> npt.NDArray:
@@ -232,15 +232,12 @@ def get_energy_efficiencies(ancillary_files: dict) -> pd.DataFrame:
     return lookup_table
 
 
-def get_geometric_factor(
+def load_geometric_factor_tables(
     ancillary_files: dict,
     filename: str,
-    phi: NDArray,
-    theta: NDArray,
-    quality_flag: NDArray,
-) -> tuple[NDArray, NDArray]:
+) -> dict:
     """
-    Lookup table for geometric factor using nearest neighbor.
+    Lookup tables for geometric factor.
 
     Parameters
     ----------
@@ -248,17 +245,11 @@ def get_geometric_factor(
         Ancillary files.
     filename : str
        Name of the file in ancillary_files to use.
-    phi : NDArray
-        Azimuth angles in degrees.
-    theta : NDArray
-        Elevation angles in degrees.
-    quality_flag : NDArray
-        Quality flag to set when geometric factor is zero.
 
     Returns
     -------
-    geometric_factor : NDArray
-        Geometric factor.
+    geometric_factor_tables : dict
+        Geometric factor lookup tables.
     """
     gf_table = pd.read_csv(
         ancillary_files[filename], header=None, skiprows=6, nrows=301
@@ -270,30 +261,207 @@ def get_geometric_factor(
         ancillary_files[filename], header=None, skiprows=610, nrows=301
     ).to_numpy(dtype=float)
 
+    return {
+        "gf_table": gf_table,
+        "theta_table": theta_table,
+        "phi_table": phi_table,
+    }
+
+
+def get_geometric_factor(
+    phi: NDArray,
+    theta: NDArray,
+    quality_flag: NDArray,
+    ancillary_files: dict | None = None,
+    filename: str | None = None,
+    geometric_factor_tables: dict | None = None,
+) -> tuple[NDArray, NDArray]:
+    """
+    Lookup table for geometric factor using nearest neighbor.
+
+    Parameters
+    ----------
+    phi : NDArray
+        Azimuth angles in degrees.
+    theta : NDArray
+        Elevation angles in degrees.
+    quality_flag : NDArray
+        Quality flag to set when geometric factor is zero.
+    ancillary_files : dict[Path], optional
+        Ancillary files.
+    filename : str, optional
+        Name of the file in ancillary_files to use.
+    geometric_factor_tables : dict, optional
+        Preloaded geometric factor lookup tables. If not provided, will load.
+
+    Returns
+    -------
+    geometric_factor : NDArray
+        Geometric factor.
+    """
+    if geometric_factor_tables is None:
+        if ancillary_files is None or filename is None:
+            raise ValueError(
+                "ancillary_files and filename must be provided if "
+                "geometric_factor_tables is not supplied."
+            )
+        geometric_factor_tables = load_geometric_factor_tables(
+            ancillary_files, filename
+        )
     # Assume uniform grids: extract 1D arrays from first row/col
-    theta_vals = theta_table[0, :]  # columns represent theta
-    phi_vals = phi_table[:, 0]  # rows represent phi
+    theta_vals = geometric_factor_tables["theta_table"][0, :]  # columns represent theta
+    phi_vals = geometric_factor_tables["phi_table"][:, 0]  # rows represent phi
 
     # Find nearest index in table for each input value
     phi_idx = np.abs(phi_vals[:, None] - phi).argmin(axis=0)
     theta_idx = np.abs(theta_vals[:, None] - theta).argmin(axis=0)
 
     # Fetch geometric factor values at nearest (phi, theta) pairs
-    geometric_factor = gf_table[phi_idx, theta_idx]
-
-    phi_rad = np.deg2rad(phi)
-    numerator = 5.0 * np.cos(phi_rad)
-    denominator = 1 + 2.80 * np.cos(phi_rad)
-    # Equation 19 in the Ultra Algorithm Document.
-    theta_nom = np.arctan(numerator / denominator)
-    theta_nom = np.rad2deg(theta_nom)
+    geometric_factor = geometric_factor_tables["gf_table"][phi_idx, theta_idx]
 
-    outside_fov = np.abs(theta) > theta_nom
-    quality_flag[outside_fov] |= ImapDEUltraFlags.FOV.value
+    outside_fov = ~is_inside_fov(np.deg2rad(phi), np.deg2rad(theta))
+    quality_flag[outside_fov] |= ImapDEOutliersUltraFlags.FOV.value
 
     return geometric_factor
 
 
+def load_scattering_lookup_tables(ancillary_files: dict, instrument_id: int) -> dict:
+    """
+    Load scattering coefficient lookup tables for the specified instrument.
+
+    Parameters
+    ----------
+    ancillary_files : dict
+        Ancillary files.
+    instrument_id : int
+        Instrument ID, either 45 or 90.
+
+    Returns
+    -------
+    dict
+        Dictionary containing arrays for theta_grid, phi_grid, a_theta, g_theta,
+        a_phi, g_phi.
+    """
+    # TODO remove the line below when the 45 sensor scattering coefficients are
+    # delivered.
+    instrument_id = 90
+    descriptor = f"l1b-{instrument_id}sensor-scattering-calibration-data"
+    theta_grid = pd.read_csv(
+        ancillary_files[descriptor], header=None, skiprows=7, nrows=241
+    ).to_numpy(dtype=float)
+    phi_grid = pd.read_csv(
+        ancillary_files[descriptor], header=None, skiprows=249, nrows=241
+    ).to_numpy(dtype=float)
+    a_theta = pd.read_csv(
+        ancillary_files[descriptor], header=None, skiprows=491, nrows=241
+    ).to_numpy(dtype=float)
+    g_theta = pd.read_csv(
+        ancillary_files[descriptor], header=None, skiprows=733, nrows=241
+    ).to_numpy(dtype=float)
+    a_phi = pd.read_csv(
+        ancillary_files[descriptor], header=None, skiprows=975, nrows=241
+    ).to_numpy(dtype=float)
+    g_phi = pd.read_csv(
+        ancillary_files[descriptor], header=None, skiprows=1217, nrows=241
+    ).to_numpy(dtype=float)
+    return {
+        "theta_grid": theta_grid,
+        "phi_grid": phi_grid,
+        "a_theta": a_theta,
+        "g_theta": g_theta,
+        "a_phi": a_phi,
+        "g_phi": g_phi,
+    }
+
+
+def get_scattering_coefficients(
+    theta: NDArray,
+    phi: NDArray,
+    lookup_tables: dict | None = None,
+    ancillary_files: dict | None = None,
+    instrument_id: int | None = None,
+) -> tuple[NDArray, NDArray]:
+    """
+    Get a and g coefficients for theta and phi to compute scattering FWHM.
+
+    Parameters
+    ----------
+    theta : NDArray
+        Elevation angles in degrees.
+    phi : NDArray
+        Azimuth angles in degrees.
+    lookup_tables : dict, optional
+        Preloaded lookup tables. If not provided, will load using ancillary_files and
+        instrument_id.
+    ancillary_files : dict, optional
+        Ancillary files, required if lookup_tables is not provided.
+    instrument_id : int, optional
+        Instrument ID, required if lookup_tables is not provided.
+
+    Returns
+    -------
+    tuple
+        Scattering a and g values corresponding to the given theta and phi values.
+    """
+    if lookup_tables is None:
+        if ancillary_files is None or instrument_id is None:
+            raise ValueError(
+                "ancillary_files and instrument_id must be provided if lookup_tables "
+                "is not supplied."
+            )
+        lookup_tables = load_scattering_lookup_tables(ancillary_files, instrument_id)
+
+    theta_grid = lookup_tables["theta_grid"]
+    phi_grid = lookup_tables["phi_grid"]
+    a_theta = lookup_tables["a_theta"]
+    g_theta = lookup_tables["g_theta"]
+    a_phi = lookup_tables["a_phi"]
+    g_phi = lookup_tables["g_phi"]
+
+    theta_vals = theta_grid[0, :]  # columns represent theta
+    phi_vals = phi_grid[:, 0]  # rows represent phi
+
+    phi_idx = np.abs(phi_vals[:, None] - phi).argmin(axis=0)
+    theta_idx = np.abs(theta_vals[:, None] - theta).argmin(axis=0)
+
+    a_theta_val = a_theta[phi_idx, theta_idx]
+    g_theta_val = g_theta[phi_idx, theta_idx]
+    a_phi_val = a_phi[phi_idx, theta_idx]
+    g_phi_val = g_phi[phi_idx, theta_idx]
+
+    return np.column_stack([a_theta_val, g_theta_val]), np.column_stack(
+        [a_phi_val, g_phi_val]
+    )
+
+
+def is_inside_fov(phi: np.ndarray, theta: np.ndarray) -> np.ndarray:
+    """
+    Determine angles in the field of view (FOV).
+
+    This function is used in the deadtime correction to determine whether a given
+    (theta, phi) angle is within the instrument's Field of View (FOV).
+    Only pixels inside the FOV are considered for time accumulation. The FOV boundary
+    is defined by equation 19 in the Ultra Algorithm Document.
+
+    Parameters
+    ----------
+    phi : np.ndarray
+        Azimuth angles in radians.
+    theta : np.ndarray
+        Elevation angles in radians.
+
+    Returns
+    -------
+    numpy.ndarray
+        Boolean array indicating if the angle is in the FOV, False otherwise.
+    """
+    numerator = 5.0 * np.cos(phi)
+    denominator = 1 + 2.80 * np.cos(phi)
+    # Equation 19 in the Ultra Algorithm Document.
+    theta_nom = np.arctan(numerator / denominator)
+    return np.abs(theta) <= theta_nom
+
+
 def get_ph_corrected(
     sensor: str,
     location: str,
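
As a quick numeric check of the Eq. 19 boundary that is_inside_fov now encapsulates: at phi = 0 the nominal elevation limit is arctan(5.0 / 3.8), roughly 52.8 degrees, and it shrinks as |phi| grows. A standalone sketch mirroring the formula (angles handled in radians, as in the new function):

import numpy as np

phi = np.deg2rad(np.array([0.0, 60.0]))     # azimuth
theta = np.deg2rad(np.array([40.0, 50.0]))  # elevation

# Equation 19 boundary, as computed in is_inside_fov.
theta_nom = np.arctan(5.0 * np.cos(phi) / (1 + 2.80 * np.cos(phi)))
print(np.rad2deg(theta_nom))       # ~[52.8, 46.2]
print(np.abs(theta) <= theta_nom)  # [ True False]
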
@@ -343,8 +511,93 @@ def get_ph_corrected(
 
     # Flag where clamping occurred
     flagged_mask = (xlut != xlut_clamped) | (ylut != ylut_clamped)
-    quality_flag[flagged_mask] |= ImapDEUltraFlags.PHCORR.value
+    quality_flag[flagged_mask] |= ImapDEOutliersUltraFlags.PHCORR.value
 
     ph_correction = ph_correct_array[xlut_clamped, ylut_clamped]
 
     return ph_correction, quality_flag
+
+
+def get_ebins(
+    lut: str,
+    energy: NDArray,
+    ctof: NDArray,
+    ebins: NDArray,
+    ancillary_files: dict,
+) -> NDArray:
+    """
+    Get energy bins from the lookup table.
+
+    Parameters
+    ----------
+    lut : str
+        Lookup table name, e.g., "l1b-tofxpht".
+    energy : NDArray
+        Energy from the event (keV).
+    ctof : NDArray
+        Corrected TOF (tenths of a ns).
+    ebins : NDArray
+        Energy bins to fill with values.
+    ancillary_files : dict[Path]
+        Ancillary files.
+
+    Returns
+    -------
+    ebins : NDArray
+        Energy bins from the lookup table.
+    """
+    with open(ancillary_files[lut]) as f:
+        all_lines = f.readlines()
+    pixel_text = "".join(all_lines[4:])
+
+    lut_array = np.fromstring(pixel_text, sep=" ", dtype=int).reshape((2048, 4096))
+    # Note that the LUT is indexed [energy, ctof] for l1b-tofxph
+    # and [ctof, energy] for everything else.
+    if lut == "l1b-tofxph":
+        energy_lookup = (2048 - np.floor(energy)).astype(int)
+        ctof_lookup = np.floor(ctof).astype(int)
+        valid = (
+            (energy_lookup >= 0)
+            & (energy_lookup < 2048)
+            & (ctof_lookup >= 0)
+            & (ctof_lookup < 4096)
+        )
+        ebins[valid] = lut_array[energy_lookup[valid], ctof_lookup[valid]]
+    else:
+        energy_lookup = np.floor(energy).astype(int)
+        ctof_lookup = (2048 - np.floor(ctof)).astype(int)
+        valid = (
+            (energy_lookup >= 0)
+            & (energy_lookup < 4096)
+            & (ctof_lookup >= 0)
+            & (ctof_lookup < 2048)
+        )
+        ebins[valid] = lut_array[ctof_lookup[valid], energy_lookup[valid]]
+
+    return ebins
+
+
+def get_scattering_thresholds(ancillary_files: dict) -> dict:
+    """
+    Load scattering culling thresholds as a function of energy from a lookup table.
+
+    Parameters
+    ----------
+    ancillary_files : dict[Path]
+        Ancillary files.
+
+    Returns
+    -------
+    threshold_dict
+        Dictionary containing energy ranges and the corresponding scattering culling
+        threshold.
+    """
+    # Culling FWHM Scattering values as a function of energy.
+    thresholds = pd.read_csv(
+        ancillary_files["l1b-scattering-thresholds-per-energy"], header=None, skiprows=1
+    ).to_numpy(dtype=np.float64)
+    # The first two columns represent the energy range (min, max) in keV, and the
+    # value is the FWHM scattering threshold in degrees
+    threshold_dict = {(row[0], row[1]): row[2] for row in thresholds}
+
+    return threshold_dict
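
The lookup_utils changes above split table loading from the lookup itself, so callers can read each calibration CSV once and reuse it across many event batches. A usage sketch under assumed inputs follows; the ancillary_files mapping and its key/path are placeholders, while the function names and keyword arguments match the diff.

import numpy as np

from imap_processing.ultra.l1b.lookup_utils import (
    get_geometric_factor,
    load_geometric_factor_tables,
)

# Hypothetical ancillary mapping; real descriptors and paths come from the pipeline.
ancillary_files = {"l1b-90sensor-gf": "imap_ultra_l1b_90sensor-gf_v001.csv"}

tables = load_geometric_factor_tables(ancillary_files, "l1b-90sensor-gf")

phi = np.array([0.0, 30.0])    # azimuth, degrees
theta = np.array([5.0, 60.0])  # elevation, degrees
quality_flag = np.zeros(phi.shape, dtype=np.uint16)

# Reuse the preloaded tables for every event batch instead of re-reading the CSV.
gf = get_geometric_factor(phi, theta, quality_flag, geometric_factor_tables=tables)
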
imap_processing/ultra/l1b/quality_flag_filters.py
@@ -2,13 +2,22 @@
 
 from imap_processing.quality_flags import (
     FlagNameMixin,
+    ImapDEOutliersUltraFlags,
+    ImapDEScatteringUltraFlags,
     ImapRatesUltraFlags,
 )
 
-QUALITY_FLAG_FILTERS: dict[str, list[FlagNameMixin]] = {
+SPIN_QUALITY_FLAG_FILTERS: dict[str, list[FlagNameMixin]] = {
     "quality_attitude": [],
     "quality_ena_rates": [
         ImapRatesUltraFlags.FIRSTSPIN,
         ImapRatesUltraFlags.LASTSPIN,
     ],
 }
+
+DE_QUALITY_FLAG_FILTERS: dict[str, list[FlagNameMixin]] = {
+    "quality_outliers": [ImapDEOutliersUltraFlags.FOV],
+    "quality_scattering": [
+        ImapDEScatteringUltraFlags.ABOVE_THRESHOLD,
+    ],
+}
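
These filter dicts are consumed the same way calculate_goodtimes consumes SPIN_QUALITY_FLAG_FILTERS above: each flag list is summed into a single bitmask and ANDed against the corresponding quality variable. A toy IntFlag stand-in (not the real Imap*UltraFlags enums, and with illustrative bit values) makes the collapse explicit:

from enum import IntFlag


class DemoOutlierFlags(IntFlag):
    """Stand-in for ImapDEOutliersUltraFlags; bit values are illustrative."""

    FOV = 0x1
    PHCORR = 0x2


filters = {"quality_outliers": [DemoOutlierFlags.FOV]}

# Collapse each filter list into a bitmask, then test an event's flag word against it.
masks = {field: sum(flag.value for flag in flags) for field, flags in filters.items()}
quality_outliers = DemoOutlierFlags.PHCORR.value  # event flagged only for PHCORR
keep_event = (quality_outliers & masks["quality_outliers"]) == 0
print(masks, keep_event)  # {'quality_outliers': 1} True
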
imap_processing/ultra/l1b/ultra_l1b.py
@@ -3,9 +3,9 @@
 import xarray as xr
 
 from imap_processing.ultra.l1b.badtimes import calculate_badtimes
-from imap_processing.ultra.l1b.cullingmask import calculate_cullingmask
 from imap_processing.ultra.l1b.de import calculate_de
 from imap_processing.ultra.l1b.extendedspin import calculate_extendedspin
+from imap_processing.ultra.l1b.goodtimes import calculate_goodtimes
 
 
 def ultra_l1b(data_dict: dict, ancillary_files: dict) -> list[xr.Dataset]:
@@ -29,7 +29,7 @@ def ultra_l1b(data_dict: dict, ancillary_files: dict) -> list[xr.Dataset]:
     General flow:
     1. l1a data products are created (upstream to this code)
     2. l1b de is created here and dropped in s3 kicking off processing again
-    3. l1b extended, culling, badtimes created here
+    3. l1b extended, goodtimes, badtimes created here
     """
     output_datasets = []
 
@@ -72,22 +72,22 @@ def ultra_l1b(data_dict: dict, ancillary_files: dict) -> list[xr.Dataset]:
         output_datasets.append(extendedspin_dataset)
     elif (
         f"imap_ultra_l1b_{instrument_id}sensor-extendedspin" in data_dict
-        and f"imap_ultra_l1b_{instrument_id}sensor-cullingmask" in data_dict
+        and f"imap_ultra_l1b_{instrument_id}sensor-goodtimes" in data_dict
     ):
         badtimes_dataset = calculate_badtimes(
             data_dict[f"imap_ultra_l1b_{instrument_id}sensor-extendedspin"],
-            data_dict[f"imap_ultra_l1b_{instrument_id}sensor-cullingmask"][
+            data_dict[f"imap_ultra_l1b_{instrument_id}sensor-goodtimes"][
                 "spin_number"
             ].values,
             f"imap_ultra_l1b_{instrument_id}sensor-badtimes",
         )
         output_datasets.append(badtimes_dataset)
     elif f"imap_ultra_l1b_{instrument_id}sensor-extendedspin" in data_dict:
-        cullingmask_dataset = calculate_cullingmask(
+        goodtimes_dataset = calculate_goodtimes(
             data_dict[f"imap_ultra_l1b_{instrument_id}sensor-extendedspin"],
-            f"imap_ultra_l1b_{instrument_id}sensor-cullingmask",
+            f"imap_ultra_l1b_{instrument_id}sensor-goodtimes",
         )
-        output_datasets.append(cullingmask_dataset)
+        output_datasets.append(goodtimes_dataset)
     if not output_datasets:
         raise ValueError("Data dictionary does not contain the expected keys.")
 
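
The cullingmask to goodtimes rename also changes which input keys trigger which product. Restating just the two branches shown in this hunk as a small hypothetical helper (next_ultra_l1b_product is not part of the package; only the key strings are taken from the diff):

def next_ultra_l1b_product(data_dict: dict, instrument_id: int) -> str:
    """Decide which l1b product the elif chain above would produce next."""
    extendedspin = f"imap_ultra_l1b_{instrument_id}sensor-extendedspin"
    goodtimes = f"imap_ultra_l1b_{instrument_id}sensor-goodtimes"
    if extendedspin in data_dict and goodtimes in data_dict:
        return f"imap_ultra_l1b_{instrument_id}sensor-badtimes"
    if extendedspin in data_dict:
        return goodtimes
    raise ValueError("Data dictionary does not contain the expected keys.")


print(next_ultra_l1b_product({"imap_ultra_l1b_90sensor-extendedspin": None}, 90))
# imap_ultra_l1b_90sensor-goodtimes
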