imap-processing 0.18.0__py3-none-any.whl → 0.19.2__py3-none-any.whl

This diff shows the changes between these two publicly released package versions as they appear in their public registry, and is provided for informational purposes only.

This release of imap-processing has been flagged as potentially problematic.

Files changed (122)
  1. imap_processing/_version.py +2 -2
  2. imap_processing/ancillary/ancillary_dataset_combiner.py +161 -1
  3. imap_processing/cdf/config/imap_codice_global_cdf_attrs.yaml +6 -0
  4. imap_processing/cdf/config/imap_codice_l1a_variable_attrs.yaml +221 -1057
  5. imap_processing/cdf/config/imap_codice_l1b_variable_attrs.yaml +307 -283
  6. imap_processing/cdf/config/imap_codice_l2_variable_attrs.yaml +1044 -203
  7. imap_processing/cdf/config/imap_constant_attrs.yaml +4 -2
  8. imap_processing/cdf/config/imap_enamaps_l2-common_variable_attrs.yaml +11 -0
  9. imap_processing/cdf/config/imap_glows_l1b_variable_attrs.yaml +15 -1
  10. imap_processing/cdf/config/imap_hi_global_cdf_attrs.yaml +5 -0
  11. imap_processing/cdf/config/imap_hit_global_cdf_attrs.yaml +10 -4
  12. imap_processing/cdf/config/imap_idex_l2a_variable_attrs.yaml +33 -4
  13. imap_processing/cdf/config/imap_idex_l2b_variable_attrs.yaml +8 -91
  14. imap_processing/cdf/config/imap_idex_l2c_variable_attrs.yaml +106 -16
  15. imap_processing/cdf/config/imap_lo_global_cdf_attrs.yaml +5 -4
  16. imap_processing/cdf/config/imap_lo_l1a_variable_attrs.yaml +4 -15
  17. imap_processing/cdf/config/imap_lo_l1c_variable_attrs.yaml +189 -98
  18. imap_processing/cdf/config/imap_mag_global_cdf_attrs.yaml +85 -2
  19. imap_processing/cdf/config/imap_mag_l1c_variable_attrs.yaml +24 -1
  20. imap_processing/cdf/config/imap_ultra_global_cdf_attrs.yaml +20 -8
  21. imap_processing/cdf/config/imap_ultra_l1b_variable_attrs.yaml +45 -35
  22. imap_processing/cdf/config/imap_ultra_l1c_variable_attrs.yaml +110 -7
  23. imap_processing/cli.py +138 -93
  24. imap_processing/codice/codice_l0.py +2 -1
  25. imap_processing/codice/codice_l1a.py +167 -69
  26. imap_processing/codice/codice_l1b.py +42 -32
  27. imap_processing/codice/codice_l2.py +215 -9
  28. imap_processing/codice/constants.py +790 -603
  29. imap_processing/codice/data/lo_stepping_values.csv +1 -1
  30. imap_processing/decom.py +1 -4
  31. imap_processing/ena_maps/ena_maps.py +71 -43
  32. imap_processing/ena_maps/utils/corrections.py +291 -0
  33. imap_processing/ena_maps/utils/map_utils.py +20 -4
  34. imap_processing/ena_maps/utils/naming.py +8 -2
  35. imap_processing/glows/ancillary/imap_glows_exclusions-by-instr-team_20250923_v002.dat +10 -0
  36. imap_processing/glows/ancillary/imap_glows_map-of-excluded-regions_20250923_v002.dat +393 -0
  37. imap_processing/glows/ancillary/imap_glows_map-of-uv-sources_20250923_v002.dat +593 -0
  38. imap_processing/glows/ancillary/imap_glows_pipeline-settings_20250923_v002.json +54 -0
  39. imap_processing/glows/ancillary/imap_glows_suspected-transients_20250923_v002.dat +10 -0
  40. imap_processing/glows/l1b/glows_l1b.py +123 -18
  41. imap_processing/glows/l1b/glows_l1b_data.py +358 -47
  42. imap_processing/glows/l2/glows_l2.py +11 -0
  43. imap_processing/hi/hi_l1a.py +124 -3
  44. imap_processing/hi/hi_l1b.py +154 -71
  45. imap_processing/hi/hi_l1c.py +4 -109
  46. imap_processing/hi/hi_l2.py +104 -60
  47. imap_processing/hi/utils.py +262 -8
  48. imap_processing/hit/l0/constants.py +3 -0
  49. imap_processing/hit/l0/decom_hit.py +3 -6
  50. imap_processing/hit/l1a/hit_l1a.py +311 -21
  51. imap_processing/hit/l1b/hit_l1b.py +54 -126
  52. imap_processing/hit/l2/hit_l2.py +6 -6
  53. imap_processing/ialirt/calculate_ingest.py +219 -0
  54. imap_processing/ialirt/constants.py +12 -2
  55. imap_processing/ialirt/generate_coverage.py +15 -2
  56. imap_processing/ialirt/l0/ialirt_spice.py +6 -2
  57. imap_processing/ialirt/l0/parse_mag.py +293 -42
  58. imap_processing/ialirt/l0/process_hit.py +5 -3
  59. imap_processing/ialirt/l0/process_swapi.py +41 -25
  60. imap_processing/ialirt/process_ephemeris.py +70 -14
  61. imap_processing/ialirt/utils/create_xarray.py +1 -1
  62. imap_processing/idex/idex_l0.py +2 -2
  63. imap_processing/idex/idex_l1a.py +2 -3
  64. imap_processing/idex/idex_l1b.py +2 -3
  65. imap_processing/idex/idex_l2a.py +130 -4
  66. imap_processing/idex/idex_l2b.py +158 -143
  67. imap_processing/idex/idex_utils.py +1 -3
  68. imap_processing/lo/ancillary_data/imap_lo_hydrogen-geometric-factor_v001.csv +75 -0
  69. imap_processing/lo/ancillary_data/imap_lo_oxygen-geometric-factor_v001.csv +75 -0
  70. imap_processing/lo/l0/lo_science.py +25 -24
  71. imap_processing/lo/l1b/lo_l1b.py +93 -19
  72. imap_processing/lo/l1c/lo_l1c.py +273 -93
  73. imap_processing/lo/l2/lo_l2.py +949 -135
  74. imap_processing/lo/lo_ancillary.py +55 -0
  75. imap_processing/mag/l1a/mag_l1a.py +1 -0
  76. imap_processing/mag/l1a/mag_l1a_data.py +26 -0
  77. imap_processing/mag/l1b/mag_l1b.py +3 -2
  78. imap_processing/mag/l1c/interpolation_methods.py +14 -15
  79. imap_processing/mag/l1c/mag_l1c.py +23 -6
  80. imap_processing/mag/l1d/mag_l1d.py +57 -14
  81. imap_processing/mag/l1d/mag_l1d_data.py +202 -32
  82. imap_processing/mag/l2/mag_l2.py +2 -0
  83. imap_processing/mag/l2/mag_l2_data.py +14 -5
  84. imap_processing/quality_flags.py +23 -1
  85. imap_processing/spice/geometry.py +89 -39
  86. imap_processing/spice/pointing_frame.py +4 -8
  87. imap_processing/spice/repoint.py +78 -2
  88. imap_processing/spice/spin.py +28 -8
  89. imap_processing/spice/time.py +12 -22
  90. imap_processing/swapi/l1/swapi_l1.py +10 -4
  91. imap_processing/swapi/l2/swapi_l2.py +15 -17
  92. imap_processing/swe/l1b/swe_l1b.py +1 -2
  93. imap_processing/ultra/constants.py +30 -24
  94. imap_processing/ultra/l0/ultra_utils.py +9 -11
  95. imap_processing/ultra/l1a/ultra_l1a.py +1 -2
  96. imap_processing/ultra/l1b/badtimes.py +35 -11
  97. imap_processing/ultra/l1b/de.py +95 -31
  98. imap_processing/ultra/l1b/extendedspin.py +31 -16
  99. imap_processing/ultra/l1b/goodtimes.py +112 -0
  100. imap_processing/ultra/l1b/lookup_utils.py +281 -28
  101. imap_processing/ultra/l1b/quality_flag_filters.py +10 -1
  102. imap_processing/ultra/l1b/ultra_l1b.py +7 -7
  103. imap_processing/ultra/l1b/ultra_l1b_culling.py +169 -7
  104. imap_processing/ultra/l1b/ultra_l1b_extended.py +311 -69
  105. imap_processing/ultra/l1c/helio_pset.py +139 -37
  106. imap_processing/ultra/l1c/l1c_lookup_utils.py +289 -0
  107. imap_processing/ultra/l1c/spacecraft_pset.py +140 -29
  108. imap_processing/ultra/l1c/ultra_l1c.py +33 -24
  109. imap_processing/ultra/l1c/ultra_l1c_culling.py +92 -0
  110. imap_processing/ultra/l1c/ultra_l1c_pset_bins.py +400 -292
  111. imap_processing/ultra/l2/ultra_l2.py +54 -11
  112. imap_processing/ultra/utils/ultra_l1_utils.py +37 -7
  113. imap_processing/utils.py +3 -4
  114. {imap_processing-0.18.0.dist-info → imap_processing-0.19.2.dist-info}/METADATA +2 -2
  115. {imap_processing-0.18.0.dist-info → imap_processing-0.19.2.dist-info}/RECORD +118 -109
  116. imap_processing/idex/idex_l2c.py +0 -84
  117. imap_processing/spice/kernels.py +0 -187
  118. imap_processing/ultra/l1b/cullingmask.py +0 -87
  119. imap_processing/ultra/l1c/histogram.py +0 -36
  120. {imap_processing-0.18.0.dist-info → imap_processing-0.19.2.dist-info}/LICENSE +0 -0
  121. {imap_processing-0.18.0.dist-info → imap_processing-0.19.2.dist-info}/WHEEL +0 -0
  122. {imap_processing-0.18.0.dist-info → imap_processing-0.19.2.dist-info}/entry_points.txt +0 -0
@@ -26,7 +26,7 @@ logger = logging.getLogger(__name__)
 # TODO review logging levels to use (debug vs. info)
 
 
-def hit_l1b(dependencies: dict) -> list[xr.Dataset]:
+def hit_l1b(dependency: str | xr.Dataset, l1b_descriptor: str) -> xr.Dataset:
     """
     Will process HIT data to L1B.
 
@@ -34,54 +34,56 @@ def hit_l1b(dependencies: dict) -> list[xr.Dataset]:
 
     Parameters
     ----------
-    dependencies : dict
-        Dictionary of dependencies that are L1A xarray datasets
-        for science data and a file path string to an L0 file
-        for housekeeping data.
+    dependency : Union[str, xr.Dataset]
+        Dependency is either an L1A xarray dataset to process
+        science data or a file path string to an L0 file to
+        process housekeeping data.
+    l1b_descriptor : str
+        The descriptor for the L1B dataset to create.
 
     Returns
     -------
-    processed_data : list[xarray.Dataset]
-        List of four L1B datasets.
+    l1b_dataset : xarray.Dataset
+        The processed L1B dataset.
     """
     # Create the attribute manager for this data level
     attr_mgr = get_attribute_manager("l1b")
 
+    l1b_dataset = None
+
     # Create L1B datasets
-    l1b_datasets: list = []
-    if "imap_hit_l0_raw" in dependencies:
+    if l1b_descriptor == "hk":
         # Unpack ccsds file to xarray datasets
-        packet_file = dependencies["imap_hit_l0_raw"]
+        packet_file = dependency
         datasets_by_apid = get_datasets_by_apid(packet_file, derived=True)
-        # TODO: update to raise error after all APIDs are included in the same
-        # raw files. currently science and housekeeping are in separate files.
         if HitAPID.HIT_HSKP in datasets_by_apid:
             # Process housekeeping to L1B.
-            l1b_datasets.append(
-                process_housekeeping_data(
-                    datasets_by_apid[HitAPID.HIT_HSKP], attr_mgr, "imap_hit_l1b_hk"
-                )
+            l1b_dataset = process_housekeeping_data(
+                datasets_by_apid[HitAPID.HIT_HSKP], attr_mgr, "imap_hit_l1b_hk"
             )
             logger.info("HIT L1B housekeeping dataset created")
-    if "imap_hit_l1a_counts" in dependencies:
+    elif l1b_descriptor in ["standard-rates", "summed-rates", "sectored-rates"]:
         # Process science data to L1B datasets
-        l1a_counts_dataset = dependencies["imap_hit_l1a_counts"]
-        l1b_datasets.extend(process_science_data(l1a_counts_dataset, attr_mgr))
-        logger.info("HIT L1B science datasets created")
+        l1b_dataset = process_science_data(dependency, l1b_descriptor, attr_mgr)
+        logger.info("HIT L1B science dataset created")
+    else:
+        logger.error(f"Unsupported descriptor for L1B processing: {l1b_descriptor}")
+        raise ValueError(f"Unsupported descriptor: {l1b_descriptor}")
 
-    return l1b_datasets
+    return l1b_dataset
 
 
 def process_science_data(
-    l1a_counts_dataset: xr.Dataset, attr_mgr: ImapCdfAttributes
-) -> list[xr.Dataset]:
+    l1a_counts_dataset: xr.Dataset, descriptor: str, attr_mgr: ImapCdfAttributes
+) -> xr.Dataset:
     """
     Will create L1B science datasets for CDF products.
 
-    Process L1A raw counts data to create L1B science data for
-    CDF creation. This function will create three L1B science
+    This function processes L1A counts data to L1B science
+    data for CDF creation. There are three L1B science
     datasets: standard rates, summed rates, and sectored rates.
-    It will also update dataset attributes, coordinates and
+    This function creates one dataset based on the descriptor
+    provided. It will also update dataset attributes, coordinates and
     data variable dimensions according to specifications in
     a CDF yaml file.
 
@@ -89,46 +91,44 @@ def process_science_data(
     ----------
     l1a_counts_dataset : xr.Dataset
         The L1A counts dataset.
+    descriptor : str
+        The descriptor for the L1B dataset to create
+        (e.g., "standard-rates", "summed-rates", "sectored-rates").
     attr_mgr : AttributeManager
         The attribute manager for the L1B data level.
 
     Returns
     -------
-    dataset : list
-        The processed L1B science datasets as xarray datasets.
+    dataset : xarray.Dataset
+        A processed L1B science dataset.
     """
     logger.info("Creating HIT L1B science datasets")
 
-    # TODO: Write functions to create the following datasets
-    # Process sectored rates dataset
+    dataset = None
+    logical_source = None
 
     # Calculate fractional livetime from the livetime counter
     livetime = l1a_counts_dataset["livetime_counter"] / LIVESTIM_PULSES
     livetime = livetime.rename("livetime")
 
-    # Process counts data to L1B datasets
-    l1b_datasets: dict = {
-        "imap_hit_l1b_standard-rates": process_standard_rates_data(
-            l1a_counts_dataset, livetime
-        ),
-        "imap_hit_l1b_summed-rates": process_summed_rates_data(
-            l1a_counts_dataset, livetime
-        ),
-        "imap_hit_l1b_sectored-rates": process_sectored_rates_data(
-            l1a_counts_dataset, livetime
-        ),
-    }
+    # Process counts data to an L1B dataset based on the descriptor
+    if descriptor == "standard-rates":
+        dataset = process_standard_rates_data(l1a_counts_dataset, livetime)
+        logical_source = "imap_hit_l1b_standard-rates"
+    elif descriptor == "summed-rates":
+        dataset = process_summed_rates_data(l1a_counts_dataset, livetime)
+        logical_source = "imap_hit_l1b_summed-rates"
+    elif descriptor == "sectored-rates":
+        dataset = process_sectored_rates_data(l1a_counts_dataset, livetime)
+        logical_source = "imap_hit_l1b_sectored-rates"
 
     # Update attributes and dimensions
-    for logical_source, dataset in l1b_datasets.items():
+    if dataset and logical_source:
         dataset.attrs = attr_mgr.get_global_attributes(logical_source)
-
-        # TODO: Add CDF attributes to yaml once they're defined for L1B science data
-        # Assign attributes and dimensions to each data array in the Dataset
+        # TODO: Add CDF attributes to yaml
         for field in dataset.data_vars.keys():
             try:
-                # Create a dict of dimensions using the DEPEND_I keys in the
-                # attributes
+                # Create a dict of dimensions using the DEPEND_I keys in the attributes
                 dims = {
                     key: value
                     for key, value in attr_mgr.get_variable_attributes(field).items()
@@ -137,7 +137,6 @@ def process_science_data(
                 dataset[field].attrs = attr_mgr.get_variable_attributes(field)
                 dataset[field].assign_coords(dims)
             except KeyError:
-                print(f"Field {field} not found in attribute manager.")
                 logger.warning(f"Field {field} not found in attribute manager.")
 
         # Skip schema check for epoch to prevent attr_mgr from adding the
@@ -145,10 +144,9 @@ def process_science_data(
         dataset.epoch.attrs = attr_mgr.get_variable_attributes(
             "epoch", check_schema=False
         )
-
         logger.info(f"HIT L1B dataset created for {logical_source}")
 
-    return list(l1b_datasets.values())
+    return dataset
 
 
 def initialize_l1b_dataset(l1a_counts_dataset: xr.Dataset, coords: list) -> xr.Dataset:
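Taken together, these hunks change the HIT L1B entry point from "build every product from a dependency dict and return a list" to "build the single product named by a descriptor". A hypothetical call sketch, not part of the diff; `l1a_counts` stands in for an already-loaded L1A counts xarray.Dataset and the packet file name is made up:

    from imap_processing.hit.l1b.hit_l1b import hit_l1b

    # Science products are now requested one descriptor at a time,
    # each call returning a single xr.Dataset.
    standard_rates = hit_l1b(l1a_counts, "standard-rates")
    summed_rates = hit_l1b(l1a_counts, "summed-rates")
    sectored_rates = hit_l1b(l1a_counts, "sectored-rates")

    # Housekeeping still comes from an L0 packet file path.
    hk = hit_l1b("path/to/imap_hit_l0_raw.pkts", "hk")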
@@ -359,79 +357,11 @@ def process_summed_rates_data(
     return l1b_summed_rates_dataset
 
 
-def subset_data_for_sectored_counts(
-    l1a_counts_dataset: xr.Dataset, livetime: xr.DataArray
-) -> tuple[xr.Dataset, xr.DataArray]:
-    """
-    Subset data for complete sets of sectored counts and corresponding livetime values.
-
-    A set of sectored data starts with hydrogen and ends with iron and correspond to
-    the mod 10 values 0-9. The livetime values from the previous 10 minutes are used
-    to calculate the rates for each set since those counts are transmitted 10 minutes
-    after they were collected. Therefore, only complete sets of sectored counts where
-    livetime from the previous 10 minutes are available are included in the output.
-
-    Parameters
-    ----------
-    l1a_counts_dataset : xr.Dataset
-        The L1A counts dataset.
-    livetime : xr.DataArray
-        1D array of livetime values calculated from the livetime counter.
-
-    Returns
-    -------
-    tuple[xr.Dataset, xr.DataArray]
-        Dataset of complete sectored counts and corresponding livetime values.
-    """
-    # Identify 10-minute intervals of complete sectored counts.
-    bin_size = 10
-    mod_10 = l1a_counts_dataset.hdr_minute_cnt.values % 10
-    pattern = np.arange(bin_size)
-
-    # Use sliding windows to find pattern matches
-    matches = np.all(
-        np.lib.stride_tricks.sliding_window_view(mod_10, bin_size) == pattern, axis=1
-    )
-    start_indices = np.where(matches)[0]
-
-    # Filter out start indices that are less than or equal to the bin size
-    # since the previous 10 minutes are needed for calculating rates
-    if start_indices.size == 0:
-        logger.error(
-            "No data to process - valid start indices not found for "
-            "complete sectored counts."
-        )
-        raise ValueError("No valid start indices found for complete sectored counts.")
-    else:
-        start_indices = start_indices[start_indices >= bin_size]
-
-    # Subset data for complete sets of sectored counts.
-    # Each set of sectored counts is 10 minutes long, so we take the indices
-    # starting from the start indices and extend to the bin size of 10.
-    # This creates a 1D array of indices that correspond to the complete
-    # sets of sectored counts which is used to filter the L1A dataset and
-    # create the L1B sectored rates dataset.
-    data_indices = np.concatenate(
-        [np.arange(idx, idx + bin_size) for idx in start_indices]
-    )
-    l1b_sectored_rates_dataset = l1a_counts_dataset.isel(epoch=data_indices)
-
-    # Subset livetime values corresponding to the previous 10 minutes
-    # for each start index. This ensures the livetime data aligns correctly
-    # with the sectored counts for rate calculations.
-    livetime_indices = np.concatenate(
-        [np.arange(idx - bin_size, idx) for idx in start_indices]
-    )
-    livetime = livetime.isel(epoch=livetime_indices)
-
-    return l1b_sectored_rates_dataset, livetime
-
-
 def process_sectored_rates_data(
     l1a_counts_dataset: xr.Dataset, livetime: xr.DataArray
 ) -> xr.Dataset:
     """
-    Will process L1B sectored rates data from L1A raw counts data.
+    Will process L1A raw counts data into L1B sectored rates.
 
     A complete set of sectored counts is taken over 10 science frames (10 minutes)
     where each science frame contains counts for one species and energy range.
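For reference, the core of the removed helper was numpy's sliding_window_view scan of the science-frame minute counter: a complete hydrogen-through-iron set shows up as a 0-9 run in the counter mod 10. A standalone illustration with synthetic values (not package data); per the NOTE added in the next hunk, this subsetting now happens during L1A processing:

    import numpy as np

    # Synthetic minute counters: a partial set, then one complete 0-9 set.
    hdr_minute_cnt = np.array([7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20])
    mod_10 = hdr_minute_cnt % 10
    pattern = np.arange(10)

    # Compare every window of 10 consecutive values against 0..9.
    windows = np.lib.stride_tricks.sliding_window_view(mod_10, 10)
    start_indices = np.where(np.all(windows == pattern, axis=1))[0]
    print(start_indices)  # [3] -> the complete set starts at minute counter 10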
@@ -451,10 +381,13 @@ def process_sectored_rates_data(
     rotation is split into 15 inclination ranges). See equation 11 in the algorithm
     document.
 
+    NOTE: The L1A counts dataset has complete sets of sectored counts and livetime is
+    already shifted to 10 minutes before the counts. This was handled in L1A processing.
+
     Parameters
     ----------
     l1a_counts_dataset : xr.Dataset
-        The L1A counts dataset.
+        The L1A counts dataset containing sectored counts.
 
     livetime : xr.DataArray
         1D array of livetime values calculated from the livetime counter.
@@ -477,11 +410,6 @@ def process_sectored_rates_data(
         if any(str(var).startswith(f"{p}_") for p in particles)
     ]
 
-    # Subset data for complete sets of sectored counts and corresponding livetime values
-    l1a_counts_dataset, livetime = subset_data_for_sectored_counts(
-        l1a_counts_dataset, livetime
-    )
-
     # Sum livetime over 10 minute intervals
     livetime_10min = sum_livetime_10min(livetime)
 
@@ -27,16 +27,16 @@ logger = logging.getLogger(__name__)
 # - review logging levels to use (debug vs. info)
 
 
-def hit_l2(dependency_sci: xr.Dataset, dependencies_anc: list) -> list[xr.Dataset]:
+def hit_l2(dependency_sci: xr.Dataset, dependencies_anc: list) -> xr.Dataset:
     """
-    Will process HIT data to L2.
+    Will process HIT L1B data to L2.
 
     Processes dependencies needed to create L2 data products.
 
     Parameters
     ----------
     dependency_sci : xr.Dataset
-        L1B xarray science dataset that is either summed rates
+        L1B dataset that is either summed rates
         standard rates or sector rates.
 
     dependencies_anc : list
@@ -44,8 +44,8 @@ def hit_l2(dependency_sci: xr.Dataset, dependencies_anc: list) -> list[xr.Dataset]:
 
     Returns
     -------
-    processed_data : list[xarray.Dataset]
-        List of one L2 dataset.
+    l2_dataset : xarray.Dataset
+        The processed L2 dataset from the dependency dataset provided.
     """
     logger.info("Creating HIT L2 science dataset")
 
@@ -74,7 +74,7 @@ def hit_l2(dependency_sci: xr.Dataset, dependencies_anc: list) -> list[xr.Dataset]:
 
     logger.info(f"HIT L2 dataset created for {logical_source}")
 
-    return [l2_dataset]
+    return l2_dataset
 
 
 def add_cdf_attributes(
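Downstream of the same refactor, hit_l2 now returns the dataset directly instead of a one-element list. A hypothetical call with placeholder inputs:

    from imap_processing.hit.l2.hit_l2 import hit_l2

    # "summed_rates" is an L1B science dataset; "anc_files" is the list of
    # ancillary dependencies (both assumed to be prepared elsewhere).
    l2_dataset = hit_l2(summed_rates, anc_files)  # xr.Dataset, no longer [xr.Dataset]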
@@ -0,0 +1,219 @@
+"""Packet ingest and tcp connection times for each station."""
+
+import logging
+from datetime import datetime, timedelta, timezone
+from typing import Any
+
+from imap_processing.ialirt.constants import STATIONS
+
+logger = logging.getLogger(__name__)
+
+
+def find_tcp_connections(
+    start_file_creation: datetime,
+    end_file_creation: datetime,
+    lines: list,
+    realtime_summary: dict,
+) -> dict:
+    """
+    Find tcp connection time ranges for ground station from log lines.
+
+    Parameters
+    ----------
+    start_file_creation : datetime
+        File creation time of last file minus 48 hrs.
+    end_file_creation : datetime
+        File creation time of last file.
+    lines : list
+        All lines of log files.
+    realtime_summary : dict
+        Input dictionary containing ingest parameters.
+
+    Returns
+    -------
+    realtime_summary : dict
+        Output dictionary with tcp connection info.
+    """
+    current_starts: dict[str, datetime | None] = {}
+
+    for line in lines:
+        if "antenna partner connection is" not in line:
+            continue
+
+        timestamp_str = line.split(" ")[0]
+        msg = " ".join(line.split(" ")[1:])
+        station = msg.split(" antenna")[0]
+
+        if station not in realtime_summary["connection_times"]:
+            realtime_summary["connection_times"][station] = []
+        if station not in realtime_summary["stations"]:
+            realtime_summary["stations"].append(station)
+
+        timestamp = datetime.strptime(timestamp_str, "%Y/%j-%H:%M:%S.%f")
+
+        if f"{station} antenna partner connection is up." in line:
+            current_starts[station] = timestamp
+
+        elif f"{station} antenna partner connection is down!" in line:
+            start = current_starts.get(station)
+            if start is not None:
+                realtime_summary["connection_times"][station].append(
+                    {
+                        "start": datetime.isoformat(start),
+                        "end": datetime.isoformat(timestamp),
+                    }
+                )
+                current_starts[station] = None
+            else:
+                # No matching "up"
+                realtime_summary["connection_times"][station].append(
+                    {
+                        "start": datetime.isoformat(start_file_creation),
+                        "end": datetime.isoformat(timestamp),
+                    }
+                )
+                current_starts[station] = None
+
+    # Handle hanging "up" at the end of file
+    for station, start in current_starts.items():
+        if start is not None:
+            realtime_summary["connection_times"][station].append(
+                {
+                    "start": datetime.isoformat(start),
+                    "end": datetime.isoformat(end_file_creation),
+                }
+            )
+
+    # Filter out connection windows that are completely outside the time window
+    for station in realtime_summary["connection_times"]:
+        realtime_summary["connection_times"][station] = [
+            window
+            for window in realtime_summary["connection_times"][station]
+            if datetime.fromisoformat(window["end"]) >= start_file_creation
+            and datetime.fromisoformat(window["start"]) <= end_file_creation
+        ]
+
+    return realtime_summary
+
+
+def packets_created(start_file_creation: datetime, lines: list) -> list:
+    """
+    Find timestamps when packets were created based on log lines.
+
+    Parameters
+    ----------
+    start_file_creation : datetime
+        File creation time of last file minus 48 hrs.
+    lines : list
+        All lines of log files.
+
+    Returns
+    -------
+    packet_times : list
+        List of datetime objects when packets were created.
+    """
+    packet_times = []
+
+    for line in lines:
+        if "Renamed iois_1_packets" in line:
+            timestamp_str = line.split(" ")[0]
+            timestamp = datetime.strptime(timestamp_str, "%Y/%j-%H:%M:%S.%f")
+            # Possible that data extends further than 48 hrs in the past.
+            if timestamp >= start_file_creation:
+                packet_times.append(timestamp)
+
+    return packet_times
+
+
+def format_ingest_data(last_filename: str, log_lines: list) -> dict:
+    """
+    Format TCP connection and packet ingest data from multiple log files.
+
+    Parameters
+    ----------
+    last_filename : str
+        Log file that is last chronologically.
+    log_lines : list[str]
+        Combined lines from all log files (assumed already sorted by time).
+
+    Returns
+    -------
+    realtime_summary : dict
+        Structured output with TCP connection windows per station
+        and global packet ingest timestamps.
+
+    Notes
+    -----
+    Example output:
+    {
+        "summary": "I-ALiRT Real-time Ingest Summary",
+        "generated": "2025-08-07T21:36:09Z",
+        "time_format": "UTC (ISOC)",
+        "stations": [
+            "Kiel"
+        ],
+        "time_range": [
+            "2025-07-30T23:00:00",
+            "2025-07-31T02:00:00"
+        ],
+        "packet_ingest": [
+            "2025-07-31T00:00:00",
+            "2025-07-31T02:01:00"
+        ],
+        "connection_times": {
+            "Kiel": [
+                {
+                    "start": "2025-07-30T23:00:00",
+                    "end": "2025-07-31T00:15:00"
+                },
+                {
+                    "start": "2025-07-31T02:00:00",
+                    "end": "2025-07-31T02:00:00"
+                }
+            ]
+        }
+    }
+
+    where time_range is the overall time range of the data,
+    packet_ingest contains timestamps when packets were finalized,
+    and tcp contains connection windows for each station.
+    """
+    # File creation time.
+    last_timestamp_str = last_filename.split(".")[2]
+    last_timestamp_str = last_timestamp_str.replace("_", ":")
+    end_of_time = datetime.strptime(last_timestamp_str, "%Y-%jT%H:%M:%S")
+
+    # File creation time of last file minus 48 hrs.
+    start_of_time = datetime.strptime(last_timestamp_str, "%Y-%jT%H:%M:%S") - timedelta(
+        hours=48
+    )
+
+    realtime_summary: dict[str, Any] = {
+        "summary": "I-ALiRT Real-time Ingest Summary",
+        "generated": datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ"),
+        "time_format": "UTC (ISOC)",
+        "stations": list(STATIONS),
+        "time_range": [
+            start_of_time.isoformat(),
+            end_of_time.isoformat(),
+        ],  # Overall time range of the data
+        "packet_ingest": [],  # Global packet ingest times
+        "connection_times": {
+            station: [] for station in list(STATIONS)
+        },  # Per-station TCP connection windows
+    }
+
+    # TCP connection data for each station
+    realtime_summary = find_tcp_connections(
+        start_of_time, end_of_time, log_lines, realtime_summary
+    )
+
+    # Global packet ingest timestamps
+    packet_times = packets_created(start_of_time, log_lines)
+    realtime_summary["packet_ingest"] = [
+        pkt_time.isoformat() for pkt_time in packet_times
+    ]
+
+    logger.info(f"Created ingest files for {realtime_summary['time_range']}")
+
+    return realtime_summary
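A minimal usage sketch of the new parser, with made-up log lines and window bounds; only find_tcp_connections and the line format it documents are taken from the diff:

    from datetime import datetime

    from imap_processing.ialirt.calculate_ingest import find_tcp_connections

    # Lines look like "%Y/%j-%H:%M:%S.%f <station> antenna partner connection is ..."
    lines = [
        "2025/212-23:00:00.000000 Kiel antenna partner connection is up.",
        "2025/213-00:15:00.000000 Kiel antenna partner connection is down!",
    ]
    summary = {"stations": [], "connection_times": {}}
    summary = find_tcp_connections(
        datetime(2025, 7, 31),  # start of the 48-hour window
        datetime(2025, 8, 2),   # creation time of the last log file
        lines,
        summary,
    )
    # summary["connection_times"]["Kiel"] ->
    #   [{"start": "2025-07-31T23:00:00", "end": "2025-08-01T00:15:00"}]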
@@ -33,10 +33,12 @@ class IalirtSwapiConstants:
     boltz = 1.380649e-23  # Boltzmann constant, J/K
     at_mass = 1.6605390666e-27  # atomic mass, kg
     prot_mass = 1.007276466621 * at_mass  # mass of proton, kg
-    eff_area = 3.3e-5 * 1e-4  # effective area, meters squared
+    eff_area = 1.633e-4 * 1e-4  # effective area, cm2 to meters squared
     az_fov = np.deg2rad(30)  # azimuthal width of the field of view, radians
     fwhm_width = 0.085  # FWHM of energy width
     speed_ew = 0.5 * fwhm_width  # speed width of energy passband
+    e_charge = 1.602176634e-19  # electronic charge, [C]
+    speed_coeff = np.sqrt(2 * e_charge / prot_mass) / 1e3
 
 
 class StationProperties(NamedTuple):
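The new speed_coeff is the standard proton energy-per-charge to speed conversion, v = sqrt(2qE/m_p), scaled to km/s per sqrt(eV). A quick sanity check (illustrative arithmetic, not code from the package):

    import numpy as np

    e_charge = 1.602176634e-19  # C
    prot_mass = 1.007276466621 * 1.6605390666e-27  # kg
    speed_coeff = np.sqrt(2 * e_charge / prot_mass) / 1e3  # (km/s) per sqrt(eV)

    print(speed_coeff)                  # ~13.8 km/s per sqrt(eV)
    print(speed_coeff * np.sqrt(1000))  # ~438 km/s for a 1 keV proton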
@@ -48,7 +50,15 @@ class StationProperties(NamedTuple):
     min_elevation_deg: float  # minimum elevation angle in degrees
 
 
-# Verified by Kiel Observatory staff.
+# Verified by Kiel and KSWC Observatory staff.
+# Notes: the KSWC station is not yet operational,
+# but will have the following properties:
+# "KSWC": StationProperties(
+#     longitude=126.2958,  # degrees East
+#     latitude=33.4273,  # degrees North
+#     altitude=0.1,  # approx 100 meters
+#     min_elevation_deg=5,  # 5 degrees is the requirement
+# ),
 STATIONS = {
     "Kiel": StationProperties(
         longitude=10.1808,  # degrees East
@@ -11,8 +11,21 @@ from imap_processing.spice.time import et_to_utc, str_to_et
 # Logger setup
 logger = logging.getLogger(__name__)
 
-# TODO: get a list of all potential DSN stations.
-ALL_STATIONS = [*STATIONS.keys(), "DSS-55", "DSS-56", "DSS-74", "DSS-75"]
+ALL_STATIONS = [
+    *STATIONS.keys(),
+    "DSS-24",
+    "DSS-25",
+    "DSS-26",
+    "DSS-34",
+    "DSS-35",
+    "DSS-36",
+    "DSS-53",
+    "DSS-54",
+    "DSS-55",
+    "DSS-56",
+    "DSS-74",
+    "DSS-75",
+]
 
 
 def generate_coverage(
@@ -56,7 +56,10 @@ def get_rotation_matrix(axis: NDArray, angle: NDArray) -> NDArray:
     """
     angle_rad = np.radians(angle)
    rot_matrices = np.array(
-        [spice.axisar(z, float(phase)) for z, phase in zip(axis, angle_rad)]
+        [
+            spice.axisar(z, float(phase))
+            for z, phase in zip(axis, angle_rad, strict=False)
+        ]
     )
 
     return rot_matrices
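get_rotation_matrix builds one matrix per spin phase with SPICE's axisar routine (axis and angle to rotation matrix). A tiny standalone illustration of that call, with made-up values:

    import numpy as np
    import spiceypy as spice

    # Rotate the +X unit vector by 90 degrees of spin phase about a +Z spin axis.
    rot = spice.axisar(np.array([0.0, 0.0, 1.0]), np.radians(90.0))
    print(spice.mxv(rot, np.array([1.0, 0.0, 0.0])))  # ~[0, 1, 0]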
@@ -130,6 +133,7 @@ def transform_instrument_vectors_to_inertial(
     spin_phase: NDArray,
     sc_inertial_right: NDArray,
     sc_inertial_decline: NDArray,
+    # TODO: Use correct IMAP_MAG_I or IMAP_MAG_O frame here
     instrument_frame: SpiceFrame = SpiceFrame.IMAP_MAG,
     spacecraft_frame: SpiceFrame = SpiceFrame.IMAP_SPACECRAFT,
 ) -> NDArray:
@@ -185,7 +189,7 @@ def transform_instrument_vectors_to_inertial(
     vectors = np.array(
         [
             spice.mxv(rot.T.copy(), vec)
-            for rot, vec in zip(total_rotations, instrument_vectors)
+            for rot, vec in zip(total_rotations, instrument_vectors, strict=False)
         ]
     )