imap-processing 0.17.0-py3-none-any.whl → 0.19.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of imap-processing might be problematic.
- imap_processing/_version.py +2 -2
- imap_processing/ancillary/ancillary_dataset_combiner.py +161 -1
- imap_processing/ccsds/excel_to_xtce.py +12 -0
- imap_processing/cdf/config/imap_codice_global_cdf_attrs.yaml +6 -6
- imap_processing/cdf/config/imap_codice_l1a_variable_attrs.yaml +312 -274
- imap_processing/cdf/config/imap_codice_l1b_variable_attrs.yaml +39 -28
- imap_processing/cdf/config/imap_codice_l2_variable_attrs.yaml +1048 -183
- imap_processing/cdf/config/imap_constant_attrs.yaml +4 -2
- imap_processing/cdf/config/imap_glows_l1b_variable_attrs.yaml +12 -0
- imap_processing/cdf/config/imap_hi_global_cdf_attrs.yaml +5 -0
- imap_processing/cdf/config/imap_hit_global_cdf_attrs.yaml +10 -4
- imap_processing/cdf/config/imap_hit_l1a_variable_attrs.yaml +163 -100
- imap_processing/cdf/config/imap_hit_l2_variable_attrs.yaml +4 -4
- imap_processing/cdf/config/imap_ialirt_l1_variable_attrs.yaml +97 -54
- imap_processing/cdf/config/imap_idex_l2a_variable_attrs.yaml +33 -4
- imap_processing/cdf/config/imap_idex_l2b_variable_attrs.yaml +44 -44
- imap_processing/cdf/config/imap_idex_l2c_variable_attrs.yaml +77 -61
- imap_processing/cdf/config/imap_lo_global_cdf_attrs.yaml +30 -0
- imap_processing/cdf/config/imap_lo_l1a_variable_attrs.yaml +4 -15
- imap_processing/cdf/config/imap_lo_l1c_variable_attrs.yaml +189 -98
- imap_processing/cdf/config/imap_mag_global_cdf_attrs.yaml +99 -2
- imap_processing/cdf/config/imap_mag_l1c_variable_attrs.yaml +24 -1
- imap_processing/cdf/config/imap_ultra_global_cdf_attrs.yaml +60 -0
- imap_processing/cdf/config/imap_ultra_l1b_variable_attrs.yaml +99 -11
- imap_processing/cdf/config/imap_ultra_l1c_variable_attrs.yaml +50 -7
- imap_processing/cli.py +121 -44
- imap_processing/codice/codice_l1a.py +165 -77
- imap_processing/codice/codice_l1b.py +1 -1
- imap_processing/codice/codice_l2.py +118 -19
- imap_processing/codice/constants.py +1217 -1089
- imap_processing/decom.py +1 -4
- imap_processing/ena_maps/ena_maps.py +32 -25
- imap_processing/ena_maps/utils/naming.py +8 -2
- imap_processing/glows/ancillary/imap_glows_exclusions-by-instr-team_20250923_v002.dat +10 -0
- imap_processing/glows/ancillary/imap_glows_map-of-excluded-regions_20250923_v002.dat +393 -0
- imap_processing/glows/ancillary/imap_glows_map-of-uv-sources_20250923_v002.dat +593 -0
- imap_processing/glows/ancillary/imap_glows_pipeline_settings_20250923_v002.json +54 -0
- imap_processing/glows/ancillary/imap_glows_suspected-transients_20250923_v002.dat +10 -0
- imap_processing/glows/l1b/glows_l1b.py +99 -9
- imap_processing/glows/l1b/glows_l1b_data.py +350 -38
- imap_processing/glows/l2/glows_l2.py +11 -0
- imap_processing/hi/hi_l1a.py +124 -3
- imap_processing/hi/hi_l1b.py +154 -71
- imap_processing/hi/hi_l2.py +84 -51
- imap_processing/hi/utils.py +153 -8
- imap_processing/hit/l0/constants.py +3 -0
- imap_processing/hit/l0/decom_hit.py +5 -8
- imap_processing/hit/l1a/hit_l1a.py +375 -45
- imap_processing/hit/l1b/constants.py +5 -0
- imap_processing/hit/l1b/hit_l1b.py +61 -131
- imap_processing/hit/l2/constants.py +1 -1
- imap_processing/hit/l2/hit_l2.py +10 -11
- imap_processing/ialirt/calculate_ingest.py +219 -0
- imap_processing/ialirt/constants.py +32 -1
- imap_processing/ialirt/generate_coverage.py +201 -0
- imap_processing/ialirt/l0/ialirt_spice.py +5 -2
- imap_processing/ialirt/l0/parse_mag.py +337 -29
- imap_processing/ialirt/l0/process_hit.py +5 -3
- imap_processing/ialirt/l0/process_swapi.py +41 -25
- imap_processing/ialirt/l0/process_swe.py +23 -7
- imap_processing/ialirt/process_ephemeris.py +70 -14
- imap_processing/ialirt/utils/constants.py +22 -16
- imap_processing/ialirt/utils/create_xarray.py +42 -19
- imap_processing/idex/idex_constants.py +1 -5
- imap_processing/idex/idex_l0.py +2 -2
- imap_processing/idex/idex_l1a.py +2 -3
- imap_processing/idex/idex_l1b.py +2 -3
- imap_processing/idex/idex_l2a.py +130 -4
- imap_processing/idex/idex_l2b.py +313 -119
- imap_processing/idex/idex_utils.py +1 -3
- imap_processing/lo/l0/lo_apid.py +1 -0
- imap_processing/lo/l0/lo_science.py +25 -24
- imap_processing/lo/l1a/lo_l1a.py +44 -0
- imap_processing/lo/l1b/lo_l1b.py +3 -3
- imap_processing/lo/l1c/lo_l1c.py +116 -50
- imap_processing/lo/l2/lo_l2.py +29 -29
- imap_processing/lo/lo_ancillary.py +55 -0
- imap_processing/lo/packet_definitions/lo_xtce.xml +5359 -106
- imap_processing/mag/constants.py +1 -0
- imap_processing/mag/l1a/mag_l1a.py +1 -0
- imap_processing/mag/l1a/mag_l1a_data.py +26 -0
- imap_processing/mag/l1b/mag_l1b.py +3 -2
- imap_processing/mag/l1c/interpolation_methods.py +14 -15
- imap_processing/mag/l1c/mag_l1c.py +23 -6
- imap_processing/mag/l1d/__init__.py +0 -0
- imap_processing/mag/l1d/mag_l1d.py +176 -0
- imap_processing/mag/l1d/mag_l1d_data.py +725 -0
- imap_processing/mag/l2/__init__.py +0 -0
- imap_processing/mag/l2/mag_l2.py +25 -20
- imap_processing/mag/l2/mag_l2_data.py +199 -130
- imap_processing/quality_flags.py +28 -2
- imap_processing/spice/geometry.py +101 -36
- imap_processing/spice/pointing_frame.py +1 -7
- imap_processing/spice/repoint.py +29 -2
- imap_processing/spice/spin.py +32 -8
- imap_processing/spice/time.py +60 -19
- imap_processing/swapi/l1/swapi_l1.py +10 -4
- imap_processing/swapi/l2/swapi_l2.py +66 -24
- imap_processing/swapi/swapi_utils.py +1 -1
- imap_processing/swe/l1b/swe_l1b.py +3 -6
- imap_processing/ultra/constants.py +28 -3
- imap_processing/ultra/l0/decom_tools.py +15 -8
- imap_processing/ultra/l0/decom_ultra.py +35 -11
- imap_processing/ultra/l0/ultra_utils.py +102 -12
- imap_processing/ultra/l1a/ultra_l1a.py +26 -6
- imap_processing/ultra/l1b/cullingmask.py +6 -3
- imap_processing/ultra/l1b/de.py +122 -26
- imap_processing/ultra/l1b/extendedspin.py +29 -2
- imap_processing/ultra/l1b/lookup_utils.py +424 -50
- imap_processing/ultra/l1b/quality_flag_filters.py +23 -0
- imap_processing/ultra/l1b/ultra_l1b_culling.py +356 -5
- imap_processing/ultra/l1b/ultra_l1b_extended.py +534 -90
- imap_processing/ultra/l1c/helio_pset.py +127 -7
- imap_processing/ultra/l1c/l1c_lookup_utils.py +256 -0
- imap_processing/ultra/l1c/spacecraft_pset.py +90 -15
- imap_processing/ultra/l1c/ultra_l1c.py +6 -0
- imap_processing/ultra/l1c/ultra_l1c_culling.py +85 -0
- imap_processing/ultra/l1c/ultra_l1c_pset_bins.py +446 -341
- imap_processing/ultra/l2/ultra_l2.py +0 -1
- imap_processing/ultra/utils/ultra_l1_utils.py +40 -3
- imap_processing/utils.py +3 -4
- {imap_processing-0.17.0.dist-info → imap_processing-0.19.0.dist-info}/METADATA +3 -3
- {imap_processing-0.17.0.dist-info → imap_processing-0.19.0.dist-info}/RECORD +126 -126
- imap_processing/idex/idex_l2c.py +0 -250
- imap_processing/spice/kernels.py +0 -187
- imap_processing/ultra/lookup_tables/Angular_Profiles_FM45_LeftSlit.csv +0 -526
- imap_processing/ultra/lookup_tables/Angular_Profiles_FM45_RightSlit.csv +0 -526
- imap_processing/ultra/lookup_tables/Angular_Profiles_FM90_LeftSlit.csv +0 -526
- imap_processing/ultra/lookup_tables/Angular_Profiles_FM90_RightSlit.csv +0 -524
- imap_processing/ultra/lookup_tables/EgyNorm.mem.csv +0 -32769
- imap_processing/ultra/lookup_tables/FM45_Startup1_ULTRA_IMGPARAMS_20240719.csv +0 -2
- imap_processing/ultra/lookup_tables/FM90_Startup1_ULTRA_IMGPARAMS_20240719.csv +0 -2
- imap_processing/ultra/lookup_tables/dps_grid45_compressed.cdf +0 -0
- imap_processing/ultra/lookup_tables/ultra45_back-pos-luts.csv +0 -4097
- imap_processing/ultra/lookup_tables/ultra45_tdc_norm.csv +0 -2050
- imap_processing/ultra/lookup_tables/ultra90_back-pos-luts.csv +0 -4097
- imap_processing/ultra/lookup_tables/ultra90_tdc_norm.csv +0 -2050
- imap_processing/ultra/lookup_tables/yadjust.csv +0 -257
- {imap_processing-0.17.0.dist-info → imap_processing-0.19.0.dist-info}/LICENSE +0 -0
- {imap_processing-0.17.0.dist-info → imap_processing-0.19.0.dist-info}/WHEEL +0 -0
- {imap_processing-0.17.0.dist-info → imap_processing-0.19.0.dist-info}/entry_points.txt +0 -0
imap_processing/hit/l1b/hit_l1b.py
CHANGED

@@ -17,6 +17,7 @@ from imap_processing.hit.l1b.constants import (
     FILLVAL_FLOAT32,
     FILLVAL_INT64,
     LIVESTIM_PULSES,
+    SECTORS,
     SUMMED_PARTICLE_ENERGY_RANGE_MAPPING,
 )
 
@@ -25,7 +26,7 @@ logger = logging.getLogger(__name__)
 # TODO review logging levels to use (debug vs. info)
 
 
-def hit_l1b(dependencies: dict) -> list[xr.Dataset]:
+def hit_l1b(dependency: str | xr.Dataset, l1b_descriptor: str) -> xr.Dataset:
     """
     Will process HIT data to L1B.
 
@@ -33,54 +34,56 @@ def hit_l1b(dependencies: dict) -> list[xr.Dataset]:
 
     Parameters
     ----------
-
-
-
-
+    dependency : Union[str, xr.Dataset]
+        Dependency is either an L1A xarray dataset to process
+        science data or a file path string to an L0 file to
+        process housekeeping data.
+    l1b_descriptor : str
+        The descriptor for the L1B dataset to create.
 
     Returns
     -------
-
-
+    l1b_dataset : xarray.Dataset
+        The processed L1B dataset.
     """
     # Create the attribute manager for this data level
     attr_mgr = get_attribute_manager("l1b")
 
+    l1b_dataset = None
+
     # Create L1B datasets
-
-    if "imap_hit_l0_raw" in dependencies:
+    if l1b_descriptor == "hk":
         # Unpack ccsds file to xarray datasets
-        packet_file =
+        packet_file = dependency
         datasets_by_apid = get_datasets_by_apid(packet_file, derived=True)
-        # TODO: update to raise error after all APIDs are included in the same
-        # raw files. currently science and housekeeping are in separate files.
         if HitAPID.HIT_HSKP in datasets_by_apid:
             # Process housekeeping to L1B.
-
-
-                datasets_by_apid[HitAPID.HIT_HSKP], attr_mgr, "imap_hit_l1b_hk"
-            )
+            l1b_dataset = process_housekeeping_data(
+                datasets_by_apid[HitAPID.HIT_HSKP], attr_mgr, "imap_hit_l1b_hk"
             )
             logger.info("HIT L1B housekeeping dataset created")
-
+    elif l1b_descriptor in ["standard-rates", "summed-rates", "sectored-rates"]:
         # Process science data to L1B datasets
-
-
-
+        l1b_dataset = process_science_data(dependency, l1b_descriptor, attr_mgr)
+        logger.info("HIT L1B science dataset created")
+    else:
+        logger.error(f"Unsupported descriptor for L1B processing: {l1b_descriptor}")
+        raise ValueError(f"Unsupported descriptor: {l1b_descriptor}")
 
-    return
+    return l1b_dataset
 
 
 def process_science_data(
-    l1a_counts_dataset: xr.Dataset, attr_mgr: ImapCdfAttributes
-) ->
+    l1a_counts_dataset: xr.Dataset, descriptor: str, attr_mgr: ImapCdfAttributes
+) -> xr.Dataset:
     """
     Will create L1B science datasets for CDF products.
 
-
-    CDF creation.
+    This function processes L1A counts data to L1B science
+    data for CDF creation. There are three L1B science
     datasets: standard rates, summed rates, and sectored rates.
-
+    This function creates one dataset based on the descriptor
+    provided. It will also update dataset attributes, coordinates and
     data variable dimensions according to specifications in
     a CDF yaml file.
 
@@ -88,45 +91,44 @@ def process_science_data(
     ----------
     l1a_counts_dataset : xr.Dataset
         The L1A counts dataset.
+    descriptor : str
+        The descriptor for the L1B dataset to create
+        (e.g., "standard-rates", "summed-rates", "sectored-rates").
     attr_mgr : AttributeManager
         The attribute manager for the L1B data level.
 
     Returns
     -------
-    dataset :
-
+    dataset : xarray.Dataset
+        A processed L1B science dataset.
     """
     logger.info("Creating HIT L1B science datasets")
 
-
-
+    dataset = None
+    logical_source = None
 
     # Calculate fractional livetime from the livetime counter
     livetime = l1a_counts_dataset["livetime_counter"] / LIVESTIM_PULSES
     livetime = livetime.rename("livetime")
 
-    # Process counts data to L1B
-
-
-
-
-
-
-
-
-
-    )
+    # Process counts data to an L1B dataset based on the descriptor
+    if descriptor == "standard-rates":
+        dataset = process_standard_rates_data(l1a_counts_dataset, livetime)
+        logical_source = "imap_hit_l1b_standard-rates"
+    elif descriptor == "summed-rates":
+        dataset = process_summed_rates_data(l1a_counts_dataset, livetime)
+        logical_source = "imap_hit_l1b_summed-rates"
+    elif descriptor == "sectored-rates":
+        dataset = process_sectored_rates_data(l1a_counts_dataset, livetime)
+        logical_source = "imap_hit_l1b_sectored-rates"
 
     # Update attributes and dimensions
-
+    if dataset and logical_source:
         dataset.attrs = attr_mgr.get_global_attributes(logical_source)
-
-        # TODO: Add CDF attributes to yaml once they're defined for L1B science data
-        # Assign attributes and dimensions to each data array in the Dataset
+        # TODO: Add CDF attributes to yaml
         for field in dataset.data_vars.keys():
             try:
-                # Create a dict of dimensions using the DEPEND_I keys in the
-                # attributes
+                # Create a dict of dimensions using the DEPEND_I keys in the attributes
                 dims = {
                     key: value
                     for key, value in attr_mgr.get_variable_attributes(field).items()
@@ -135,7 +137,6 @@ def process_science_data(
                 dataset[field].attrs = attr_mgr.get_variable_attributes(field)
                 dataset[field].assign_coords(dims)
             except KeyError:
-                print(f"Field {field} not found in attribute manager.")
                 logger.warning(f"Field {field} not found in attribute manager.")
 
         # Skip schema check for epoch to prevent attr_mgr from adding the
@@ -143,10 +144,9 @@ def process_science_data(
         dataset.epoch.attrs = attr_mgr.get_variable_attributes(
            "epoch", check_schema=False
        )
-
        logger.info(f"HIT L1B dataset created for {logical_source}")
 
-    return
+    return dataset
 
 
 def initialize_l1b_dataset(l1a_counts_dataset: xr.Dataset, coords: list) -> xr.Dataset:
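The hunks above replace HIT L1B's dict-of-dependencies interface with a one-product-per-call API keyed by descriptor. A hedged usage sketch of the new signature; the file names and the l1a_counts dataset below are placeholders, not taken from the diff:

    import xarray as xr

    from imap_processing.hit.l1b.hit_l1b import hit_l1b

    # Science products: pass the upstream L1A counts dataset and one descriptor.
    l1a_counts: xr.Dataset = ...  # produced by HIT L1A processing (placeholder)
    summed = hit_l1b(l1a_counts, "summed-rates")
    standard = hit_l1b(l1a_counts, "standard-rates")

    # Housekeeping: pass an L0 raw packet file path with the "hk" descriptor.
    hk = hit_l1b("imap_hit_l0_raw_20250101_v001.pkts", "hk")

    # Any other descriptor raises ValueError("Unsupported descriptor: ...").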
@@ -357,79 +357,11 @@ def process_summed_rates_data(
     return l1b_summed_rates_dataset
 
 
-def subset_data_for_sectored_counts(
-    l1a_counts_dataset: xr.Dataset, livetime: xr.DataArray
-) -> tuple[xr.Dataset, xr.DataArray]:
-    """
-    Subset data for complete sets of sectored counts and corresponding livetime values.
-
-    A set of sectored data starts with hydrogen and ends with iron and correspond to
-    the mod 10 values 0-9. The livetime values from the previous 10 minutes are used
-    to calculate the rates for each set since those counts are transmitted 10 minutes
-    after they were collected. Therefore, only complete sets of sectored counts where
-    livetime from the previous 10 minutes are available are included in the output.
-
-    Parameters
-    ----------
-    l1a_counts_dataset : xr.Dataset
-        The L1A counts dataset.
-    livetime : xr.DataArray
-        1D array of livetime values calculated from the livetime counter.
-
-    Returns
-    -------
-    tuple[xr.Dataset, xr.DataArray]
-        Dataset of complete sectored counts and corresponding livetime values.
-    """
-    # Identify 10-minute intervals of complete sectored counts.
-    bin_size = 10
-    mod_10 = l1a_counts_dataset.hdr_minute_cnt.values % 10
-    pattern = np.arange(bin_size)
-
-    # Use sliding windows to find pattern matches
-    matches = np.all(
-        np.lib.stride_tricks.sliding_window_view(mod_10, bin_size) == pattern, axis=1
-    )
-    start_indices = np.where(matches)[0]
-
-    # Filter out start indices that are less than or equal to the bin size
-    # since the previous 10 minutes are needed for calculating rates
-    if start_indices.size == 0:
-        logger.error(
-            "No data to process - valid start indices not found for "
-            "complete sectored counts."
-        )
-        raise ValueError("No valid start indices found for complete sectored counts.")
-    else:
-        start_indices = start_indices[start_indices >= bin_size]
-
-    # Subset data for complete sets of sectored counts.
-    # Each set of sectored counts is 10 minutes long, so we take the indices
-    # starting from the start indices and extend to the bin size of 10.
-    # This creates a 1D array of indices that correspond to the complete
-    # sets of sectored counts which is used to filter the L1A dataset and
-    # create the L1B sectored rates dataset.
-    data_indices = np.concatenate(
-        [np.arange(idx, idx + bin_size) for idx in start_indices]
-    )
-    l1b_sectored_rates_dataset = l1a_counts_dataset.isel(epoch=data_indices)
-
-    # Subset livetime values corresponding to the previous 10 minutes
-    # for each start index. This ensures the livetime data aligns correctly
-    # with the sectored counts for rate calculations.
-    livetime_indices = np.concatenate(
-        [np.arange(idx - bin_size, idx) for idx in start_indices]
-    )
-    livetime = livetime.isel(epoch=livetime_indices)
-
-    return l1b_sectored_rates_dataset, livetime
-
-
 def process_sectored_rates_data(
     l1a_counts_dataset: xr.Dataset, livetime: xr.DataArray
 ) -> xr.Dataset:
     """
-    Will process
+    Will process L1A raw counts data into L1B sectored rates.
 
     A complete set of sectored counts is taken over 10 science frames (10 minutes)
     where each science frame contains counts for one species and energy range.
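The helper deleted above located complete hydrogen-through-iron sets by comparing the minute counter mod 10 against the pattern 0-9 with a NumPy sliding window. A minimal standalone sketch of that windowed pattern match; the counter values are invented for illustration:

    import numpy as np

    # Hypothetical hdr_minute_cnt values; a complete sectored set is a
    # ten-frame run whose values are 0..9 mod 10.
    hdr_minute_cnt = np.array([8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21])
    bin_size = 10
    mod_10 = hdr_minute_cnt % 10
    pattern = np.arange(bin_size)

    # Each length-10 window is a candidate run; a row equal to [0, 1, ..., 9]
    # marks the first frame of a complete set.
    windows = np.lib.stride_tricks.sliding_window_view(mod_10, bin_size)
    start_indices = np.where(np.all(windows == pattern, axis=1))[0]
    print(start_indices)  # [2] -> the set starting where hdr_minute_cnt == 10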
@@ -444,12 +376,18 @@ def process_sectored_rates_data(
 
     Sectored counts data is transmitted 10 minutes after they are collected.
     To calculate rates, the sectored counts over 10 minutes need to be divided by
-    the sum of livetime values from the previous 10 minutes
+    the sum of livetime values from the previous 10 minutes multiplied by a factor
+    of 15 to account for the different inclination sectors (a single spacecraft
+    rotation is split into 15 inclination ranges). See equation 11 in the algorithm
+    document.
+
+    NOTE: The L1A counts dataset has complete sets of sectored counts and livetime is
+    already shifted to 10 minutes before the counts. This was handled in L1A processing.
 
     Parameters
     ----------
     l1a_counts_dataset : xr.Dataset
-        The L1A counts dataset.
+        The L1A counts dataset containing sectored counts.
 
     livetime : xr.DataArray
         1D array of livetime values calculated from the livetime counter.
@@ -460,10 +398,7 @@ def process_sectored_rates_data(
     xr.Dataset
         The processed L1B sectored rates dataset.
     """
-    # TODO
-    # -filter by epoch values in day being processed.
-    #  middle epoch (or mod 5 value for 6th frame)
-    # -consider refactoring calculate_rates function to handle sectored rates
+    # TODO - consider refactoring calculate_rates function to handle sectored rates
 
     # Define particles and coordinates
     particles = ["h", "he4", "cno", "nemgsi", "fe"]
@@ -475,11 +410,6 @@ def process_sectored_rates_data(
         if any(str(var).startswith(f"{p}_") for p in particles)
     ]
 
-    # Subset data for complete sets of sectored counts and corresponding livetime values
-    l1a_counts_dataset, livetime = subset_data_for_sectored_counts(
-        l1a_counts_dataset, livetime
-    )
-
     # Sum livetime over 10 minute intervals
     livetime_10min = sum_livetime_10min(livetime)
 
@@ -520,7 +450,7 @@ def process_sectored_rates_data(
     rates = xr.DataArray(
         np.where(
             counts != FILLVAL_INT64,
-            (counts / livetime_10min_reshaped).astype(np.float32),
+            (counts / (SECTORS * livetime_10min_reshaped)).astype(np.float32),
             FILLVAL_FLOAT32,
         ),
         dims=l1a_counts_dataset[var].dims,
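A small numeric sketch of the corrected rate formula (equation 11): counts divided by 15 times the summed livetime, with fill values passed through untouched. SECTORS = 15 is inferred from the docstring above, and the fill values shown are the usual ISTP CDF defaults; both are assumptions, not read from the diff:

    import numpy as np

    FILLVAL_INT64 = -9223372036854775808  # assumed CDF fill value for int64
    FILLVAL_FLOAT32 = -1.0e31             # assumed CDF fill value for float32
    SECTORS = 15                          # inclination sectors per spin

    counts = np.array([300, FILLVAL_INT64, 450], dtype=np.int64)
    livetime_10min = np.array([540.0, 540.0, 600.0])  # summed livetime, seconds

    # Fill values bypass the division and stay fill values in the output.
    rates = np.where(
        counts != FILLVAL_INT64,
        (counts / (SECTORS * livetime_10min)).astype(np.float32),
        FILLVAL_FLOAT32,
    )
    print(rates)  # [~0.037037, -1e+31, 0.05]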
imap_processing/hit/l2/constants.py
CHANGED

@@ -169,7 +169,7 @@ STANDARD_PARTICLE_ENERGY_RANGE_MAPPING = {
     {"energy_min": 5.0, "energy_max": 6.0, "R2": [59], "R3": [], "R4": []},
     {"energy_min": 6.0, "energy_max": 8.0, "R2": [60], "R3": [63], "R4": []},
     {"energy_min": 8.0, "energy_max": 10.0, "R2": [61], "R3": [64], "R4": []},
-    {"energy_min": 10.0, "energy_max": 12.0, "R2": [], "R3": [65], "R4": []},
+    {"energy_min": 10.0, "energy_max": 12.0, "R2": [62], "R3": [65], "R4": []},
     {"energy_min": 12.0, "energy_max": 15.0, "R2": [], "R3": [66], "R4": []},
     {"energy_min": 15.0, "energy_max": 21.0, "R2": [], "R3": [67], "R4": []},
     {"energy_min": 21.0, "energy_max": 27.0, "R2": [], "R3": [68], "R4": []},
imap_processing/hit/l2/hit_l2.py
CHANGED

@@ -27,16 +27,16 @@ logger = logging.getLogger(__name__)
 # - review logging levels to use (debug vs. info)
 
 
-def hit_l2(dependency_sci: xr.Dataset, dependencies_anc: list) -> list[xr.Dataset]:
+def hit_l2(dependency_sci: xr.Dataset, dependencies_anc: list) -> xr.Dataset:
     """
-    Will process HIT data to L2.
+    Will process HIT L1B data to L2.
 
     Processes dependencies needed to create L2 data products.
 
     Parameters
     ----------
     dependency_sci : xr.Dataset
-        L1B
+        L1B dataset that is either summed rates,
         standard rates or sector rates.
 
     dependencies_anc : list
@@ -44,8 +44,8 @@ def hit_l2(dependency_sci: xr.Dataset, dependencies_anc: list) -> list[xr.Dataset]:
 
     Returns
     -------
-
-
+    l2_dataset : xarray.Dataset
+        The processed L2 dataset from the dependency dataset provided.
     """
     logger.info("Creating HIT L2 science dataset")
 
@@ -74,7 +74,7 @@ def hit_l2(dependency_sci: xr.Dataset, dependencies_anc: list) -> list[xr.Dataset]:
 
     logger.info(f"HIT L2 dataset created for {logical_source}")
 
-    return
+    return l2_dataset
 
 
 def add_cdf_attributes(
@@ -95,11 +95,11 @@ def add_cdf_attributes(
 
     Parameters
     ----------
-    dataset :
+    dataset : xarray.Dataset
         The dataset to update.
     logical_source : str
         The logical source of the dataset.
-    attr_mgr :
+    attr_mgr : ImapCdfAttributes
         The attribute manager to retrieve attributes.
 
     Returns
@@ -132,12 +132,11 @@ def add_cdf_attributes(
     # check_schema=False to avoid attr_mgr adding stuff dimensions don't need
     for dim in dataset.dims:
         dataset[dim].attrs = attr_mgr.get_variable_attributes(dim, check_schema=False)
-        # TODO: should labels be added as coordinates? Check with SPDF
         if dim != "epoch":
             label_array = xr.DataArray(
                 dataset[dim].values.astype(str),
                 name=f"{dim}_label",
-                dims=[
+                dims=[dim],
                 attrs=attr_mgr.get_variable_attributes(
                     f"{dim}_label", check_schema=False
                 ),
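The dims=[dim] fix ties each label variable to the coordinate it annotates. A minimal sketch of what the loop now builds, with invented coordinate values and the attr_mgr attribute lookup omitted:

    import xarray as xr

    ds = xr.Dataset(coords={"energy_mean": [1.8, 4.0, 6.0]})

    dim = "energy_mean"
    # String labels that share the parent coordinate's dimension.
    label_array = xr.DataArray(
        ds[dim].values.astype(str),
        name=f"{dim}_label",
        dims=[dim],
    )
    print(label_array.values)  # ['1.8' '4.0' '6.0']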
@@ -633,7 +632,7 @@ def process_summed_intensity(
         summed_intensity_dataset = add_total_uncertainties(
             summed_intensity_dataset, var
         )
-        # Expand the variable name to include
+        # Expand the variable name to include summed intensity
         summed_intensity_dataset = summed_intensity_dataset.rename(
             {var: f"{var}_summed_intensity"}
         )
imap_processing/ialirt/calculate_ingest.py
ADDED

@@ -0,0 +1,219 @@
+"""Packet ingest and tcp connection times for each station."""
+
+import logging
+from datetime import datetime, timedelta, timezone
+from typing import Any
+
+from imap_processing.ialirt.constants import STATIONS
+
+logger = logging.getLogger(__name__)
+
+
+def find_tcp_connections(
+    start_file_creation: datetime,
+    end_file_creation: datetime,
+    lines: list,
+    realtime_summary: dict,
+) -> dict:
+    """
+    Find tcp connection time ranges for ground station from log lines.
+
+    Parameters
+    ----------
+    start_file_creation : datetime
+        File creation time of last file minus 48 hrs.
+    end_file_creation : datetime
+        File creation time of last file.
+    lines : list
+        All lines of log files.
+    realtime_summary : dict
+        Input dictionary containing ingest parameters.
+
+    Returns
+    -------
+    realtime_summary : dict
+        Output dictionary with tcp connection info.
+    """
+    current_starts: dict[str, datetime | None] = {}
+
+    for line in lines:
+        if "antenna partner connection is" not in line:
+            continue
+
+        timestamp_str = line.split(" ")[0]
+        msg = " ".join(line.split(" ")[1:])
+        station = msg.split(" antenna")[0]
+
+        if station not in realtime_summary["connection_times"]:
+            realtime_summary["connection_times"][station] = []
+        if station not in realtime_summary["stations"]:
+            realtime_summary["stations"].append(station)
+
+        timestamp = datetime.strptime(timestamp_str, "%Y/%j-%H:%M:%S.%f")
+
+        if f"{station} antenna partner connection is up." in line:
+            current_starts[station] = timestamp
+
+        elif f"{station} antenna partner connection is down!" in line:
+            start = current_starts.get(station)
+            if start is not None:
+                realtime_summary["connection_times"][station].append(
+                    {
+                        "start": datetime.isoformat(start),
+                        "end": datetime.isoformat(timestamp),
+                    }
+                )
+                current_starts[station] = None
+            else:
+                # No matching "up"
+                realtime_summary["connection_times"][station].append(
+                    {
+                        "start": datetime.isoformat(start_file_creation),
+                        "end": datetime.isoformat(timestamp),
+                    }
+                )
+                current_starts[station] = None
+
+    # Handle hanging "up" at the end of file
+    for station, start in current_starts.items():
+        if start is not None:
+            realtime_summary["connection_times"][station].append(
+                {
+                    "start": datetime.isoformat(start),
+                    "end": datetime.isoformat(end_file_creation),
+                }
+            )
+
+    # Filter out connection windows that are completely outside the time window
+    for station in realtime_summary["connection_times"]:
+        realtime_summary["connection_times"][station] = [
+            window
+            for window in realtime_summary["connection_times"][station]
+            if datetime.fromisoformat(window["end"]) >= start_file_creation
+            and datetime.fromisoformat(window["start"]) <= end_file_creation
+        ]
+
+    return realtime_summary
+
+
+def packets_created(start_file_creation: datetime, lines: list) -> list:
+    """
+    Find timestamps when packets were created based on log lines.
+
+    Parameters
+    ----------
+    start_file_creation : datetime
+        File creation time of last file minus 48 hrs.
+    lines : list
+        All lines of log files.
+
+    Returns
+    -------
+    packet_times : list
+        List of datetime objects when packets were created.
+    """
+    packet_times = []
+
+    for line in lines:
+        if "Renamed iois_1_packets" in line:
+            timestamp_str = line.split(" ")[0]
+            timestamp = datetime.strptime(timestamp_str, "%Y/%j-%H:%M:%S.%f")
+            # Possible that data extends further than 48 hrs in the past.
+            if timestamp >= start_file_creation:
+                packet_times.append(timestamp)
+
+    return packet_times
+
+
+def format_ingest_data(last_filename: str, log_lines: list) -> dict:
+    """
+    Format TCP connection and packet ingest data from multiple log files.
+
+    Parameters
+    ----------
+    last_filename : str
+        Log file that is last chronologically.
+    log_lines : list[str]
+        Combined lines from all log files (assumed already sorted by time).
+
+    Returns
+    -------
+    realtime_summary : dict
+        Structured output with TCP connection windows per station
+        and global packet ingest timestamps.
+
+    Notes
+    -----
+    Example output:
+    {
+        "summary": "I-ALiRT Real-time Ingest Summary",
+        "generated": "2025-08-07T21:36:09Z",
+        "time_format": "UTC (ISOC)",
+        "stations": [
+            "Kiel"
+        ],
+        "time_range": [
+            "2025-07-30T23:00:00",
+            "2025-07-31T02:00:00"
+        ],
+        "packet_ingest": [
+            "2025-07-31T00:00:00",
+            "2025-07-31T02:01:00"
+        ],
+        "connection_times": {
+            "Kiel": [
+                {
+                    "start": "2025-07-30T23:00:00",
+                    "end": "2025-07-31T00:15:00"
+                },
+                {
+                    "start": "2025-07-31T02:00:00",
+                    "end": "2025-07-31T02:00:00"
+                }
+            ]
+        }
+    }
+
+    where time_range is the overall time range of the data,
+    packet_ingest contains timestamps when packets were finalized,
+    and connection_times contains connection windows for each station.
+    """
+    # File creation time.
+    last_timestamp_str = last_filename.split(".")[2]
+    last_timestamp_str = last_timestamp_str.replace("_", ":")
+    end_of_time = datetime.strptime(last_timestamp_str, "%Y-%jT%H:%M:%S")
+
+    # File creation time of last file minus 48 hrs.
+    start_of_time = datetime.strptime(last_timestamp_str, "%Y-%jT%H:%M:%S") - timedelta(
+        hours=48
+    )
+
+    realtime_summary: dict[str, Any] = {
+        "summary": "I-ALiRT Real-time Ingest Summary",
+        "generated": datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ"),
+        "time_format": "UTC (ISOC)",
+        "stations": list(STATIONS),
+        "time_range": [
+            start_of_time.isoformat(),
+            end_of_time.isoformat(),
+        ],  # Overall time range of the data
+        "packet_ingest": [],  # Global packet ingest times
+        "connection_times": {
+            station: [] for station in list(STATIONS)
+        },  # Per-station TCP connection windows
+    }
+
+    # TCP connection data for each station
+    realtime_summary = find_tcp_connections(
+        start_of_time, end_of_time, log_lines, realtime_summary
+    )
+
+    # Global packet ingest timestamps
+    packet_times = packets_created(start_of_time, log_lines)
+    realtime_summary["packet_ingest"] = [
+        pkt_time.isoformat() for pkt_time in packet_times
+    ]
+
+    logger.info(f"Created ingest files for {realtime_summary['time_range']}")
+
+    return realtime_summary
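A hedged sketch of how the new module might be driven. The file naming scheme and log-line text are assumptions inferred from the parsing above: the third dot-separated field of the last file name must hold a %Y-%jT%H_%M_%S creation time, and each log line starts with a %Y/%j-%H:%M:%S.%f timestamp:

    from pathlib import Path

    from imap_processing.ialirt.calculate_ingest import format_ingest_data

    # Hypothetical log files, e.g. "iois.log.2025-212T23_00_00.txt".
    log_files = sorted(Path("logs").glob("iois.log.*"))
    last_filename = log_files[-1].name

    # Time-ordered lines such as
    # "2025/212-21:14:03.120 Kiel antenna partner connection is up."
    log_lines: list[str] = []
    for log_file in log_files:
        log_lines.extend(log_file.read_text().splitlines())

    summary = format_ingest_data(last_filename, log_lines)
    print(summary["time_range"], summary["stations"])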
imap_processing/ialirt/constants.py
CHANGED

@@ -1,6 +1,7 @@
 """Module for constants and useful shared classes used in I-ALiRT processing."""
 
 from dataclasses import dataclass
+from typing import NamedTuple
 
 import numpy as np
 
@@ -32,7 +33,37 @@ class IalirtSwapiConstants:
     boltz = 1.380649e-23  # Boltzmann constant, J/K
     at_mass = 1.6605390666e-27  # atomic mass, kg
     prot_mass = 1.007276466621 * at_mass  # mass of proton, kg
-    eff_area =
+    eff_area = 1.633e-4 * 1e-4  # effective area, cm2 to meters squared
     az_fov = np.deg2rad(30)  # azimuthal width of the field of view, radians
     fwhm_width = 0.085  # FWHM of energy width
     speed_ew = 0.5 * fwhm_width  # speed width of energy passband
+    e_charge = 1.602176634e-19  # electronic charge, [C]
+    speed_coeff = np.sqrt(2 * e_charge / prot_mass) / 1e3
+
+
+class StationProperties(NamedTuple):
+    """Class that represents properties of ground stations."""
+
+    longitude: float  # longitude in degrees
+    latitude: float  # latitude in degrees
+    altitude: float  # altitude in kilometers
+    min_elevation_deg: float  # minimum elevation angle in degrees
+
+
+# Verified by Kiel and KSWC Observatory staff.
+# Notes: the KSWC station is not yet operational,
+# but will have the following properties:
+# "KSWC": StationProperties(
+#     longitude=126.2958,  # degrees East
+#     latitude=33.4273,  # degrees North
+#     altitude=0.1,  # approx 100 meters
+#     min_elevation_deg=5,  # 5 degrees is the requirement
+# ),
+STATIONS = {
+    "Kiel": StationProperties(
+        longitude=10.1808,  # degrees East
+        latitude=54.2632,  # degrees North
+        altitude=0.1,  # approx 100 meters
+        min_elevation_deg=5,  # 5 degrees is the requirement
+    )
+}
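The new speed_coeff reads as the standard energy-per-charge to proton-speed conversion, v = sqrt(2qE/m_p), with the trailing 1e3 putting the result in km/s per sqrt(eV). A quick numeric check; the physical interpretation is assumed, not stated in the diff:

    import numpy as np

    e_charge = 1.602176634e-19                     # C
    prot_mass = 1.007276466621 * 1.6605390666e-27  # kg
    speed_coeff = np.sqrt(2 * e_charge / prot_mass) / 1e3

    # A proton at 1 keV energy-per-charge: ~438 km/s, a typical solar wind speed.
    print(speed_coeff * np.sqrt(1000.0))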