imap-processing 0.15.0-py3-none-any.whl → 0.16.1-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
- imap_processing/_version.py +2 -2
- imap_processing/cdf/config/imap_hit_l1a_variable_attrs.yaml +1404 -93
- imap_processing/cdf/config/imap_ialirt_l1_variable_attrs.yaml +113 -130
- imap_processing/cli.py +1 -4
- imap_processing/codice/codice_l1a.py +87 -62
- imap_processing/codice/codice_l2.py +0 -8
- imap_processing/codice/constants.py +16 -5
- imap_processing/hi/hi_l1a.py +447 -0
- imap_processing/hi/{l1b/hi_l1b.py → hi_l1b.py} +1 -1
- imap_processing/hi/{l1c/hi_l1c.py → hi_l1c.py} +21 -21
- imap_processing/hi/{l2/hi_l2.py → hi_l2.py} +13 -13
- imap_processing/hi/utils.py +10 -9
- imap_processing/hit/l0/constants.py +3 -1
- imap_processing/hit/l0/decom_hit.py +45 -11
- imap_processing/hit/l1a/hit_l1a.py +31 -24
- imap_processing/hit/l1b/hit_l1b.py +30 -11
- imap_processing/hit/l2/hit_l2.py +8 -11
- imap_processing/ialirt/constants.py +38 -0
- imap_processing/ialirt/l0/parse_mag.py +1 -1
- imap_processing/ialirt/l0/process_codice.py +91 -0
- imap_processing/ialirt/l0/process_hit.py +12 -21
- imap_processing/ialirt/l0/process_swapi.py +172 -23
- imap_processing/ialirt/l0/process_swe.py +3 -10
- imap_processing/ialirt/utils/constants.py +16 -2
- imap_processing/ialirt/utils/create_xarray.py +59 -11
- imap_processing/ultra/utils/ultra_l1_utils.py +4 -2
- {imap_processing-0.15.0.dist-info → imap_processing-0.16.1.dist-info}/METADATA +1 -1
- {imap_processing-0.15.0.dist-info → imap_processing-0.16.1.dist-info}/RECORD +31 -37
- imap_processing/hi/l1a/__init__.py +0 -0
- imap_processing/hi/l1a/hi_l1a.py +0 -98
- imap_processing/hi/l1a/histogram.py +0 -152
- imap_processing/hi/l1a/science_direct_event.py +0 -214
- imap_processing/hi/l1b/__init__.py +0 -0
- imap_processing/hi/l1c/__init__.py +0 -0
- imap_processing/hi/l2/__init__.py +0 -0
- imap_processing/ialirt/l0/process_codicehi.py +0 -156
- imap_processing/ialirt/l0/process_codicelo.py +0 -41
- {imap_processing-0.15.0.dist-info → imap_processing-0.16.1.dist-info}/LICENSE +0 -0
- {imap_processing-0.15.0.dist-info → imap_processing-0.16.1.dist-info}/WHEEL +0 -0
- {imap_processing-0.15.0.dist-info → imap_processing-0.16.1.dist-info}/entry_points.txt +0 -0
imap_processing/hit/l0/decom_hit.py
CHANGED

@@ -100,7 +100,9 @@ COUNTS_DATA_STRUCTURE = {
     "penfgrates": HITPacking(16, 528, (33,)),  # range 4 foreground rates
     "penbgrates": HITPacking(16, 240, (15,)),  # range 4 background rates
     "ialirtrates": HITPacking(16, 320, (20,)),  # ialirt rates
-    "sectorates": HITPacking(
+    "sectorates": HITPacking(
+        16, 1920, (8, 15)
+    ),  # sectored rates (8 zenith angles, 15 azimuth angles)
     "l4fgrates": HITPacking(16, 768, (48,)),  # all range foreground rates
     "l4bgrates": HITPacking(16, 384, (24,)),  # all range foreground rates
 }
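For context on the packing metadata: a minimal sketch (not the package's actual implementation) of how an entry like HITPacking(16, 1920, (8, 15)) can drive unpacking: 1920 bits at 16 bits per value yields 120 values, reshaped to 8 zenith x 15 azimuth. The Packing dataclass and unpack_section helper below are hypothetical stand-ins.

from dataclasses import dataclass

import numpy as np


@dataclass
class Packing:  # hypothetical stand-in for HITPacking
    bit_length: int      # bits per value
    section_length: int  # total bits in the section
    shape: tuple         # output array shape per science frame


def unpack_section(bits: str, meta: Packing) -> np.ndarray:
    """Slice a binary string into fixed-width integers and reshape."""
    n_values = meta.section_length // meta.bit_length
    values = [
        int(bits[i * meta.bit_length : (i + 1) * meta.bit_length], 2)
        for i in range(n_values)
    ]
    return np.array(values).reshape(meta.shape)


sector_meta = Packing(16, 1920, (8, 15))
print(unpack_section("0" * sector_meta.section_length, sector_meta).shape)  # (8, 15)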
@@ -89,11 +89,14 @@ def parse_count_rates(sci_dataset: xr.Dataset) -> None:
         if all(x not in field for x in ["hdr", "spare", "pha"]):
             parsed_data = np.vectorize(decompress_rates_16_to_32)(parsed_data)

-            # Get dims for data variables
+            # Get dims for data variables
             if len(field_meta.shape) > 1:
                 if "sectorates" in field:
-                    # Reshape data
+                    # Reshape data into (num_frames, 8, 15) for zenith and azimuth
+                    # look directions
                     parsed_data = np.array(parsed_data).reshape((-1, *field_meta.shape))
+                    # Transpose data to (num_frames, 15, 8) for flipped look directions
+                    parsed_data = np.transpose(parsed_data, axes=(0, 2, 1))
                 dims = ["epoch", "azimuth", "zenith"]
                 # Add angle values to coordinates
                 sci_dataset.coords["zenith"] = xr.DataArray(
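The reshape-then-transpose step above is easy to check in isolation with plain NumPy; this toy example assumes two science frames of 120 sectored values each.

import numpy as np

raw = np.arange(2 * 8 * 15)
data = raw.reshape((-1, 8, 15))            # (num_frames, zenith, azimuth)
data = np.transpose(data, axes=(0, 2, 1))  # (num_frames, azimuth, zenith)
print(data.shape)  # (2, 15, 8), matching dims ["epoch", "azimuth", "zenith"]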
@@ -202,9 +205,12 @@ def update_ccsds_header_dims(sci_dataset: xr.Dataset) -> xr.Dataset:
     it will be updated later in the process to represent
     time per science frame, so another time dimension is
     needed for the ccsds header fields.This function
-    updates the dimension for
-    instead of epoch.
-
+    updates the dimension for all data vars to use sc_tick
+    instead of epoch. It also temporarily sets sc_tick as the
+    dimension for the epoch coordinate (to be updated later
+    in the assemble_science_frames function).
+
+    Note: sc_tick is the time the packet was created.

     Parameters
     ----------
@@ -294,7 +300,7 @@ def assemble_science_frames(sci_dataset: xr.Dataset) -> xr.Dataset:
     # Extract data per science frame and organize by L1A data products
     count_rates = []
     pha = []
-    epoch_per_science_frame =
+    epoch_per_science_frame = []
     for idx in starting_indices:
         # Data from 20 packets in a science frame
         science_data_frame = science_data[idx : idx + FRAME_SIZE]
@@ -302,12 +308,15 @@ def assemble_science_frames(sci_dataset: xr.Dataset) -> xr.Dataset:
         count_rates.append("".join(science_data_frame[:6]))
         # Last 14 packets contain pulse height event data in binary
         pha.append("".join(science_data_frame[6:]))
-        # Get
-        epoch_per_science_frame
+        # Get the mean epoch in the frame to use as the data collection time
+        epoch_per_science_frame.append(
+            calculate_epoch_mean(epoch_data, idx, FRAME_SIZE)
+        )

-    # Add new data variables to the dataset
-    sci_dataset =
-
+    # Add new data variables to the dataset and update epoch coordinate
+    sci_dataset.coords["epoch"] = xr.DataArray(
+        np.array(epoch_per_science_frame, dtype=np.int64), dims=["epoch"]
+    )
     sci_dataset["count_rates_raw"] = xr.DataArray(
         count_rates, dims=["epoch"], name="count_rates_raw"
     )
@@ -373,6 +382,31 @@ def decompress_rates_16_to_32(packed: int) -> int:
     return decompressed_int


+def calculate_epoch_mean(
+    epoch_data: np.ndarray, idx: int, frame_size: int
+) -> np.floating:
+    """
+    Calculate the mean epoch for a science frame.
+
+    This function is used to get the center collection time for science data.
+
+    Parameters
+    ----------
+    epoch_data : np.ndarray
+        Array of epoch values for every science packet.
+    idx : int
+        Starting index of the science frame.
+    frame_size : int
+        Number of packets in the science frame.
+
+    Returns
+    -------
+    float
+        Mean epoch value for the science frame.
+    """
+    return np.mean([epoch_data[idx], epoch_data[idx + frame_size - 1]])
+
+
 def decom_hit(sci_dataset: xr.Dataset) -> xr.Dataset:
     """
     Group and decode HIT science data packets.
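A quick worked example of calculate_epoch_mean as defined above, assuming the 20-packet frame size noted in the comments:

import numpy as np

epoch_data = np.arange(1_000, 1_020)  # epochs for one 20-packet science frame
# Mean of the first and last packet epochs in the frame:
print(np.mean([epoch_data[0], epoch_data[0 + 20 - 1]]))  # 1009.5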
imap_processing/hit/l1a/hit_l1a.py
CHANGED

@@ -13,7 +13,11 @@ from imap_processing.hit.hit_utils import (
     get_datasets_by_apid,
     process_housekeeping_data,
 )
-from imap_processing.hit.l0.constants import
+from imap_processing.hit.l0.constants import (
+    AZIMUTH_ANGLES,
+    MOD_10_MAPPING,
+    ZENITH_ANGLES,
+)
 from imap_processing.hit.l0.decom_hit import decom_hit

 logger = logging.getLogger(__name__)
@@ -104,12 +108,16 @@ def subcom_sectorates(sci_dataset: xr.Dataset) -> xr.Dataset:
     hdr_min_count_mod_10 = updated_dataset.hdr_minute_cnt.values % 10

     # Reference mod 10 mapping to initialize data structure for species and
-    # energy ranges and add
+    # energy ranges and add arrays with fill values for each science frame.
     num_frames = len(hdr_min_count_mod_10)
     data_by_species_and_energy_range = {
         key: {
             **value,
-            "counts": np.full(
+            "counts": np.full(
+                (num_frames, len(AZIMUTH_ANGLES), len(ZENITH_ANGLES)),
+                fill_value=fillval,
+                dtype=np.int64,
+            ),
         }
         for key, value in MOD_10_MAPPING.items()
     }
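The np.full initialization above pre-allocates one (azimuth, zenith) grid of fill values per science frame. A self-contained sketch, with hypothetical stand-ins for the angle constants and the fill value:

import numpy as np

AZIMUTH_ANGLES = np.arange(15)  # stand-in; real values come from hit.l0.constants
ZENITH_ANGLES = np.arange(8)    # stand-in; real values come from hit.l0.constants
fillval = -1                    # stand-in; the real fill value comes from the CDF attrs

counts = np.full(
    (3, len(AZIMUTH_ANGLES), len(ZENITH_ANGLES)),  # 3 example science frames
    fill_value=fillval,
    dtype=np.int64,
)
print(counts.shape)  # (3, 15, 8); real counts later overwrite the fill values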
@@ -279,39 +287,38 @@ def process_science(
     # Calculate uncertainties for count rates
     count_rates_dataset = calculate_uncertainties(count_rates_dataset)

-
-
+    l1a_datasets: dict = {
+        "imap_hit_l1a_counts": count_rates_dataset,
+        "imap_hit_l1a_direct-events": pha_raw_dataset,
+    }

-    datasets = []
     # Update attributes and dimensions
-    for ds, logical_source in zip(
-        [count_rates_dataset, pha_raw_dataset], logical_sources
-    ):
+    for logical_source, ds in l1a_datasets.items():
         ds.attrs = attr_mgr.get_global_attributes(logical_source)

-        # TODO: Add CDF attributes to yaml once they're defined for L1A science data
         # Assign attributes and dimensions to each data array in the Dataset
         for field in ds.data_vars.keys():
             try:
-                # Create a dict of dimensions using the DEPEND_I keys in the
-                # attributes
-                dims = {
-                    key: value
-                    for key, value in attr_mgr.get_variable_attributes(field).items()
-                    if "DEPEND" in key
-                }
                 ds[field].attrs = attr_mgr.get_variable_attributes(field)
-                ds[field].assign_coords(dims)
             except KeyError:
                 print(f"Field {field} not found in attribute manager.")
                 logger.warning(f"Field {field} not found in attribute manager.")

-        #
-
-
-
-
+        # check_schema=False to avoid attr_mgr adding stuff dimensions don't need
+        for dim in ds.dims:
+            ds[dim].attrs = attr_mgr.get_variable_attributes(dim, check_schema=False)
+            # TODO: should labels be added as coordinates? Check with SPDF
+            if dim != "epoch":
+                label_array = xr.DataArray(
+                    ds[dim].values.astype(str),
+                    name=f"{dim}_label",
+                    dims=[dim],
+                    attrs=attr_mgr.get_variable_attributes(
+                        f"{dim}_label", check_schema=False
+                    ),
+                )
+                ds.coords[f"{dim}_label"] = label_array

         logger.info(f"HIT L1A dataset created for {logical_source}")

-    return datasets
+    return list(l1a_datasets.values())
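The label-coordinate pattern introduced above (and mirrored in hit_l2.py below) can be demonstrated with a toy dataset; the attribute-manager calls are omitted in this sketch.

import numpy as np
import xarray as xr

ds = xr.Dataset(
    {"counts": (("epoch", "azimuth"), np.zeros((2, 15)))},
    coords={"epoch": [0, 1], "azimuth": np.arange(15)},
)
for dim in ds.dims:
    if dim != "epoch":
        label_array = xr.DataArray(
            ds[dim].values.astype(str), name=f"{dim}_label", dims=[dim]
        )
        ds.coords[f"{dim}_label"] = label_array
print(ds.coords["azimuth_label"].values[:3])  # ['0' '1' '2']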
imap_processing/hit/l1b/hit_l1b.py
CHANGED

@@ -366,7 +366,8 @@ def subset_data_for_sectored_counts(
     A set of sectored data starts with hydrogen and ends with iron and correspond to
     the mod 10 values 0-9. The livetime values from the previous 10 minutes are used
     to calculate the rates for each set since those counts are transmitted 10 minutes
-    after they were collected.
+    after they were collected. Therefore, only complete sets of sectored counts where
+    livetime from the previous 10 minutes are available are included in the output.

     Parameters
     ----------
@@ -378,7 +379,7 @@ def subset_data_for_sectored_counts(
     Returns
     -------
     tuple[xr.Dataset, xr.DataArray]
-
+        Dataset of complete sectored counts and corresponding livetime values.
     """
     # Identify 10-minute intervals of complete sectored counts.
     bin_size = 10
@@ -392,16 +393,34 @@ def subset_data_for_sectored_counts(
     start_indices = np.where(matches)[0]

     # Filter out start indices that are less than or equal to the bin size
-    # since the previous 10 minutes are needed
-
-
-
-
-
+    # since the previous 10 minutes are needed for calculating rates
+    if start_indices.size == 0:
+        logger.error(
+            "No data to process - valid start indices not found for "
+            "complete sectored counts."
+        )
+        raise ValueError("No valid start indices found for complete sectored counts.")
+    else:
+        start_indices = start_indices[start_indices >= bin_size]
+
+    # Subset data for complete sets of sectored counts.
+    # Each set of sectored counts is 10 minutes long, so we take the indices
+    # starting from the start indices and extend to the bin size of 10.
+    # This creates a 1D array of indices that correspond to the complete
+    # sets of sectored counts which is used to filter the L1A dataset and
+    # create the L1B sectored rates dataset.
+    data_indices = np.concatenate(
+        [np.arange(idx, idx + bin_size) for idx in start_indices]
+    )
+    l1b_sectored_rates_dataset = l1a_counts_dataset.isel(epoch=data_indices)

-    # Subset livetime
-
-
+    # Subset livetime values corresponding to the previous 10 minutes
+    # for each start index. This ensures the livetime data aligns correctly
+    # with the sectored counts for rate calculations.
+    livetime_indices = np.concatenate(
+        [np.arange(idx - bin_size, idx) for idx in start_indices]
+    )
+    livetime = livetime.isel(epoch=livetime_indices)

     return l1b_sectored_rates_dataset, livetime
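A worked example of the index construction above: with bin_size = 10 and complete sets starting at indices 10 and 30, the data indices cover each 10-minute set while the livetime indices cover the 10 minutes before it.

import numpy as np

bin_size = 10
start_indices = np.array([10, 30])

data_indices = np.concatenate(
    [np.arange(idx, idx + bin_size) for idx in start_indices]
)
livetime_indices = np.concatenate(
    [np.arange(idx - bin_size, idx) for idx in start_indices]
)
print(data_indices)      # [10 ... 19 30 ... 39] -> the sectored-count epochs
print(livetime_indices)  # [ 0 ...  9 20 ... 29] -> the preceding 10 minutes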
imap_processing/hit/l2/hit_l2.py
CHANGED
@@ -134,18 +134,15 @@ def add_cdf_attributes(
         dataset[dim].attrs = attr_mgr.get_variable_attributes(dim, check_schema=False)
         # TODO: should labels be added as coordinates? Check with SPDF
         if dim != "epoch":
-
-
-
-
-
-
-
-                    f"{dim}_label", check_schema=False
-                ),
-            )
-            }
+            label_array = xr.DataArray(
+                dataset[dim].values.astype(str),
+                name=f"{dim}_label",
+                dims=[dim],
+                attrs=attr_mgr.get_variable_attributes(
+                    f"{dim}_label", check_schema=False
+                ),
             )
+            dataset.coords[f"{dim}_label"] = label_array

     return dataset
imap_processing/ialirt/constants.py
ADDED

@@ -0,0 +1,38 @@
+"""Module for constants and useful shared classes used in I-ALiRT processing."""
+
+from dataclasses import dataclass
+
+import numpy as np
+
+
+@dataclass(frozen=True)
+class IalirtSwapiConstants:
+    """
+    Constants for I-ALiRT SWAPI which can be used across different levels or classes.
+
+    Attributes
+    ----------
+    BOLTZ: float
+        Boltzmann constant [J/K]
+    AT_MASS: float
+        Atomic mass [kg]
+    PROT_MASS: float
+        Mass of proton [kg]
+    EFF_AREA: float
+        Instrument effective area [m^2]
+    AZ_FOV: float
+        Azimuthal width of the field of view for solar wind [radians]
+    FWHM_WIDTH: float
+        Full Width at Half Maximum of energy width [unitless]
+    SPEED_EW: float
+        Speed width of energy passband [unitless]
+    """
+
+    # Scientific constants used in optimization model
+    boltz = 1.380649e-23  # Boltzmann constant, J/K
+    at_mass = 1.6605390666e-27  # atomic mass, kg
+    prot_mass = 1.007276466621 * at_mass  # mass of proton, kg
+    eff_area = 3.3e-5 * 1e-4  # effective area, meters squared
+    az_fov = np.deg2rad(30)  # azimuthal width of the field of view, radians
+    fwhm_width = 0.085  # FWHM of energy width
+    speed_ew = 0.5 * fwhm_width  # speed width of energy passband
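Usage sketch for the new frozen constants class; the derived quantity below is illustrative only, not a formula from the package.

from imap_processing.ialirt.constants import IalirtSwapiConstants

consts = IalirtSwapiConstants()
# Illustrative: thermal energy per unit proton mass at an example temperature.
temperature = 1e5  # K, example value
print(consts.boltz * temperature / consts.prot_mass)  # m^2/s^2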
imap_processing/ialirt/l0/parse_mag.py
CHANGED

@@ -390,7 +390,7 @@ def process_packet(
         {
             "apid": 478,
             "met": int(met.values.min()),
-            "
+            "met_in_utc": met_to_utc(met.values.min()).split(".")[0],
             "ttj2000ns": int(met_to_ttj2000ns(met.values.min())),
             "mag_4s_b_gse": [Decimal("0.0") for _ in range(3)],
             "mag_4s_b_gsm": [Decimal("0.0") for _ in range(3)],
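The .split(".")[0] in the new met_in_utc field simply drops fractional seconds from the UTC string; the string below is illustrative of met_to_utc-style output.

utc_full = "2026-01-01T00:00:00.123456"  # example met_to_utc-style string
print(utc_full.split(".")[0])  # 2026-01-01T00:00:00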
imap_processing/ialirt/l0/process_codice.py
ADDED

@@ -0,0 +1,91 @@
+"""Functions to support I-ALiRT CoDICE processing."""
+
+import logging
+from decimal import Decimal
+from typing import Any
+
+import xarray as xr
+
+from imap_processing.codice import constants
+from imap_processing.ialirt.utils.time import calculate_time
+from imap_processing.spice.time import met_to_ttj2000ns, met_to_utc
+
+logger = logging.getLogger(__name__)
+
+FILLVAL_FLOAT32 = Decimal(str(-1.0e31))
+
+
+def process_codice(
+    dataset: xr.Dataset,
+) -> tuple[list[dict[str, Any]], list[dict[str, Any]]]:
+    """
+    Create final data products.
+
+    Parameters
+    ----------
+    dataset : xr.Dataset
+        Decommed L0 data.
+
+    Returns
+    -------
+    codice_data : list[dict]
+        Dictionary of final data product.
+
+    Notes
+    -----
+    This function is incomplete and will need to be updated to include the
+    necessary calculations and data products.
+    - Calculate rates (assume 4 minutes per group)
+    - Calculate L2 CoDICE pseudodensities (pg 37 of Algorithm Document)
+    - Calculate the public data products
+    """
+    # For I-ALiRT SIT, the test data being used has all zeros and thus no
+    # groups can be found, thus there is no data to process
+    # TODO: Once I-ALiRT test data is acquired that actually has data in it,
+    # this can be turned back on
+    # codicelo_data = create_ialirt_dataset(CODICEAPID.COD_LO_IAL, dataset)
+    # codicehi_data = create_ialirt_dataset(CODICEAPID.COD_HI_IAL, dataset)
+
+    # TODO: calculate rates
+    # This will be done in codice.codice_l1b
+
+    # TODO: calculate L2 CoDICE pseudodensities
+    # This will be done in codice.codice_l2
+
+    # TODO: calculate the public data products
+    # This will be done in this module
+
+    # Create mock dataset for I-ALiRT SIT
+    # TODO: Once I-ALiRT test data is acquired that actually has data in it,
+    # we should be able to properly populate the I-ALiRT data, but for
+    # now, just create lists of dicts with FILLVALs
+    cod_lo_data = []
+    cod_hi_data = []
+
+    for epoch in range(len(dataset.epoch.data)):
+        sc_sclk_sec = dataset.sc_sclk_sec.data[epoch]
+        sc_sclk_sub_sec = dataset.sc_sclk_sub_sec.data[epoch]
+        met = calculate_time(sc_sclk_sec, sc_sclk_sub_sec, 256)
+        utc = met_to_utc(met).split(".")[0]
+        ttj2000ns = int(met_to_ttj2000ns(met))
+
+        epoch_data = {
+            "apid": int(dataset.pkt_apid[epoch].data),
+            "met": int(met),
+            "met_to_utc": utc,
+            "ttj2000ns": ttj2000ns,
+        }
+
+        # Add in CoDICE-Lo specific data
+        cod_lo_epoch_data = epoch_data.copy()
+        for field in constants.CODICE_LO_IAL_DATA_FIELDS:
+            cod_lo_epoch_data[f"codicelo_{field}"] = []
+        cod_lo_data.append(cod_lo_epoch_data)
+
+        # Add in CoDICE-Hi specific data
+        cod_hi_epoch_data = epoch_data.copy()
+        for field in constants.CODICE_HI_IAL_DATA_FIELDS:
+            cod_hi_epoch_data[f"codicehi_{field}"] = []
+        cod_hi_data.append(cod_hi_epoch_data)
+
+    return cod_lo_data, cod_hi_data
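Per the loop above, each returned record shares the timing keys and carries one empty-list placeholder per instrument field. A sketch of one CoDICE-Lo record; all values and the field name are illustrative, with the real field names coming from constants.CODICE_LO_IAL_DATA_FIELDS.

example_cod_lo_record = {
    "apid": 0,                     # int(dataset.pkt_apid[epoch].data)
    "met": 0,                      # mission elapsed time, seconds
    "met_to_utc": "2025-01-01T00:00:00",
    "ttj2000ns": 0,
    "codicelo_example_field": [],  # placeholder until real test data exists
}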
imap_processing/ialirt/l0/process_hit.py
CHANGED

@@ -1,7 +1,6 @@
 """Functions to support HIT processing."""

 import logging
-from decimal import Decimal

 import numpy as np
 import xarray as xr
@@ -170,27 +169,19 @@ def process_hit(xarray_data: xr.Dataset) -> list[dict]:
         {
             "apid": 478,
             "met": int(met),
-            "
+            "met_in_utc": met_to_utc(met).split(".")[0],
             "ttj2000ns": int(met_to_ttj2000ns(met)),
-            "hit_e_a_side_low_en": Decimal(
-                str(l1["IALRT_RATE_1"] + l1["IALRT_RATE_2"])
-            ),
-            "hit_e_a_side_med_en": Decimal(
-                str(l1["IALRT_RATE_5"] + l1["IALRT_RATE_6"])
-            ),
-            "hit_e_a_side_high_en": Decimal(str(l1["IALRT_RATE_7"])),
-            "hit_e_b_side_low_en": Decimal(
-                str(l1["IALRT_RATE_11"] + l1["IALRT_RATE_12"])
-            ),
-            "hit_e_b_side_med_en": Decimal(
-                str(l1["IALRT_RATE_15"] + l1["IALRT_RATE_16"])
-            ),
-            "hit_e_b_side_high_en": Decimal(str(l1["IALRT_RATE_17"])),
-            "hit_h_omni_med_en": Decimal(str(l1["H_12_15"] + l1["H_15_70"])),
-            "hit_h_a_side_high_en": Decimal(str(l1["IALRT_RATE_8"])),
-            "hit_h_b_side_high_en": Decimal(str(l1["IALRT_RATE_18"])),
-            "hit_he_omni_low_en": Decimal(str(l1["HE4_06_08"])),
-            "hit_he_omni_high_en": Decimal(str(l1["HE4_15_70"])),
+            "hit_e_a_side_low_en": int(l1["IALRT_RATE_1"] + l1["IALRT_RATE_2"]),
+            "hit_e_a_side_med_en": int(l1["IALRT_RATE_5"] + l1["IALRT_RATE_6"]),
+            "hit_e_a_side_high_en": int(l1["IALRT_RATE_7"]),
+            "hit_e_b_side_low_en": int(l1["IALRT_RATE_11"] + l1["IALRT_RATE_12"]),
+            "hit_e_b_side_med_en": int(l1["IALRT_RATE_15"] + l1["IALRT_RATE_16"]),
+            "hit_e_b_side_high_en": int(l1["IALRT_RATE_17"]),
+            "hit_h_omni_med_en": int(l1["H_12_15"] + l1["H_15_70"]),
+            "hit_h_a_side_high_en": int(l1["IALRT_RATE_8"]),
+            "hit_h_b_side_high_en": int(l1["IALRT_RATE_18"]),
+            "hit_he_omni_low_en": int(l1["HE4_06_08"]),
+            "hit_he_omni_high_en": int(l1["HE4_15_70"]),
         }
     )