imap-processing 0.15.0__py3-none-any.whl → 0.16.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of imap-processing might be problematic. Click here for more details.

Files changed (35)
  1. imap_processing/_version.py +2 -2
  2. imap_processing/cdf/config/imap_ialirt_l1_variable_attrs.yaml +113 -130
  3. imap_processing/cli.py +1 -4
  4. imap_processing/codice/codice_l1a.py +87 -62
  5. imap_processing/codice/codice_l2.py +0 -8
  6. imap_processing/codice/constants.py +16 -5
  7. imap_processing/hi/hi_l1a.py +447 -0
  8. imap_processing/hi/{l1b/hi_l1b.py → hi_l1b.py} +1 -1
  9. imap_processing/hi/{l1c/hi_l1c.py → hi_l1c.py} +21 -21
  10. imap_processing/hi/{l2/hi_l2.py → hi_l2.py} +13 -13
  11. imap_processing/hi/utils.py +6 -6
  12. imap_processing/hit/l1b/hit_l1b.py +30 -11
  13. imap_processing/ialirt/constants.py +38 -0
  14. imap_processing/ialirt/l0/parse_mag.py +1 -1
  15. imap_processing/ialirt/l0/process_codice.py +91 -0
  16. imap_processing/ialirt/l0/process_hit.py +12 -21
  17. imap_processing/ialirt/l0/process_swapi.py +172 -23
  18. imap_processing/ialirt/l0/process_swe.py +3 -10
  19. imap_processing/ialirt/utils/constants.py +16 -2
  20. imap_processing/ialirt/utils/create_xarray.py +59 -11
  21. imap_processing/ultra/utils/ultra_l1_utils.py +4 -2
  22. {imap_processing-0.15.0.dist-info → imap_processing-0.16.0.dist-info}/METADATA +1 -1
  23. {imap_processing-0.15.0.dist-info → imap_processing-0.16.0.dist-info}/RECORD +26 -32
  24. imap_processing/hi/l1a/__init__.py +0 -0
  25. imap_processing/hi/l1a/hi_l1a.py +0 -98
  26. imap_processing/hi/l1a/histogram.py +0 -152
  27. imap_processing/hi/l1a/science_direct_event.py +0 -214
  28. imap_processing/hi/l1b/__init__.py +0 -0
  29. imap_processing/hi/l1c/__init__.py +0 -0
  30. imap_processing/hi/l2/__init__.py +0 -0
  31. imap_processing/ialirt/l0/process_codicehi.py +0 -156
  32. imap_processing/ialirt/l0/process_codicelo.py +0 -41
  33. {imap_processing-0.15.0.dist-info → imap_processing-0.16.0.dist-info}/LICENSE +0 -0
  34. {imap_processing-0.15.0.dist-info → imap_processing-0.16.0.dist-info}/WHEEL +0 -0
  35. {imap_processing-0.15.0.dist-info → imap_processing-0.16.0.dist-info}/entry_points.txt +0 -0
@@ -1,156 +0,0 @@
1
- """Functions to support I-ALiRT CoDICE Hi processing."""
2
-
3
- import logging
4
- from typing import Any
5
-
6
- import numpy as np
7
- import xarray as xr
8
-
9
- logger = logging.getLogger(__name__)
10
-
11
-
12
- def find_groups(data: xr.Dataset) -> xr.Dataset:
13
- """
14
- Find all occurrences of the sequential set of 234 values 0-233.
15
-
16
- If a value is missing, or we are starting/ending
17
- in the middle of a sequence we do not count that as a valid group.
18
-
19
- Parameters
20
- ----------
21
- data : xr.Dataset
22
- CoDICE Hi Dataset.
23
-
24
- Returns
25
- -------
26
- grouped_data : xr.Dataset
27
- Grouped data.
28
- """
29
- subcom_range = (0, 233)
30
-
31
- data = data.sortby("cod_hi_acq", ascending=True)
32
-
33
- # Use cod_hi_counter == 0 to define the beginning of the group.
34
- # Find cod_hi_acq at this index and use it as the beginning time for the group.
35
- start_sc_ticks = data["cod_hi_acq"][(data["cod_hi_counter"] == subcom_range[0])]
36
- start_sc_tick = start_sc_ticks.min()
37
- # Use cod_hi_counter == 233 to define the end of the group.
38
- last_sc_ticks = data["cod_hi_acq"][
39
- ([data["cod_hi_counter"] == subcom_range[-1]][-1])
40
- ]
41
- last_sc_tick = last_sc_ticks.max()
42
-
43
- # Filter out data before the first cod_hi_counter=0 and
44
- # after the last cod_hi_counter=233 and cod_hi_counter values != 0-233.
45
- grouped_data = data.where(
46
- (data["cod_hi_acq"] >= start_sc_tick)
47
- & (data["cod_hi_acq"] <= last_sc_tick)
48
- & (data["cod_hi_counter"] >= subcom_range[0])
49
- & (data["cod_hi_counter"] <= subcom_range[-1]),
50
- drop=True,
51
- )
52
-
53
- # Assign labels based on the cod_hi_acq times.
54
- group_labels = np.searchsorted(
55
- start_sc_ticks, grouped_data["cod_hi_acq"], side="right"
56
- )
57
- # Example:
58
- # grouped_data.coords
59
- # Coordinates:
60
- # * epoch (epoch) int64 7kB 315922822184000000 ... 315923721184000000
61
- # * group (group) int64 7kB 1 1 1 1 1 1 1 1 1 ... 15 15 15 15 15 15 15 15 15
62
- grouped_data["group"] = ("group", group_labels)
63
-
64
- return grouped_data
65
-
66
-
67
- def append_cod_hi_data(dataset: xr.Dataset) -> xr.Dataset:
68
- """
69
- Append the cod_hi_## data values and create an xarray.
70
-
71
- Parameters
72
- ----------
73
- dataset : xr.Dataset
74
- Original dataset of group.
75
-
76
- Returns
77
- -------
78
- appended_dataset : xr.Dataset
79
- Dataset with cod_hi_## stacked.
80
- """
81
- # Number of codice hi data rows
82
- num_cod_hi_rows = 5
83
- cod_hi_data = np.stack(
84
- [dataset[f"cod_hi_data_{i:02}"].values for i in range(num_cod_hi_rows)], axis=1
85
- )
86
-
87
- repeated_data = {
88
- var: np.repeat(dataset[var].values, num_cod_hi_rows)
89
- for var in dataset.data_vars
90
- if not var.startswith("cod_hi_data_")
91
- }
92
-
93
- repeated_data["cod_hi_appended"] = cod_hi_data.flatten()
94
- repeated_epoch = np.repeat(dataset["epoch"].values, num_cod_hi_rows)
95
-
96
- appended_dataset = xr.Dataset(
97
- data_vars={name: ("epoch", values) for name, values in repeated_data.items()},
98
- coords={"epoch": repeated_epoch},
99
- )
100
-
101
- return appended_dataset
102
-
103
-
104
- def process_codicehi(xarray_data: xr.Dataset) -> list[dict]:
105
- """
106
- Create final data products.
107
-
108
- Parameters
109
- ----------
110
- xarray_data : xr.Dataset
111
- Parsed data.
112
-
113
- Returns
114
- -------
115
- codicehi_data : list[dict]
116
- Dictionary of final data product.
117
-
118
- Notes
119
- -----
120
- This function is incomplete and will need to be updated to include the
121
- necessary calculations and data products.
122
- - Calculate species counts (pg 27 of Algorithm Document)
123
- - Calculate rates (assume 4 minutes per group)
124
- - Calculate L2 CoDICE pseudodensities (pg 37 of Algorithm Document)
125
- - Calculate the public data products
126
- """
127
- grouped_data = find_groups(xarray_data)
128
- unique_groups = np.unique(grouped_data["group"])
129
- codicehi_data: list[dict[str, Any]] = [{}]
130
-
131
- for group in unique_groups:
132
- # cod_hi_counter values for the group should be 0-233 with no duplicates.
133
- subcom_values = grouped_data["cod_hi_counter"][
134
- (grouped_data["group"] == group).values
135
- ]
136
-
137
- # Ensure no duplicates and all values from 0 to 233 are present
138
- if not np.array_equal(subcom_values, np.arange(234)):
139
- logger.warning(
140
- f"Group {group} does not contain all values from 0 to "
141
- f"233 without duplicates."
142
- )
143
- continue
144
-
145
- mask = grouped_data["group"] == group
146
- filtered_indices = np.where(mask)[0]
147
- group_data = grouped_data.isel(epoch=filtered_indices)
148
-
149
- append_cod_hi_data(group_data)
150
-
151
- # TODO: calculate species counts
152
- # TODO: calculate rates
153
- # TODO: calculate L2 CoDICE pseudodensities
154
- # TODO: calculate the public data products
155
-
156
- return codicehi_data
@@ -1,41 +0,0 @@
1
- """Functions to support I-ALiRT CoDICE Lo processing."""
2
-
3
- import logging
4
-
5
- import xarray as xr
6
-
7
- from imap_processing.codice.codice_l1a import create_ialirt_dataset
8
-
9
- logger = logging.getLogger(__name__)
10
-
11
-
12
- def process_codicelo(dataset: xr.Dataset) -> list[dict]:
13
- """
14
- Create final data products.
15
-
16
- Parameters
17
- ----------
18
- dataset : xr.Dataset
19
- Decommed L0 data.
20
-
21
- Returns
22
- -------
23
- codicelo_data : list[dict]
24
- Dictionary of final data product.
25
-
26
- Notes
27
- -----
28
- This function is incomplete and will need to be updated to include the
29
- necessary calculations and data products.
30
- - Calculate rates (assume 4 minutes per group)
31
- - Calculate L2 CoDICE pseudodensities (pg 37 of Algorithm Document)
32
- - Calculate the public data products
33
- """
34
- apid = dataset.pkt_apid.data[0]
35
- codicelo_data = create_ialirt_dataset(apid, dataset)
36
-
37
- # TODO: calculate rates
38
- # TODO: calculate L2 CoDICE pseudodensities
39
- # TODO: calculate the public data products
40
-
41
- return codicelo_data