imap-processing 0.16.0__py3-none-any.whl → 0.16.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

@@ -64,9 +64,10 @@ class HiConstants:
     TOF3_TICK_DUR = 0.5 # 0.5 ns
 
     # These values are stored in the TOF telemetry when the TOF timer
-    # does not have valid data.
-    TOF1_BAD_VALUES = (511, 1023)
-    TOF2_BAD_VALUES = (1023,)
+    # does not have valid data. See IMAP-Hi Algorithm Document Section
+    # 2.2.5 Annotated Direct Events
+    TOF1_BAD_VALUES = (511,)
+    TOF2_BAD_VALUES = (511,)
     TOF3_BAD_VALUES = (1023,)
 
 
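As context for the constants above, a minimal sketch (not package code) of how such bad-value tuples are typically applied to mask invalid TOF telemetry:

import numpy as np

TOF1_BAD_VALUES = (511,)  # from HiConstants above

tof1 = np.array([100, 511, 250])
# Flag entries whose value the TOF timer marks as invalid
valid = ~np.isin(tof1, TOF1_BAD_VALUES)
print(valid)  # [ True False  True]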
@@ -100,7 +100,9 @@ COUNTS_DATA_STRUCTURE = {
     "penfgrates": HITPacking(16, 528, (33,)), # range 4 foreground rates
     "penbgrates": HITPacking(16, 240, (15,)), # range 4 background rates
     "ialirtrates": HITPacking(16, 320, (20,)), # ialirt rates
-    "sectorates": HITPacking(16, 1920, (15, 8)), # sectored rates
+    "sectorates": HITPacking(
+        16, 1920, (8, 15)
+    ), # sectored rates (8 zenith angles, 15 azimuth angles)
     "l4fgrates": HITPacking(16, 768, (48,)), # all range foreground rates
     "l4bgrates": HITPacking(16, 384, (24,)), # all range foreground rates
 }
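The packing arithmetic is unchanged by the shape swap; assuming the HITPacking arguments are bit width, section bit length, and shape, the 8 x 15 grid of 16-bit values still fills the 1920-bit section exactly:

bit_width, section_bits, shape = 16, 1920, (8, 15)
assert bit_width * shape[0] * shape[1] == section_bits  # 16 * 120 == 1920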
@@ -89,11 +89,14 @@ def parse_count_rates(sci_dataset: xr.Dataset) -> None:
     if all(x not in field for x in ["hdr", "spare", "pha"]):
         parsed_data = np.vectorize(decompress_rates_16_to_32)(parsed_data)
 
-    # Get dims for data variables (yaml file not created yet)
+    # Get dims for data variables
     if len(field_meta.shape) > 1:
         if "sectorates" in field:
-            # Reshape data to 15x8 for azimuth and zenith look directions
+            # Reshape data into (num_frames, 8, 15) for zenith and azimuth
+            # look directions
             parsed_data = np.array(parsed_data).reshape((-1, *field_meta.shape))
+            # Transpose data to (num_frames, 15, 8) for flipped look directions
+            parsed_data = np.transpose(parsed_data, axes=(0, 2, 1))
         dims = ["epoch", "azimuth", "zenith"]
         # Add angle values to coordinates
         sci_dataset.coords["zenith"] = xr.DataArray(
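A toy illustration of the new reshape-then-transpose step (assumed sizes; the real shape comes from field_meta.shape):

import numpy as np

num_frames = 2
parsed = np.arange(num_frames * 8 * 15)
# Unpacked as (num_frames, zenith=8, azimuth=15), then the look-direction axes flipped
data = parsed.reshape((-1, 8, 15)).transpose(0, 2, 1)
print(data.shape)  # (2, 15, 8) -> (epoch, azimuth, zenith)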
@@ -202,9 +205,12 @@ def update_ccsds_header_dims(sci_dataset: xr.Dataset) -> xr.Dataset:
     it will be updated later in the process to represent
     time per science frame, so another time dimension is
     needed for the ccsds header fields.This function
-    updates the dimension for these fields to use sc_tick
-    instead of epoch. sc_tick is the time the packet was
-    created.
+    updates the dimension for all data vars to use sc_tick
+    instead of epoch. It also temporarily sets sc_tick as the
+    dimension for the epoch coordinate (to be updated later
+    in the assemble_science_frames function).
+
+    Note: sc_tick is the time the packet was created.
 
     Parameters
     ----------
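For readers unfamiliar with the xarray idiom, a standalone sketch of this kind of dimension swap (illustrative only; an assumption about the mechanism, not the package's actual implementation):

import numpy as np
import xarray as xr

ds = xr.Dataset(
    {"sc_tick": ("epoch", np.array([10, 20, 30]))},
    coords={"epoch": np.array([0, 1, 2])},
)
# sc_tick becomes the dimension; epoch stays attached as a coordinate along it
ds = ds.swap_dims({"epoch": "sc_tick"})
print(ds.sizes)  # Frozen({'sc_tick': 3})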
@@ -294,7 +300,7 @@ def assemble_science_frames(sci_dataset: xr.Dataset) -> xr.Dataset:
     # Extract data per science frame and organize by L1A data products
     count_rates = []
     pha = []
-    epoch_per_science_frame = np.array([])
+    epoch_per_science_frame = []
     for idx in starting_indices:
         # Data from 20 packets in a science frame
         science_data_frame = science_data[idx : idx + FRAME_SIZE]
@@ -302,12 +308,15 @@ def assemble_science_frames(sci_dataset: xr.Dataset) -> xr.Dataset:
         count_rates.append("".join(science_data_frame[:6]))
         # Last 14 packets contain pulse height event data in binary
         pha.append("".join(science_data_frame[6:]))
-        # Get first packet's epoch for the science frame
-        epoch_per_science_frame = np.append(epoch_per_science_frame, epoch_data[idx])
+        # Get the mean epoch in the frame to use as the data collection time
+        epoch_per_science_frame.append(
+            calculate_epoch_mean(epoch_data, idx, FRAME_SIZE)
+        )
 
-    # Add new data variables to the dataset
-    sci_dataset = sci_dataset.drop_vars("epoch")
-    sci_dataset.coords["epoch"] = epoch_per_science_frame
+    # Add new data variables to the dataset and update epoch coordinate
+    sci_dataset.coords["epoch"] = xr.DataArray(
+        np.array(epoch_per_science_frame, dtype=np.int64), dims=["epoch"]
+    )
     sci_dataset["count_rates_raw"] = xr.DataArray(
         count_rates, dims=["epoch"], name="count_rates_raw"
     )
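The move from np.append to a plain Python list is the standard fix for quadratic accumulation: np.append copies the whole array on every call, whereas appending to a list and converting once at the end is linear. A minimal sketch:

import numpy as np

epochs = []
for frame_epoch in (100, 200, 300):  # stand-in epoch values
    epochs.append(frame_epoch)  # O(1) per frame
epoch_arr = np.array(epochs, dtype=np.int64)  # one conversion at the end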
@@ -373,6 +382,31 @@ def decompress_rates_16_to_32(packed: int) -> int:
     return decompressed_int
 
 
+def calculate_epoch_mean(
+    epoch_data: np.ndarray, idx: int, frame_size: int
+) -> np.floating:
+    """
+    Calculate the mean epoch for a science frame.
+
+    This function is used to get the center collection time for science data.
+
+    Parameters
+    ----------
+    epoch_data : np.ndarray
+        Array of epoch values for every science packet.
+    idx : int
+        Starting index of the science frame.
+    frame_size : int
+        Number of packets in the science frame.
+
+    Returns
+    -------
+    float
+        Mean epoch value for the science frame.
+    """
+    return np.mean([epoch_data[idx], epoch_data[idx + frame_size - 1]])
+
+
 def decom_hit(sci_dataset: xr.Dataset) -> xr.Dataset:
     """
     Group and decode HIT science data packets.
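Note that the mean is taken over only the first and last packet epochs, i.e. the midpoint of the frame's time span rather than the average of all 20 packet epochs (the two agree when packets are evenly spaced). A quick check with synthetic values:

import numpy as np

FRAME_SIZE = 20
epoch_data = np.arange(1000, 1000 + FRAME_SIZE * 5, 5)  # evenly spaced packet epochs
mid = np.mean([epoch_data[0], epoch_data[FRAME_SIZE - 1]])
print(mid)  # 1047.5, the midpoint of the first and last packet times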
@@ -13,7 +13,11 @@ from imap_processing.hit.hit_utils import (
     get_datasets_by_apid,
     process_housekeeping_data,
 )
-from imap_processing.hit.l0.constants import MOD_10_MAPPING
+from imap_processing.hit.l0.constants import (
+    AZIMUTH_ANGLES,
+    MOD_10_MAPPING,
+    ZENITH_ANGLES,
+)
 from imap_processing.hit.l0.decom_hit import decom_hit
 
 logger = logging.getLogger(__name__)
@@ -104,12 +108,16 @@ def subcom_sectorates(sci_dataset: xr.Dataset) -> xr.Dataset:
     hdr_min_count_mod_10 = updated_dataset.hdr_minute_cnt.values % 10
 
     # Reference mod 10 mapping to initialize data structure for species and
-    # energy ranges and add 15x8 arrays with fill values for each science frame.
+    # energy ranges and add arrays with fill values for each science frame.
     num_frames = len(hdr_min_count_mod_10)
     data_by_species_and_energy_range = {
         key: {
             **value,
-            "counts": np.full((num_frames, 15, 8), fill_value=fillval, dtype=np.int64),
+            "counts": np.full(
+                (num_frames, len(AZIMUTH_ANGLES), len(ZENITH_ANGLES)),
+                fill_value=fillval,
+                dtype=np.int64,
+            ),
         }
         for key, value in MOD_10_MAPPING.items()
     }
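A small standalone sketch of this initialization; the angle arrays below are hypothetical stand-ins for AZIMUTH_ANGLES and ZENITH_ANGLES in imap_processing.hit.l0.constants, which the change uses so that the array shape is derived rather than hard-coded as 15x8:

import numpy as np

AZIMUTH_ANGLES = np.arange(12, 360, 24)      # hypothetical: 15 azimuth bin centers
ZENITH_ANGLES = np.arange(11.25, 180, 22.5)  # hypothetical: 8 zenith bin centers
fillval = np.iinfo(np.int64).min             # example fill value

num_frames = 3
counts = np.full(
    (num_frames, len(AZIMUTH_ANGLES), len(ZENITH_ANGLES)),
    fill_value=fillval,
    dtype=np.int64,
)
print(counts.shape)  # (3, 15, 8)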
@@ -279,39 +287,38 @@ def process_science(
     # Calculate uncertainties for count rates
     count_rates_dataset = calculate_uncertainties(count_rates_dataset)
 
-    # Logical sources for the two products.
-    logical_sources = ["imap_hit_l1a_counts", "imap_hit_l1a_direct-events"]
+    l1a_datasets: dict = {
+        "imap_hit_l1a_counts": count_rates_dataset,
+        "imap_hit_l1a_direct-events": pha_raw_dataset,
+    }
 
-    datasets = []
     # Update attributes and dimensions
-    for ds, logical_source in zip(
-        [count_rates_dataset, pha_raw_dataset], logical_sources
-    ):
+    for logical_source, ds in l1a_datasets.items():
         ds.attrs = attr_mgr.get_global_attributes(logical_source)
 
-        # TODO: Add CDF attributes to yaml once they're defined for L1A science data
         # Assign attributes and dimensions to each data array in the Dataset
         for field in ds.data_vars.keys():
             try:
-                # Create a dict of dimensions using the DEPEND_I keys in the
-                # attributes
-                dims = {
-                    key: value
-                    for key, value in attr_mgr.get_variable_attributes(field).items()
-                    if "DEPEND" in key
-                }
                 ds[field].attrs = attr_mgr.get_variable_attributes(field)
-                ds[field].assign_coords(dims)
             except KeyError:
                 print(f"Field {field} not found in attribute manager.")
                 logger.warning(f"Field {field} not found in attribute manager.")
 
-        # Skip schema check for epoch to prevent attr_mgr from adding the
-        # DEPEND_0 attribute which isn't required for epoch
-        ds.epoch.attrs = attr_mgr.get_variable_attributes("epoch", check_schema=False)
-
-        datasets.append(ds)
+        # check_schema=False to avoid attr_mgr adding stuff dimensions don't need
+        for dim in ds.dims:
+            ds[dim].attrs = attr_mgr.get_variable_attributes(dim, check_schema=False)
+            # TODO: should labels be added as coordinates? Check with SPDF
+            if dim != "epoch":
+                label_array = xr.DataArray(
+                    ds[dim].values.astype(str),
+                    name=f"{dim}_label",
+                    dims=[dim],
+                    attrs=attr_mgr.get_variable_attributes(
+                        f"{dim}_label", check_schema=False
+                    ),
+                )
+                ds.coords[f"{dim}_label"] = label_array
 
         logger.info(f"HIT L1A dataset created for {logical_source}")
 
-    return datasets
+    return list(l1a_datasets.values())
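A self-contained sketch of the label-coordinate pattern added above (toy coordinate values; attribute-manager lookups omitted):

import numpy as np
import xarray as xr

ds = xr.Dataset(coords={"zenith": np.array([11.25, 33.75, 56.25])})
# One string label per bin, stored as a coordinate alongside the numeric values
ds.coords["zenith_label"] = xr.DataArray(
    ds["zenith"].values.astype(str), name="zenith_label", dims=["zenith"]
)
print(ds["zenith_label"].values)  # ['11.25' '33.75' '56.25']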
@@ -134,18 +134,15 @@ def add_cdf_attributes(
         dataset[dim].attrs = attr_mgr.get_variable_attributes(dim, check_schema=False)
         # TODO: should labels be added as coordinates? Check with SPDF
         if dim != "epoch":
-            dataset = dataset.assign_coords(
-                {
-                    f"{dim}_label": xr.DataArray(
-                        dataset[dim].values.astype(str),
-                        name=f"{dim}_label",
-                        dims=[dim],
-                        attrs=attr_mgr.get_variable_attributes(
-                            f"{dim}_label", check_schema=False
-                        ),
-                    )
-                }
+            label_array = xr.DataArray(
+                dataset[dim].values.astype(str),
+                name=f"{dim}_label",
+                dims=[dim],
+                attrs=attr_mgr.get_variable_attributes(
+                    f"{dim}_label", check_schema=False
+                ),
             )
+            dataset.coords[f"{dim}_label"] = label_array
 
     return dataset
 
@@ -6,10 +6,6 @@ from typing import Any
 
 import xarray as xr
 
-from imap_processing.codice import constants
-from imap_processing.ialirt.utils.time import calculate_time
-from imap_processing.spice.time import met_to_ttj2000ns, met_to_utc
-
 logger = logging.getLogger(__name__)
 
 FILLVAL_FLOAT32 = Decimal(str(-1.0e31))
@@ -28,7 +24,7 @@ def process_codice(
 
     Returns
     -------
-    codice_data : list[dict]
+    codice_data : tuple[list[dict[str, Any]], list[dict[str, Any]]]:
         Dictionary of final data product.
 
     Notes
@@ -58,34 +54,8 @@ def process_codice(
     # Create mock dataset for I-ALiRT SIT
     # TODO: Once I-ALiRT test data is acquired that actually has data in it,
     #  we should be able to properly populate the I-ALiRT data, but for
-    #  now, just create lists of dicts with FILLVALs
-    cod_lo_data = []
-    cod_hi_data = []
-
-    for epoch in range(len(dataset.epoch.data)):
-        sc_sclk_sec = dataset.sc_sclk_sec.data[epoch]
-        sc_sclk_sub_sec = dataset.sc_sclk_sub_sec.data[epoch]
-        met = calculate_time(sc_sclk_sec, sc_sclk_sub_sec, 256)
-        utc = met_to_utc(met).split(".")[0]
-        ttj2000ns = int(met_to_ttj2000ns(met))
-
-        epoch_data = {
-            "apid": int(dataset.pkt_apid[epoch].data),
-            "met": met,
-            "met_to_utc": utc,
-            "ttj2000ns": ttj2000ns,
-        }
-
-        # Add in CoDICE-Lo specific data
-        cod_lo_epoch_data = epoch_data.copy()
-        for field in constants.CODICE_LO_IAL_DATA_FIELDS:
-            cod_lo_epoch_data[f"codicelo_{field}"] = FILLVAL_FLOAT32
-        cod_lo_data.append(cod_lo_epoch_data)
-
-        # Add in CoDICE-Hi specific data
-        cod_hi_epoch_data = epoch_data.copy()
-        for field in constants.CODICE_HI_IAL_DATA_FIELDS:
-            cod_hi_epoch_data[f"codicehi_{field}"] = FILLVAL_FLOAT32
-        cod_hi_data.append(cod_hi_epoch_data)
+    #  now, just create lists of dicts.
+    cod_lo_data: list[dict[str, Any]] = []
+    cod_hi_data: list[dict[str, Any]] = []
 
     return cod_lo_data, cod_hi_data
@@ -170,7 +170,7 @@ def process_swapi_ialirt(unpacked_data: xr.Dataset) -> list[dict]:
         logger.warning(
             "There was an issue with the SWAPI grouping process, returning empty data."
         )
-        return [{}]
+        return []
 
     for group in np.unique(grouped_dataset["group"]):
         # Sequence values for the group should be 0-11 with no duplicates.
@@ -202,7 +202,7 @@ def process_swapi_ialirt(unpacked_data: xr.Dataset) -> list[dict]:
         swapi_data.append(
             {
                 "apid": 478,
-                "met": met_values[entry],
+                "met": int(met_values[entry]),
                 "met_in_utc": met_to_utc(met_values[entry]).split(".")[0],
                 "ttj2000ns": int(met_to_ttj2000ns(met_values[entry])),
                 "swapi_pseudo_proton_speed": Decimal(solution["pseudo_speed"][entry]),
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: imap-processing
-Version: 0.16.0
+Version: 0.16.2
 Summary: IMAP Science Operations Center Processing
 License: MIT
 Keywords: IMAP,SDC,SOC,Science Operations
@@ -1,5 +1,5 @@
 imap_processing/__init__.py,sha256=b9xHlf8_0OtN_OyhPlrC6ayahYR0QTt_e713NjxZObw,1305
-imap_processing/_version.py,sha256=eLPvHzTLNE19Gzow2bKK6dlgu7T2y85RkAQPjNmibUA,127
+imap_processing/_version.py,sha256=Ni8iNB9b0Tb7TQE3aSvQE4pU0inSecIcucdCW22PAOs,127
 imap_processing/ancillary/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 imap_processing/ancillary/ancillary_dataset_combiner.py,sha256=Pxg1wQLjPKzEkgE3a4Tart5TfeuH9rqyCKTd5GbgiL0,9602
 imap_processing/ccsds/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -22,7 +22,7 @@ imap_processing/cdf/config/imap_glows_l2_variable_attrs.yaml,sha256=eHGzS5_ctJ3g
 imap_processing/cdf/config/imap_hi_global_cdf_attrs.yaml,sha256=28VVPcB_dRsk24Rba-nVsyivixnCOBEEHqL6oUu61Vc,2509
 imap_processing/cdf/config/imap_hi_variable_attrs.yaml,sha256=VsIpaOP7SrwsWqiFo-Dhcz1iBVnZE7PmzzVaGwKZ_YI,14678
 imap_processing/cdf/config/imap_hit_global_cdf_attrs.yaml,sha256=zjHAXLlXrrQPgzKh3aTuoY1qb41J0i8zQ81xLTf7Y7g,3121
-imap_processing/cdf/config/imap_hit_l1a_variable_attrs.yaml,sha256=5X8V6Hzg7UGP6HeqHGKy-ogJP3K-V4sLi5271N8fBNw,11281
+imap_processing/cdf/config/imap_hit_l1a_variable_attrs.yaml,sha256=nVphxZP1hc8EHgR9K0Qh7HZd-G8iM0C0gSXeEyRdx0M,48050
 imap_processing/cdf/config/imap_hit_l1b_variable_attrs.yaml,sha256=gxeC7-SUKkWiy1OE6MgfKxeV-0bdXuauodOIfFtQ3JQ,11287
 imap_processing/cdf/config/imap_hit_l2_variable_attrs.yaml,sha256=f8ZLlF9EIes8BcFrpKTO8Mb_VVnP2d9HoLtQVOgz23Q,61402
 imap_processing/cdf/config/imap_ialirt_global_cdf_attrs.yaml,sha256=AGQ9J0zEdWRZ_AR6_AYYHkRb57mYIe2jVflhYv8SEsQ,961
@@ -102,18 +102,18 @@ imap_processing/hi/hi_l1c.py,sha256=lT_XFSRME1ol7Bo2y38RCHghEEXeW21oQ2Rcj2fmjDo,
 imap_processing/hi/hi_l2.py,sha256=veO2FdinuiRcONQ5JucnRj8lvs6wwLVrA_oveqsFzFE,9360
 imap_processing/hi/packet_definitions/TLM_HI_COMBINED_SCI.xml,sha256=Cwu_sE-b6lpXtuikQKuU5uQ_pMFTG9HoZeH9c-QDQgU,251743
 imap_processing/hi/packet_definitions/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-imap_processing/hi/utils.py,sha256=mZZtOK4S2Mu0uBfPY72x74gtt70anu4sA62PKUjwERk,8097
+imap_processing/hi/utils.py,sha256=5_JowCx2jQsI58M0vCHOgtib_a6Jll9U2vOylpB26A4,8166
 imap_processing/hit/__init__.py,sha256=Ga6Ioj68HnQ4moi7SxJdOyYmGHcRvKOh4uZUEPJZHl4,20
 imap_processing/hit/hit_utils.py,sha256=-O8xt90w6sMPQ2t4CYmqrwkBw3rF1h1dMn58Q3Ioz3g,13371
 imap_processing/hit/l0/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-imap_processing/hit/l0/constants.py,sha256=MlLaYv73tTxVi4MNZL8oP-Ddnj92Xq_f1Ep0Jo2hkHU,5605
-imap_processing/hit/l0/decom_hit.py,sha256=ur0M3AVhuptWRvUVUvugxfAMf6yKI0Xee_7RNqJyF4E,15391
+imap_processing/hit/l0/constants.py,sha256=hJnOMvRVt3gm9mZY8iTVc1mBYq_MNuTz9twkuF4G1F0,5656
+imap_processing/hit/l0/decom_hit.py,sha256=o8rPVfGm67v1M4GUIjtSF7FpaQnYZbVRHJR79olDZOs,16436
 imap_processing/hit/l1a/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-imap_processing/hit/l1a/hit_l1a.py,sha256=BWxm73t6CeNXBt23NArnkXufeRH_OkhVOReLXEpiD3M,11241
+imap_processing/hit/l1a/hit_l1a.py,sha256=gy1wGEJZOxb-sZnlQACv7FVD966fek1EBvE8jzpZdWg,11345
 imap_processing/hit/l1b/constants.py,sha256=ZL5h2GgC5j_s11YgMxtY4EHpmkfo6IXnniCv-iXdG84,9692
 imap_processing/hit/l1b/hit_l1b.py,sha256=nXy8SUXJujFcWOrOBufmdrxq9xzdmB9dyZQOYz3-xiM,18489
 imap_processing/hit/l2/constants.py,sha256=jvs7Uic3Bl1wyRC01s1-F5LAsTzTwi089TpN19fvlFg,18813
-imap_processing/hit/l2/hit_l2.py,sha256=E_3hyn_XzmnUbAFqTSHeM_QQtCYqMnOzLm6Km_crmOk,27102
+imap_processing/hit/l2/hit_l2.py,sha256=qVGTHGO6tXNmz0nqKg3MEEqjlVGHhVsff5uS4jNajsk,26998
 imap_processing/hit/packet_definitions/hit_packet_definitions.xml,sha256=CyYmMnuObmk2y1nlrd7xOZnwZ_qZ3X__AhhKmRyNOvg,136147
 imap_processing/ialirt/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 imap_processing/ialirt/constants.py,sha256=mwVPgSVNuPpTO4zx3LeUW7L5a6aJG2ZrezObPvR9s1c,1241
@@ -121,9 +121,9 @@ imap_processing/ialirt/l0/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJW
 imap_processing/ialirt/l0/ialirt_spice.py,sha256=WNuMlxA5UDGiBd8ap2k_dHxFmi2UCFgnQ4_wEZUhVU0,5649
 imap_processing/ialirt/l0/mag_l0_ialirt_data.py,sha256=vgIerXkk4ZoOxxVaNEgvM1ESWVkGusSZ-3k73-Cl_tI,5276
 imap_processing/ialirt/l0/parse_mag.py,sha256=v5Efczml4s-TFdREhjg3A6PzEFTYAx4dbJyeACmkbPc,12787
-imap_processing/ialirt/l0/process_codice.py,sha256=_LvZJ-dt7Qbw41glYS4BdIJZhhChBIk8DvueyNphW4g,3062
+imap_processing/ialirt/l0/process_codice.py,sha256=-gjxJltAGqqnIflvs5-wrZ2sS-1BjHYk0cMGxxhRdi0,1935
 imap_processing/ialirt/l0/process_hit.py,sha256=HJ_h8nd5Xs0NC-mWNS7Ce3D86H1408lT6ZCdXlNQv48,5408
-imap_processing/ialirt/l0/process_swapi.py,sha256=EBr-wQB4IDrdYON80g8f9neNkUXTAAUMv3wlB1MXfr4,7490
+imap_processing/ialirt/l0/process_swapi.py,sha256=spcxacRDdHBpga1ZoAHN5EMUz-pDHdfazuYOrvzdywE,7493
 imap_processing/ialirt/l0/process_swe.py,sha256=QXp6rmlxw8K5M0bapKwAArCd_99qHFS8sxOnhHLVkhU,17953
 imap_processing/ialirt/packet_definitions/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 imap_processing/ialirt/packet_definitions/ialirt.xml,sha256=-hlT16Mr2Q6uCUfJ3jdpw0mPdeGCcZiWiorbkiEanwA,40784
@@ -258,8 +258,8 @@ imap_processing/ultra/packet_definitions/__init__.py,sha256=47DEQpj8HBSa-_TImW-5
 imap_processing/ultra/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 imap_processing/ultra/utils/ultra_l1_utils.py,sha256=W1wABph96AJVkFNGrmq_0Vf6i__fIFxIVTvjCPbysY0,5321
 imap_processing/utils.py,sha256=zdcBXBQKL2NnElJcEbd-2QPeEDz0H3Yy0hVjRy9xcCE,14062
-imap_processing-0.16.0.dist-info/LICENSE,sha256=F2rxhvc6auEI0Dk9IGjglQSQQk60EvTe8M1dORMZPOg,1098
-imap_processing-0.16.0.dist-info/METADATA,sha256=ejLO2sQJAM297xzu16ALDVLTqqDA3CXQuqJg2kh8tiM,9102
-imap_processing-0.16.0.dist-info/WHEEL,sha256=Nq82e9rUAnEjt98J6MlVmMCZb-t9cYE2Ir1kpBmnWfs,88
-imap_processing-0.16.0.dist-info/entry_points.txt,sha256=5r8ijLImHSNJxr-SGDC8kJy81BtXjmeUOmNfWSfLuRs,104
-imap_processing-0.16.0.dist-info/RECORD,,
+imap_processing-0.16.2.dist-info/LICENSE,sha256=F2rxhvc6auEI0Dk9IGjglQSQQk60EvTe8M1dORMZPOg,1098
+imap_processing-0.16.2.dist-info/METADATA,sha256=kOF9OXN4wivTLhYb6In7XZaHO7TsGbpEGrQXBHfc9bU,9102
+imap_processing-0.16.2.dist-info/WHEEL,sha256=Nq82e9rUAnEjt98J6MlVmMCZb-t9cYE2Ir1kpBmnWfs,88
+imap_processing-0.16.2.dist-info/entry_points.txt,sha256=5r8ijLImHSNJxr-SGDC8kJy81BtXjmeUOmNfWSfLuRs,104
+imap_processing-0.16.2.dist-info/RECORD,,