imap-processing 0.18.0-py3-none-any.whl → 0.19.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of imap-processing might be problematic.
- imap_processing/_version.py +2 -2
- imap_processing/ancillary/ancillary_dataset_combiner.py +161 -1
- imap_processing/cdf/config/imap_codice_l1a_variable_attrs.yaml +301 -274
- imap_processing/cdf/config/imap_codice_l1b_variable_attrs.yaml +28 -28
- imap_processing/cdf/config/imap_codice_l2_variable_attrs.yaml +1044 -203
- imap_processing/cdf/config/imap_constant_attrs.yaml +4 -2
- imap_processing/cdf/config/imap_glows_l1b_variable_attrs.yaml +12 -0
- imap_processing/cdf/config/imap_hi_global_cdf_attrs.yaml +5 -0
- imap_processing/cdf/config/imap_hit_global_cdf_attrs.yaml +10 -4
- imap_processing/cdf/config/imap_idex_l2a_variable_attrs.yaml +33 -4
- imap_processing/cdf/config/imap_idex_l2b_variable_attrs.yaml +8 -91
- imap_processing/cdf/config/imap_idex_l2c_variable_attrs.yaml +106 -16
- imap_processing/cdf/config/imap_lo_l1a_variable_attrs.yaml +4 -15
- imap_processing/cdf/config/imap_lo_l1c_variable_attrs.yaml +189 -98
- imap_processing/cdf/config/imap_mag_global_cdf_attrs.yaml +85 -2
- imap_processing/cdf/config/imap_mag_l1c_variable_attrs.yaml +24 -1
- imap_processing/cdf/config/imap_ultra_l1b_variable_attrs.yaml +12 -4
- imap_processing/cdf/config/imap_ultra_l1c_variable_attrs.yaml +50 -7
- imap_processing/cli.py +95 -41
- imap_processing/codice/codice_l1a.py +131 -31
- imap_processing/codice/codice_l2.py +118 -10
- imap_processing/codice/constants.py +740 -595
- imap_processing/decom.py +1 -4
- imap_processing/ena_maps/ena_maps.py +32 -25
- imap_processing/ena_maps/utils/naming.py +8 -2
- imap_processing/glows/ancillary/imap_glows_exclusions-by-instr-team_20250923_v002.dat +10 -0
- imap_processing/glows/ancillary/imap_glows_map-of-excluded-regions_20250923_v002.dat +393 -0
- imap_processing/glows/ancillary/imap_glows_map-of-uv-sources_20250923_v002.dat +593 -0
- imap_processing/glows/ancillary/imap_glows_pipeline_settings_20250923_v002.json +54 -0
- imap_processing/glows/ancillary/imap_glows_suspected-transients_20250923_v002.dat +10 -0
- imap_processing/glows/l1b/glows_l1b.py +99 -9
- imap_processing/glows/l1b/glows_l1b_data.py +350 -38
- imap_processing/glows/l2/glows_l2.py +11 -0
- imap_processing/hi/hi_l1a.py +124 -3
- imap_processing/hi/hi_l1b.py +154 -71
- imap_processing/hi/hi_l2.py +84 -51
- imap_processing/hi/utils.py +153 -8
- imap_processing/hit/l0/constants.py +3 -0
- imap_processing/hit/l0/decom_hit.py +3 -6
- imap_processing/hit/l1a/hit_l1a.py +311 -21
- imap_processing/hit/l1b/hit_l1b.py +54 -126
- imap_processing/hit/l2/hit_l2.py +6 -6
- imap_processing/ialirt/calculate_ingest.py +219 -0
- imap_processing/ialirt/constants.py +12 -2
- imap_processing/ialirt/generate_coverage.py +15 -2
- imap_processing/ialirt/l0/ialirt_spice.py +5 -2
- imap_processing/ialirt/l0/parse_mag.py +293 -42
- imap_processing/ialirt/l0/process_hit.py +5 -3
- imap_processing/ialirt/l0/process_swapi.py +41 -25
- imap_processing/ialirt/process_ephemeris.py +70 -14
- imap_processing/idex/idex_l0.py +2 -2
- imap_processing/idex/idex_l1a.py +2 -3
- imap_processing/idex/idex_l1b.py +2 -3
- imap_processing/idex/idex_l2a.py +130 -4
- imap_processing/idex/idex_l2b.py +158 -143
- imap_processing/idex/idex_utils.py +1 -3
- imap_processing/lo/l0/lo_science.py +25 -24
- imap_processing/lo/l1b/lo_l1b.py +3 -3
- imap_processing/lo/l1c/lo_l1c.py +116 -50
- imap_processing/lo/l2/lo_l2.py +29 -29
- imap_processing/lo/lo_ancillary.py +55 -0
- imap_processing/mag/l1a/mag_l1a.py +1 -0
- imap_processing/mag/l1a/mag_l1a_data.py +26 -0
- imap_processing/mag/l1b/mag_l1b.py +3 -2
- imap_processing/mag/l1c/interpolation_methods.py +14 -15
- imap_processing/mag/l1c/mag_l1c.py +23 -6
- imap_processing/mag/l1d/mag_l1d.py +57 -14
- imap_processing/mag/l1d/mag_l1d_data.py +167 -30
- imap_processing/mag/l2/mag_l2_data.py +10 -2
- imap_processing/quality_flags.py +9 -1
- imap_processing/spice/geometry.py +76 -33
- imap_processing/spice/pointing_frame.py +0 -6
- imap_processing/spice/repoint.py +29 -2
- imap_processing/spice/spin.py +28 -8
- imap_processing/spice/time.py +12 -22
- imap_processing/swapi/l1/swapi_l1.py +10 -4
- imap_processing/swapi/l2/swapi_l2.py +15 -17
- imap_processing/swe/l1b/swe_l1b.py +1 -2
- imap_processing/ultra/constants.py +1 -24
- imap_processing/ultra/l0/ultra_utils.py +9 -11
- imap_processing/ultra/l1a/ultra_l1a.py +1 -2
- imap_processing/ultra/l1b/cullingmask.py +6 -3
- imap_processing/ultra/l1b/de.py +81 -23
- imap_processing/ultra/l1b/extendedspin.py +13 -10
- imap_processing/ultra/l1b/lookup_utils.py +281 -28
- imap_processing/ultra/l1b/quality_flag_filters.py +10 -1
- imap_processing/ultra/l1b/ultra_l1b_culling.py +161 -3
- imap_processing/ultra/l1b/ultra_l1b_extended.py +253 -47
- imap_processing/ultra/l1c/helio_pset.py +97 -24
- imap_processing/ultra/l1c/l1c_lookup_utils.py +256 -0
- imap_processing/ultra/l1c/spacecraft_pset.py +83 -16
- imap_processing/ultra/l1c/ultra_l1c.py +6 -2
- imap_processing/ultra/l1c/ultra_l1c_culling.py +85 -0
- imap_processing/ultra/l1c/ultra_l1c_pset_bins.py +385 -277
- imap_processing/ultra/l2/ultra_l2.py +0 -1
- imap_processing/ultra/utils/ultra_l1_utils.py +28 -3
- imap_processing/utils.py +3 -4
- {imap_processing-0.18.0.dist-info → imap_processing-0.19.0.dist-info}/METADATA +2 -2
- {imap_processing-0.18.0.dist-info → imap_processing-0.19.0.dist-info}/RECORD +102 -95
- imap_processing/idex/idex_l2c.py +0 -84
- imap_processing/spice/kernels.py +0 -187
- {imap_processing-0.18.0.dist-info → imap_processing-0.19.0.dist-info}/LICENSE +0 -0
- {imap_processing-0.18.0.dist-info → imap_processing-0.19.0.dist-info}/WHEEL +0 -0
- {imap_processing-0.18.0.dist-info → imap_processing-0.19.0.dist-info}/entry_points.txt +0 -0
imap_processing/_version.py
CHANGED
@@ -1,3 +1,3 @@
 # These version placeholders will be replaced later during substitution.
-__version__ = "0.18.0"
-__version_tuple__ = (0, 18, 0)
+__version__ = "0.19.0"
+__version_tuple__ = (0, 19, 0)
imap_processing/ancillary/ancillary_dataset_combiner.py
CHANGED
@@ -2,6 +2,7 @@

 from __future__ import annotations

+import json
 from collections import namedtuple
 from pathlib import Path

@@ -144,7 +145,53 @@ class AncillaryCombiner:
         """
         return cdf_to_xarray(filepath)

-    def _combine_input_datasets(self) -> xr.Dataset:
+    @staticmethod
+    def convert_json_to_dataset(filepath: str | Path) -> xr.Dataset:
+        """
+        Read a JSON file and convert it to an xarray Dataset.
+
+        This method handles JSON files by converting them to xarray Datasets
+        with appropriate structure. Nested dictionaries are flattened using
+        underscore separation, up to 2 levels deep.
+
+        Parameters
+        ----------
+        filepath : str | Path
+            The path to the JSON file to convert.
+
+        Returns
+        -------
+        xr.Dataset
+            The converted xarray dataset with JSON data as data variables.
+        """
+        with open(filepath) as f:
+            json_data = json.load(f)
+
+        # Convert JSON data to xarray Dataset with appropriate structure
+        # Each top-level key becomes a data variable
+
+        # The structure of the dictionary is {<variable_name>: (dims, data)}
+        # For the lists, we specify the dimension names. For scalars, pass in [].
+        data_vars = {}
+        for key, value in json_data.items():
+            if isinstance(value, (list, tuple)):
+                # Handle arrays/lists
+                data_vars[key] = ([f"dim_{key}"], value)
+            elif isinstance(value, dict):
+                # Handle nested dictionaries by flattening with underscore
+                for subkey, subvalue in value.items():
+                    flat_key = f"{key}_{subkey}"
+                    if isinstance(subvalue, (list, tuple)):
+                        data_vars[flat_key] = ([f"dim_{flat_key}"], subvalue)
+                    else:
+                        data_vars[flat_key] = ([], subvalue)
+            else:
+                # Handle scalar values
+                data_vars[key] = ([], value)
+
+        return xr.Dataset(data_vars)
+
+    def _combine_input_datasets(self) -> xr.Dataset:  # noqa: PLR0912
         """
         Combine all the input datasets into one output dataset.

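For reference, the flattening rules in the new convert_json_to_dataset can be shown with a small, made-up settings dictionary. This is only a sketch: the keys below are invented for illustration (they are not the actual GLOWS pipeline settings), and the import assumes the class is exposed from the module path shown in this diff.

import json
from pathlib import Path

from imap_processing.ancillary.ancillary_dataset_combiner import AncillaryCombiner

# Hypothetical settings content, only to illustrate the flattening rules.
settings = {
    "version": 2,                                  # scalar -> 0-d variable "version"
    "histogram_bins": [30, 60, 90],                # list -> variable on dim "dim_histogram_bins"
    "thresholds": {"low": 0.1, "values": [1, 2]},  # dict -> "thresholds_low", "thresholds_values"
}
path = Path("example_pipeline_settings.json")
path.write_text(json.dumps(settings))

ds = AncillaryCombiner.convert_json_to_dataset(path)
# Expected data variables: "version" (scalar), "histogram_bins" on "dim_histogram_bins",
# "thresholds_low" (scalar), and "thresholds_values" on "dim_thresholds_values".
print(ds.data_vars)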
@@ -167,6 +214,10 @@ class AncillaryCombiner:
         """
         output_dataset = xr.Dataset()

+        # Handle empty input gracefully
+        if not self.timestamped_data:
+            return output_dataset
+
         full_range_start = None
         full_range_end = None
         for timestamped_data in self.timestamped_data:
@@ -258,3 +309,112 @@ class MagAncillaryCombiner(AncillaryCombiner):
         expected_end_date: np.datetime64 | str,
     ):
         super().__init__(ancillary_input, expected_end_date)
+
+
+class GlowsAncillaryCombiner(AncillaryCombiner):
+    """
+    GLOWS-specific instance of AncillaryConverter for bad-angle flag data.
+
+    This class handles GLOWS ancillary files for L1B processing, including:
+    - Excluded regions map (.dat files with ecliptic coordinates)
+    - UV sources map (.dat files with star positions and masking radii)
+    - Suspected transients (.dat files with time-based histogram masks)
+    - Instrument team exclusions (.dat files with time-based histogram masks)
+
+    Parameters
+    ----------
+    ancillary_input : ProcessingInput
+        Collection of GLOWS ancillary files.
+    expected_end_date : np.datetime64 | str
+        The expected end date of the dataset. This is used to fill in the end date
+        of the dataset if it is not provided in the input file. This should either
+        be a numpy datetime64 object or a string in the format YYYYMMDD.
+    """
+
+    def __init__(
+        self,
+        ancillary_input: ProcessingInput | list[Path],
+        expected_end_date: np.datetime64 | str,
+    ):
+        super().__init__(ancillary_input, expected_end_date)
+
+    def convert_file_to_dataset(self, filepath: str | Path) -> xr.Dataset:
+        """
+        Convert GLOWS ancillary .dat files to xarray datasets.
+
+        This method handles different types of GLOWS ancillary files:
+        - excluded_regions: longitude/latitude coordinate pairs
+        - uv_sources: star names with coordinates and masking radii
+        - suspected_transients: time-based histogram masks
+        - exclusions_by_instr_team: time-based histogram masks
+
+        Parameters
+        ----------
+        filepath : str | Path
+            The path to the GLOWS ancillary file to convert.
+
+        Returns
+        -------
+        xr.Dataset
+            The converted xarray dataset with appropriate dimensions and variables.
+        """
+        filepath = Path(filepath)
+        filename = filepath.name
+
+        if "excluded-regions" in filename:
+            # Handle excluded regions (2 columns: longitude, latitude)
+            data = np.loadtxt(filepath, comments="#")
+            return xr.Dataset(
+                {
+                    "ecliptic_longitude_deg": (["region"], data[:, 0]),
+                    "ecliptic_latitude_deg": (["region"], data[:, 1]),
+                }
+            )
+
+        elif "uv-sources" in filename:
+            # Handle UV sources (4 columns: name, longitude, latitude, radius)
+            data = np.loadtxt(filepath, comments="#", dtype=str)
+            return xr.Dataset(
+                {
+                    "object_name": (["source"], data[:, 0]),
+                    "ecliptic_longitude_deg": (["source"], data[:, 1].astype(float)),
+                    "ecliptic_latitude_deg": (["source"], data[:, 2].astype(float)),
+                    "angular_radius_for_masking": (
+                        ["source"],
+                        data[:, 3].astype(float),
+                    ),
+                }
+            )
+
+        elif "suspected-transients" in filename:
+            # Handle suspected transients (time identifier + mask string)
+            with open(filepath) as f:
+                lines = [line.strip() for line in f if not line.startswith("#")]
+            identifiers = [line.split(" ", 1)[0] for line in lines]
+            masks = [line.split(" ", 1)[1] for line in lines]
+            return xr.Dataset(
+                {
+                    "l1b_unique_block_identifier": (["time_block"], identifiers),
+                    "histogram_mask_array": (["time_block"], masks),
+                }
+            )
+
+        elif "exclusions-by-instr-team" in filename:
+            # Handle instrument team exclusions (time identifier + mask string)
+            with open(filepath) as f:
+                lines = [line.strip() for line in f if not line.startswith("#")]
+            identifiers = [line.split(" ", 1)[0] for line in lines]
+            masks = [line.split(" ", 1)[1] for line in lines]
+            return xr.Dataset(
+                {
+                    "l1b_unique_block_identifier": (["time_block"], identifiers),
+                    "histogram_mask_array": (["time_block"], masks),
+                }
+            )
+
+        elif filename.endswith(".json"):
+            # Handle pipeline settings JSON file using the generic read_json method
+            return self.convert_json_to_dataset(filepath)
+
+        else:
+            raise ValueError(f"Unknown GLOWS ancillary file type: {filename}")