imap-processing 1.0.0-py3-none-any.whl → 1.0.1-py3-none-any.whl
- imap_processing/_version.py +2 -2
- imap_processing/cdf/config/imap_codice_global_cdf_attrs.yaml +13 -1
- imap_processing/cdf/config/imap_codice_l2-hi-omni_variable_attrs.yaml +635 -0
- imap_processing/cdf/config/imap_codice_l2-hi-sectored_variable_attrs.yaml +422 -0
- imap_processing/cdf/config/imap_enamaps_l2-common_variable_attrs.yaml +28 -21
- imap_processing/cdf/config/imap_enamaps_l2-healpix_variable_attrs.yaml +2 -0
- imap_processing/cdf/config/imap_enamaps_l2-rectangular_variable_attrs.yaml +12 -2
- imap_processing/cli.py +6 -11
- imap_processing/codice/codice_l2.py +640 -127
- imap_processing/codice/constants.py +61 -0
- imap_processing/ena_maps/ena_maps.py +111 -60
- imap_processing/ena_maps/utils/coordinates.py +5 -0
- imap_processing/ena_maps/utils/corrections.py +268 -0
- imap_processing/ena_maps/utils/map_utils.py +143 -42
- imap_processing/hi/hi_l2.py +3 -8
- imap_processing/ialirt/constants.py +7 -1
- imap_processing/ialirt/generate_coverage.py +1 -1
- imap_processing/ialirt/l0/process_codice.py +66 -0
- imap_processing/ialirt/utils/create_xarray.py +1 -0
- imap_processing/idex/idex_l2a.py +2 -2
- imap_processing/idex/idex_l2b.py +1 -1
- imap_processing/lo/l1c/lo_l1c.py +61 -3
- imap_processing/lo/l2/lo_l2.py +79 -11
- imap_processing/mag/l1a/mag_l1a.py +2 -2
- imap_processing/mag/l1a/mag_l1a_data.py +71 -13
- imap_processing/mag/l1c/interpolation_methods.py +34 -13
- imap_processing/mag/l1c/mag_l1c.py +117 -67
- imap_processing/mag/l1d/mag_l1d_data.py +3 -1
- imap_processing/spice/geometry.py +11 -9
- imap_processing/spice/pointing_frame.py +77 -50
- imap_processing/swapi/l1/swapi_l1.py +12 -4
- imap_processing/swe/utils/swe_constants.py +7 -7
- imap_processing/ultra/l1b/extendedspin.py +1 -1
- imap_processing/ultra/l1b/ultra_l1b_culling.py +2 -2
- imap_processing/ultra/l1b/ultra_l1b_extended.py +1 -1
- imap_processing/ultra/l1c/helio_pset.py +1 -1
- imap_processing/ultra/l1c/spacecraft_pset.py +2 -2
- imap_processing-1.0.1.dist-info/METADATA +121 -0
- {imap_processing-1.0.0.dist-info → imap_processing-1.0.1.dist-info}/RECORD +42 -40
- imap_processing-1.0.0.dist-info/METADATA +0 -120
- {imap_processing-1.0.0.dist-info → imap_processing-1.0.1.dist-info}/LICENSE +0 -0
- {imap_processing-1.0.0.dist-info → imap_processing-1.0.1.dist-info}/WHEEL +0 -0
- {imap_processing-1.0.0.dist-info → imap_processing-1.0.1.dist-info}/entry_points.txt +0 -0
@@ -63,17 +63,16 @@ def mag_l1c(
     )
 
     interp_function = InterpolationFunction[configuration.L1C_INTERPOLATION_METHOD]
-    if
-
-
+    if burst_mode_dataset is not None:
+        # Only use day_to_process if there is no norm data
+        day_to_process_arg = day_to_process if normal_mode_dataset is None else None
+        full_interpolated_timeline: np.ndarray = process_mag_l1c(
+            normal_mode_dataset, burst_mode_dataset, interp_function, day_to_process_arg
         )
     elif normal_mode_dataset is not None:
-        full_interpolated_timeline = fill_normal_data(
-            normal_mode_dataset, normal_mode_dataset["epoch"].data
-        )
+        full_interpolated_timeline = fill_normal_data(normal_mode_dataset)
     else:
-
-        raise NotImplementedError
+        raise ValueError("At least one of norm or burst dataset must be provided.")
 
     completed_timeline = remove_missing_data(full_interpolated_timeline)
 
@@ -127,12 +126,14 @@ def mag_l1c(
     global_attributes["missing_sequences"] = ""
 
     try:
-
-
+        active_dataset = normal_mode_dataset or burst_mode_dataset
+
+        global_attributes["is_mago"] = active_dataset.attrs["is_mago"]
+        global_attributes["is_active"] = active_dataset.attrs["is_active"]
 
         # Check if all vectors are primary in both normal and burst datasets
-        is_mago =
-        normal_all_primary =
+        is_mago = active_dataset.attrs.get("is_mago", "False") == "True"
+        normal_all_primary = active_dataset.attrs.get("all_vectors_primary", False)
 
         # Default for missing burst dataset: 1 if MAGO (expected primary), 0 if MAGI
         burst_all_primary = is_mago
@@ -146,14 +147,14 @@ def mag_l1c(
             normal_all_primary and burst_all_primary
         )
 
-        global_attributes["missing_sequences"] =
+        global_attributes["missing_sequences"] = active_dataset.attrs[
             "missing_sequences"
         ]
     except KeyError as e:
         logger.info(
             f"Key error when assigning global attributes, attribute not found in "
             f"L1B file with logical source "
-            f"{
+            f"{active_dataset.attrs['Logical_source']}: {e}"
         )
 
     global_attributes["interpolation_method"] = interp_function.name
@@ -176,16 +177,24 @@ def mag_l1c(
         attrs=attribute_manager.get_variable_attributes("vector_attrs"),
     )
 
-    output_dataset["
-
-
-
-
-
-
-
-
-
+    if len(output_dataset["vectors"]) > 0:
+        output_dataset["vector_magnitude"] = xr.apply_ufunc(
+            lambda x: np.linalg.norm(x[:4]),
+            output_dataset["vectors"],
+            input_core_dims=[["direction"]],
+            output_core_dims=[[]],
+            vectorize=True,
+        )
+        output_dataset[
+            "vector_magnitude"
+        ].attrs = attribute_manager.get_variable_attributes("vector_magnitude_attrs")
+    else:
+        output_dataset["vector_magnitude"] = xr.DataArray(
+            np.empty((0, 1)),
+            name="vector_magnitude",
+            dims=["epoch", "vector_magnitude"],
+            attrs=attribute_manager.get_variable_attributes("vector_magnitude_attrs"),
+        )
 
     output_dataset["compression_flags"] = xr.DataArray(
         completed_timeline[:, 6:8],
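The new `vector_magnitude` variable above is built with `xarray.apply_ufunc`, collapsing the per-sample `direction` dimension to one scalar per epoch. A minimal standalone sketch of that pattern, using invented data rather than anything from the package:

```python
import numpy as np
import xarray as xr

# Hypothetical stand-in for the L1C vectors: 5 epochs x 4 components.
vectors = xr.DataArray(
    np.arange(20.0).reshape(5, 4),
    dims=["epoch", "direction"],
)

# "direction" is declared as a core dimension, so the norm is applied
# row by row and the result keeps only the "epoch" dimension.
magnitude = xr.apply_ufunc(
    np.linalg.norm,
    vectors,
    input_core_dims=[["direction"]],
    output_core_dims=[[]],
    vectorize=True,
)
print(magnitude.dims)  # ('epoch',)
```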
@@ -265,7 +274,7 @@ def select_datasets(
 
 
 def process_mag_l1c(
-    normal_mode_dataset: xr.Dataset,
+    normal_mode_dataset: xr.Dataset | None,
     burst_mode_dataset: xr.Dataset,
     interpolation_function: InterpolationFunction,
     day_to_process: np.datetime64 | None = None,
@@ -305,38 +314,51 @@ def process_mag_l1c(
     np.ndarray
         An (n, 8) shaped array containing the completed timeline.
     """
-    norm_epoch = normal_mode_dataset["epoch"].data
-    if "vectors_per_second" in normal_mode_dataset.attrs:
-        normal_vecsec_dict = vectors_per_second_from_string(
-            normal_mode_dataset.attrs["vectors_per_second"]
-        )
-    else:
-        normal_vecsec_dict = None
-
-    output_dataset = normal_mode_dataset.copy(deep=True)
-    output_dataset["sample_interpolated"] = xr.DataArray(
-        np.zeros(len(normal_mode_dataset))
-    )
     day_start_ns = None
     day_end_ns = None
 
     if day_to_process is not None:
-        day_start = day_to_process.astype("datetime64[s]") - np.timedelta64(
+        day_start = day_to_process.astype("datetime64[s]") - np.timedelta64(30, "m")
 
-        # get the end of the day plus
+        # get the end of the day plus 30 minutes
         day_end = (
             day_to_process.astype("datetime64[s]")
             + np.timedelta64(1, "D")
-            + np.timedelta64(
+            + np.timedelta64(30, "m")
        )
 
         day_start_ns = et_to_ttj2000ns(str_to_et(str(day_start)))
         day_end_ns = et_to_ttj2000ns(str_to_et(str(day_end)))
 
-
+    if normal_mode_dataset:
+        norm_epoch = normal_mode_dataset["epoch"].data
+        if "vectors_per_second" in normal_mode_dataset.attrs:
+            normal_vecsec_dict = vectors_per_second_from_string(
+                normal_mode_dataset.attrs["vectors_per_second"]
+            )
+        else:
+            normal_vecsec_dict = None
+
+        gaps = find_all_gaps(norm_epoch, normal_vecsec_dict, day_start_ns, day_end_ns)
+    else:
+        norm_epoch = [day_start_ns, day_end_ns]
+        gaps = np.array(
+            [
+                [
+                    day_start_ns,
+                    day_end_ns,
+                    VecSec.TWO_VECS_PER_S.value,
+                ]
+            ]
+        )
 
     new_timeline = generate_timeline(norm_epoch, gaps)
-
+
+    if normal_mode_dataset:
+        norm_filled: np.ndarray = fill_normal_data(normal_mode_dataset, new_timeline)
+    else:
+        norm_filled = generate_empty_norm_array(new_timeline)
+
     interpolated = interpolate_gaps(
         burst_mode_dataset, gaps, norm_filled, interpolation_function
     )
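The day-boundary handling above pads the processing day by 30 minutes on each side using plain `numpy` datetime arithmetic; a small illustration of that arithmetic (the date is arbitrary):

```python
import numpy as np

day = np.datetime64("2026-03-15")  # arbitrary example day, datetime64[D]

# Half an hour before the start of the day, and half an hour past its end.
day_start = day.astype("datetime64[s]") - np.timedelta64(30, "m")
day_end = day.astype("datetime64[s]") + np.timedelta64(1, "D") + np.timedelta64(30, "m")

print(day_start)  # 2026-03-14T23:30:00
print(day_end)    # 2026-03-16T00:30:00
```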
@@ -344,10 +366,32 @@ def process_mag_l1c(
     return interpolated
 
 
+def generate_empty_norm_array(new_timeline: np.ndarray) -> np.ndarray:
+    """
+    Generate an empty Normal mode array with the new timeline.
+
+    Parameters
+    ----------
+    new_timeline : np.ndarray
+        A 1D array of timestamps to fill.
+
+    Returns
+    -------
+    np.ndarray
+        An (n, 8) shaped array containing the timeline filled with `FILLVAL` data.
+    """
+    # TODO: fill with FILLVAL
+    norm_filled: np.ndarray = np.zeros((len(new_timeline), 8))
+    norm_filled[:, 0] = new_timeline
+    # Flags, will also indicate any missed timestamps
+    norm_filled[:, 5] = ModeFlags.MISSING.value
+
+    return norm_filled
+
+
 def fill_normal_data(
     normal_dataset: xr.Dataset,
-    new_timeline: np.ndarray,
-    day_to_process: np.datetime64 | None = None,
+    new_timeline: np.ndarray | None = None,
 ) -> np.ndarray:
     """
     Fill the new timeline with the normal mode data.
@@ -358,26 +402,23 @@ def fill_normal_data(
     ----------
     normal_dataset : xr.Dataset
         The normal mode dataset.
-    new_timeline : np.ndarray
-        A 1D array of timestamps to fill.
-
-        The day to process, in np.datetime64[D] format. This is used to fill
-        gaps at the beginning or end of the day if needed. If not included, these
-        gaps will not be filled.
+    new_timeline : np.ndarray, optional
+        A 1D array of timestamps to fill. If not provided, the normal mode timestamps
+        will be used.
 
     Returns
     -------
-    np.ndarray
+    filled_timeline : np.ndarray
         An (n, 8) shaped array containing the timeline filled with normal mode data.
         Gaps are marked as -1 in the generated flag column at index 5.
         Indices: 0 - epoch, 1-4 - vector x, y, z, and range, 5 - generated flag,
         6-7 - compression flags.
     """
-
-
-
-
-
+    if new_timeline is None:
+        new_timeline = normal_dataset["epoch"].data
+
+    filled_timeline = generate_empty_norm_array(new_timeline)
+
     for index, timestamp in enumerate(normal_dataset["epoch"].data):
         timeline_index = np.searchsorted(new_timeline, timestamp)
         filled_timeline[timeline_index, 1:5] = normal_dataset["vectors"].data[index]
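`fill_normal_data` places each normal-mode sample into the generated timeline with `np.searchsorted`, which assumes the timeline is sorted and contains the sample timestamps; a toy illustration with invented values:

```python
import numpy as np

timeline = np.array([0, 10, 20, 30, 40])  # sorted epoch values
filled = np.zeros((timeline.size, 8))
filled[:, 0] = timeline

# Two hypothetical samples keyed by epoch: vector x, y, z, range.
samples = {10: [1.0, 2.0, 3.0, 4.0], 30: [5.0, 6.0, 7.0, 8.0]}
for epoch, vector in samples.items():
    idx = np.searchsorted(timeline, epoch)  # index of the matching timestamp
    filled[idx, 1:5] = vector

print(filled[1])  # [10.  1.  2.  3.  4.  0.  0.  0.]
```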
@@ -463,20 +504,17 @@ def interpolate_gaps(
         ]
 
         short = (gap_timeline >= burst_epochs[burst_start]) & (
-            gap_timeline <= burst_epochs[
+            gap_timeline <= burst_epochs[burst_end]
         )
-
-
+        num_short = int(short.sum())
+
+        if len(gap_timeline) != num_short:
+            print(f"Chopping timeline from {len(gap_timeline)} to {num_short}")
 
         # Limit timestamps to only include the areas with burst data
-        gap_timeline = gap_timeline[
-            (
-                (gap_timeline >= burst_epochs[burst_start])
-                & (gap_timeline <= burst_epochs[burst_gap_end])
-            )
-        ]
+        gap_timeline = gap_timeline[short]
         # do not include range
-        gap_fill = interpolation_function(
+        adjusted_gap_timeline, gap_fill = interpolation_function(
             burst_vectors[burst_start:burst_end, :3],
             burst_epochs[burst_start:burst_end],
             gap_timeline,
@@ -485,7 +523,7 @@ def interpolate_gaps(
         )
 
         # gaps should not have data in timeline, still check it
-        for index, timestamp in enumerate(
+        for index, timestamp in enumerate(adjusted_gap_timeline):
             timeline_index = np.searchsorted(filled_norm_timeline[:, 0], timestamp)
             if sum(
                 filled_norm_timeline[timeline_index, 1:4]
@@ -500,6 +538,18 @@ def interpolate_gaps(
                 "compression_flags"
             ].data[burst_gap_start + index]
 
+        # for any timestamp that was not filled and is still missing, remove it
+        missing_timeline = np.setdiff1d(gap_timeline, adjusted_gap_timeline)
+
+        for timestamp in missing_timeline:
+            timeline_index = np.searchsorted(filled_norm_timeline[:, 0], timestamp)
+            if filled_norm_timeline[timeline_index, 5] != ModeFlags.MISSING.value:
+                raise RuntimeError(
+                    "Self-inconsistent data. "
+                    "Gaps not included in final timeline should be missing."
+                )
+            np.delete(filled_norm_timeline, timeline_index)
+
     return filled_norm_timeline
 
 
@@ -693,10 +693,12 @@ class MagL1d(MagL2L1dBase): # type: ignore[misc]
             - gradiometer_offset_magnitude: magnitude of the offset vector
             - quality_flags: quality flags (1 if magnitude > threshold, 0 otherwise)
         """
-
+        # TODO: should this extrapolate or should non-overlapping data be removed?
+        _, aligned_magi = linear(
             magi_vectors,
             magi_epoch,
             mago_epoch,
+            extrapolate=True,
         )
 
         diff = aligned_magi - mago_vectors
@@ -129,7 +129,7 @@ def imap_state(
     -------
     state : np.ndarray
         The Cartesian state vector representing the position and velocity of the
-        IMAP spacecraft.
+        IMAP spacecraft. Units are km and km/s.
     """
     state, _ = spiceypy.spkezr(
         SpiceBody.IMAP.name, et, ref_frame.name, abcorr, observer.name
@@ -323,6 +323,7 @@ def frame_transform_az_el(
         Ephemeris time(s) corresponding to position(s).
     az_el : np.ndarray
         <azimuth, elevation> vector or array of vectors in reference frame `from_frame`.
+        Azimuth and elevation pairs are always the final dimension of the array.
         There are several possible shapes for the input az_el and et:
         1. A single az_el vector may be provided for multiple `et` query times
         2. A single `et` may be provided for multiple az_el vectors,
@@ -340,15 +341,16 @@ def frame_transform_az_el(
     to_frame_az_el : np.ndarray
         Azimuth/elevation coordinates in reference frame `to_frame`. This
         output coordinate vector will have shape (2,) if a single `az_el` position
-        vector and single `et` time are input. Otherwise, it will have shape (
-        where
-        axis of the output vector contains azimuth in
-        in the 1st position.
+        vector and single `et` time are input. Otherwise, it will have shape (..., 2)
+        where ... matches the leading dimensions of the input position vector or
+        ephemeris times. The last axis of the output vector contains azimuth in
+        the 0th position and elevation in the 1st position.
     """
     # Convert input az/el to Cartesian vectors
-    spherical_coords_in = np.
-        [np.ones_like(az_el[..., 0]), az_el[..., 0], az_el[..., 1]]
-
+    spherical_coords_in = np.stack(
+        [np.ones_like(az_el[..., 0]), az_el[..., 0], az_el[..., 1]],
+        axis=-1,
+    )
     from_frame_cartesian = spherical_to_cartesian(spherical_coords_in)
     # Transform to to_frame
     to_frame_cartesian = frame_transform(et, from_frame_cartesian, from_frame, to_frame)
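The replacement above builds (radius, azimuth, elevation) triples by stacking along a new trailing axis, keeping the az/el pair in the last dimension as the updated docstring describes; a standalone sketch with made-up angles:

```python
import numpy as np

az_el = np.array([[45.0, 10.0], [90.0, -5.0]])  # two az/el pairs, shape (2, 2)

# Unit radius plus the azimuth and elevation columns, stacked on a new last axis.
spherical = np.stack(
    [np.ones_like(az_el[..., 0]), az_el[..., 0], az_el[..., 1]],
    axis=-1,
)
print(spherical.shape)  # (2, 3): radius, azimuth, elevation per row
```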
@@ -531,7 +533,7 @@ def cartesian_to_spherical(
     az = np.degrees(az)
     el = np.degrees(el)
 
-    spherical_coords = np.stack((np.squeeze(magnitude_v), az, el), axis=-1)
+    spherical_coords = np.stack((np.squeeze(magnitude_v, -1), az, el), axis=-1)
 
     return spherical_coords
 
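The one-line change to `cartesian_to_spherical` restricts the squeeze to the trailing axis. A plausible motivation, illustrated with invented shapes: a bare `np.squeeze` collapses a single-vector magnitude all the way to a 0-d scalar, which can no longer be stacked with the 1-element azimuth/elevation arrays, while squeezing only axis -1 keeps the leading dimension.

```python
import numpy as np

v = np.array([[1.0, 2.0, 2.0]])                           # one vector, shape (1, 3)
magnitude_v = np.linalg.norm(v, axis=-1, keepdims=True)   # shape (1, 1)
az = np.array([30.0])                                     # shape (1,)
el = np.array([10.0])                                     # shape (1,)

print(np.squeeze(magnitude_v).shape)      # () -- 0-d, stacking with az/el would raise
print(np.squeeze(magnitude_v, -1).shape)  # (1,) -- matches az/el

spherical = np.stack((np.squeeze(magnitude_v, -1), az, el), axis=-1)
print(spherical.shape)                    # (1, 3)
```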
@@ -34,14 +34,14 @@ POINTING_SEGMENT_DTYPE = np.dtype(
 )
 
 
-def generate_pointing_attitude_kernel(
+def generate_pointing_attitude_kernel(imap_attitude_cks: list[Path]) -> list[Path]:
     """
     Generate pointing attitude kernel from input IMAP CK kernel.
 
     Parameters
     ----------
-
-
+    imap_attitude_cks : list[Path]
+        List of the IMAP attitude kernels from which to generate pointing
         attitude.
 
     Returns
@@ -49,20 +49,29 @@ def generate_pointing_attitude_kernel(imap_attitude_ck: Path) -> list[Path]:
     pointing_kernel_path : list[Path]
         Location of the new pointing kernels.
     """
-    pointing_segments = calculate_pointing_attitude_segments(
+    pointing_segments = calculate_pointing_attitude_segments(imap_attitude_cks)
+    if len(pointing_segments) == 0:
+        raise ValueError("No Pointings covered by input dependencies.")
+
     # get the start and end yyyy_doy strings
-
-
-
-
+    start_datetime = spiceypy.et2datetime(
+        sct_to_et(pointing_segments[0]["start_sclk_ticks"])
+    )
+    end_datetime = spiceypy.et2datetime(
+        sct_to_et(pointing_segments[-1]["end_sclk_ticks"])
+    )
+    # Use the last ck from sorted list to get the version number. I
+    # don't think this will be anything but 1.
+    sorted_ck_paths = list(sorted(imap_attitude_cks, key=lambda x: x.name))
+    spice_file = SPICEFilePath(sorted_ck_paths[-1].name)
     pointing_kernel_path = (
-
-        f"{
-        f"{
+        sorted_ck_paths[-1].parent / f"imap_dps_"
+        f"{start_datetime.strftime('%Y_%j')}_"
+        f"{end_datetime.strftime('%Y_%j')}_"
         f"{spice_file.spice_metadata['version']}.ah.bc"
     )
     write_pointing_frame_ck(
-        pointing_kernel_path, pointing_segments,
+        pointing_kernel_path, pointing_segments, [p.name for p in imap_attitude_cks]
     )
     return [pointing_kernel_path]
 
@@ -93,7 +102,7 @@ def open_spice_ck_file(pointing_frame_path: Path) -> Generator[int, None, None]:
 
 
 def write_pointing_frame_ck(
-    pointing_kernel_path: Path, segment_data: np.ndarray,
+    pointing_kernel_path: Path, segment_data: np.ndarray, parent_cks: list[str]
 ) -> None:
     """
     Write a Pointing Frame attitude kernel.
@@ -108,8 +117,8 @@ def write_pointing_frame_ck(
         ("end_sclk_ticks", np.float64),
         ("quaternion", np.float64, (4,)),
         ("pointing_id", np.uint32),
-
-
+    parent_cks : list[str]
+        Filenames of the CK kernels that the quaternions were derived from.
     """
     id_imap_dps = spiceypy.gipool("FRAME_IMAP_DPS", 0, 1)
 
@@ -119,10 +128,12 @@ def write_pointing_frame_ck(
         "",
         f"Original file name: {pointing_kernel_path.name}",
         f"Creation date: {datetime.now(timezone.utc).strftime('%Y-%m-%d')}",
-        f"Parent
+        f"Parent files: {parent_cks}",
         "",
     ]
 
+    logger.debug(f"Writing pointing attitude kernel: {pointing_kernel_path}")
+
     with open_spice_ck_file(pointing_kernel_path) as handle:
         # Write the comments to the file
         spiceypy.dafac(handle, comments)
@@ -161,9 +172,11 @@ def write_pointing_frame_ck(
             np.array([TICK_DURATION]),
         )
 
+    logger.debug(f"Finished writing pointing attitude kernel: {pointing_kernel_path}")
+
 
 def calculate_pointing_attitude_segments(
-
+    ck_paths: list[Path],
 ) -> NDArray:
     """
     Calculate the data for each segment of the DPS_FRAME attitude kernel.
@@ -177,8 +190,8 @@ def calculate_pointing_attitude_segments(
 
     Parameters
     ----------
-
-
+    ck_paths : list[pathlib.Path]
+        List of CK kernels to use to generate the pointing attitude kernel.
 
     Returns
     -------
@@ -200,36 +213,41 @@ def calculate_pointing_attitude_segments(
     - IMAP historical attitude kernel from which the pointing frame kernel will
       be generated.
     """
-    logger.info(
+    logger.info(
+        f"Extracting mean spin axes for all Pointings that are"
+        f" fully covered by the CK files: {[p.name for p in ck_paths]}"
+    )
     # Get IDs.
     # https://spiceypy.readthedocs.io/en/main/documentation.html#spiceypy.spiceypy.gipool
     id_imap_sclk = spiceypy.gipool("CK_-43000_SCLK", 0, 1)
+    id_imap_spacecraft = spiceypy.gipool("FRAME_IMAP_SPACECRAFT", 0, 1)
 
-    #
-    #
-
-
-
-
-
+    # This job relies on the batch starter to provide all the correct CK kernels
+    # to cover the time range of the new repoint table.
+    # Get the coverage of the CK files storing the earliest start time and
+    # latest end time.
+    et_start = np.inf
+    et_end = -np.inf
+    for ck_path in ck_paths:
+        ck_cover = spiceypy.ckcov(
+            str(ck_path), int(id_imap_spacecraft), True, "INTERVAL", 0, "TDB"
         )
+        num_intervals = spiceypy.wncard(ck_cover)
+        individual_ck_start, _ = spiceypy.wnfetd(ck_cover, 0)
+        _, individual_ck_end = spiceypy.wnfetd(ck_cover, num_intervals - 1)
+        logger.debug(
+            f"{ck_path.name} covers time range: ({et_to_utc(individual_ck_start)}, "
+            f"{et_to_utc(individual_ck_end)}) in {num_intervals} intervals."
+        )
+        et_start = min(et_start, individual_ck_start)
+        et_end = max(et_end, individual_ck_end)
 
-    id_imap_spacecraft = spiceypy.gipool("FRAME_IMAP_SPACECRAFT", 0, 1)
-
-    # Select only the pointings within the attitude coverage.
-    ck_cover = spiceypy.ckcov(
-        str(ck_path), int(id_imap_spacecraft), True, "INTERVAL", 0, "TDB"
-    )
-    num_intervals = spiceypy.wncard(ck_cover)
-    et_start, _ = spiceypy.wnfetd(ck_cover, 0)
-    _, et_end = spiceypy.wnfetd(ck_cover, num_intervals - 1)
     logger.info(
-        f"
-        f"
+        f"CK kernels combined coverage range: "
+        f"{(et_to_utc(et_start), et_to_utc(et_end))}, "
     )
 
-    # Get data from the repoint table and
-    # covered by this attitude kernel
+    # Get data from the repoint table and convert to Pointings
     repoint_df = get_repoint_data()
     repoint_df["repoint_start_et"] = sct_to_et(
         met_to_sclkticks(repoint_df["repoint_start_met"].values)
@@ -237,20 +255,29 @@ def calculate_pointing_attitude_segments(
     repoint_df["repoint_end_et"] = sct_to_et(
         met_to_sclkticks(repoint_df["repoint_end_met"].values)
     )
-
-
-
-
-
+    pointing_ids = repoint_df["repoint_id"].values[:-1]
+    pointing_start_ets = repoint_df["repoint_end_et"].values[:-1]
+    pointing_end_ets = repoint_df["repoint_start_et"].values[1:]
+
+    # Keep only the pointings that are fully covered by the attitude kernels.
+    keep_mask = (pointing_start_ets >= et_start) & (pointing_end_ets <= et_end)
+    # Filter the pointing data.
+    pointing_ids = pointing_ids[keep_mask]
+    pointing_start_ets = pointing_start_ets[keep_mask]
+    pointing_end_ets = pointing_end_ets[keep_mask]
+
+    n_pointings = len(pointing_ids)
+    if n_pointings == 0:
+        logger.warning(
+            "No Pointings identified based on coverage of CK files. Skipping."
+        )
 
     pointing_segments = np.zeros(n_pointings, dtype=POINTING_SEGMENT_DTYPE)
 
     for i_pointing in range(n_pointings):
-        pointing_segments[i_pointing]["pointing_id"] =
-
-        ]
-        pointing_start_et = repoint_df.iloc[i_pointing]["repoint_end_et"]
-        pointing_end_et = repoint_df.iloc[i_pointing + 1]["repoint_start_et"]
+        pointing_segments[i_pointing]["pointing_id"] = pointing_ids[i_pointing]
+        pointing_start_et = pointing_start_ets[i_pointing]
+        pointing_end_et = pointing_end_ets[i_pointing]
         logger.debug(
             f"Calculating pointing attitude for pointing "
             f"{pointing_segments[i_pointing]['pointing_id']} with time "
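The pointing selection above turns the repoint table into per-pointing start/end times and keeps only pointings that fall entirely inside the combined CK coverage window; the same masking logic on invented numbers:

```python
import numpy as np

# Invented repoint boundaries (ephemeris seconds): pointing i spans
# repoint_end[i] .. repoint_start[i + 1].
repoint_start = np.array([0.0, 110.0, 220.0, 330.0])
repoint_end = np.array([10.0, 120.0, 230.0, 340.0])
pointing_ids = np.array([1, 2, 3, 4])

pointing_start_ets = repoint_end[:-1]   # [10., 120., 230.]
pointing_end_ets = repoint_start[1:]    # [110., 220., 330.]
pointing_ids = pointing_ids[:-1]

et_start, et_end = 100.0, 335.0         # combined CK coverage (invented)
keep = (pointing_start_ets >= et_start) & (pointing_end_ets <= et_end)
print(pointing_ids[keep])               # [2 3]
```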
@@ -727,7 +727,7 @@ def process_swapi_science(
     return dataset
 
 
-def swapi_l1(dependencies: ProcessingInputCollection) -> xr.Dataset:
+def swapi_l1(dependencies: ProcessingInputCollection, descriptor: str) -> xr.Dataset:
     """
     Will process SWAPI level 0 data to level 1.
 
@@ -735,6 +735,9 @@ def swapi_l1(dependencies: ProcessingInputCollection) -> xr.Dataset:
     ----------
     dependencies : ProcessingInputCollection
         Input dependencies needed for L1 processing.
+    descriptor : str
+        Descriptor for the type of data to process.
+        Options are 'hk' or 'sci'.
 
     Returns
     -------
@@ -754,9 +757,11 @@ def swapi_l1(dependencies: ProcessingInputCollection) -> xr.Dataset:
         l0_files[0], xtce_definition, use_derived_value=False
     )
 
-
-    if hk_files and l0_unpacked_dict.get(SWAPIAPID.SWP_SCI, None) is not None:
+    if descriptor == "sci":
         logger.info(f"Processing SWAPI science data for {l0_files[0]}.")
+        if SWAPIAPID.SWP_SCI not in l0_unpacked_dict:
+            logger.warning("No SWP_SCI packets found.")
+            return []
         # process science data.
         # First read HK data.
         hk_files = dependencies.get_file_paths(descriptor="hk")
@@ -770,8 +775,11 @@ def swapi_l1(dependencies: ProcessingInputCollection) -> xr.Dataset:
         )
         return [sci_dataset]
 
-    elif
+    elif descriptor == "hk":
         logger.info(f"Processing HK data for {l0_files[0]}.")
+        if SWAPIAPID.SWP_HK not in l0_unpacked_dict:
+            logger.warning("No SWP_HK packets found.")
+            return []
         # Get L1A and L1B HK data.
         l1a_hk_data = l0_unpacked_dict[SWAPIAPID.SWP_HK]
         l1b_hk_data = packet_file_to_datasets(
@@ -16,13 +16,13 @@ ENERGY_CONVERSION_FACTOR = 4.75
 # 7 CEMs geometric factors in cm^2 sr eV/eV units.
 GEOMETRIC_FACTORS = np.array(
     [
-
-
-
-
-
-
-
+        424.4e-6,
+        564.5e-6,
+        763.8e-6,
+        916.9e-6,
+        792.0e-6,
+        667.7e-6,
+        425.2e-6,
     ]
 )
 
@@ -50,7 +50,7 @@ def calculate_extendedspin(
         de_dataset["spin"].values,
         de_dataset["energy"].values,
     )
-    count_rates, _,
+    count_rates, _, _counts, _ = get_energy_histogram(
         de_dataset["spin"].values, de_dataset["energy"].values
     )
     attitude_qf, spin_rates, spin_period, spin_starttime = flag_attitude(