dkist_processing_cryonirsp-1.3.4-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of dkist-processing-cryonirsp might be problematic.
- changelog/.gitempty +0 -0
- dkist_processing_cryonirsp/__init__.py +11 -0
- dkist_processing_cryonirsp/config.py +12 -0
- dkist_processing_cryonirsp/models/__init__.py +1 -0
- dkist_processing_cryonirsp/models/constants.py +248 -0
- dkist_processing_cryonirsp/models/exposure_conditions.py +26 -0
- dkist_processing_cryonirsp/models/parameters.py +296 -0
- dkist_processing_cryonirsp/models/tags.py +168 -0
- dkist_processing_cryonirsp/models/task_name.py +14 -0
- dkist_processing_cryonirsp/parsers/__init__.py +1 -0
- dkist_processing_cryonirsp/parsers/cryonirsp_l0_fits_access.py +111 -0
- dkist_processing_cryonirsp/parsers/cryonirsp_l1_fits_access.py +30 -0
- dkist_processing_cryonirsp/parsers/exposure_conditions.py +163 -0
- dkist_processing_cryonirsp/parsers/map_repeats.py +40 -0
- dkist_processing_cryonirsp/parsers/measurements.py +55 -0
- dkist_processing_cryonirsp/parsers/modstates.py +31 -0
- dkist_processing_cryonirsp/parsers/optical_density_filters.py +40 -0
- dkist_processing_cryonirsp/parsers/polarimetric_check.py +120 -0
- dkist_processing_cryonirsp/parsers/scan_step.py +412 -0
- dkist_processing_cryonirsp/parsers/time.py +80 -0
- dkist_processing_cryonirsp/parsers/wavelength.py +26 -0
- dkist_processing_cryonirsp/tasks/__init__.py +19 -0
- dkist_processing_cryonirsp/tasks/assemble_movie.py +202 -0
- dkist_processing_cryonirsp/tasks/bad_pixel_map.py +96 -0
- dkist_processing_cryonirsp/tasks/beam_boundaries_base.py +279 -0
- dkist_processing_cryonirsp/tasks/ci_beam_boundaries.py +55 -0
- dkist_processing_cryonirsp/tasks/ci_science.py +169 -0
- dkist_processing_cryonirsp/tasks/cryonirsp_base.py +67 -0
- dkist_processing_cryonirsp/tasks/dark.py +98 -0
- dkist_processing_cryonirsp/tasks/gain.py +251 -0
- dkist_processing_cryonirsp/tasks/instrument_polarization.py +447 -0
- dkist_processing_cryonirsp/tasks/l1_output_data.py +44 -0
- dkist_processing_cryonirsp/tasks/linearity_correction.py +582 -0
- dkist_processing_cryonirsp/tasks/make_movie_frames.py +302 -0
- dkist_processing_cryonirsp/tasks/mixin/__init__.py +1 -0
- dkist_processing_cryonirsp/tasks/mixin/beam_access.py +52 -0
- dkist_processing_cryonirsp/tasks/mixin/corrections.py +177 -0
- dkist_processing_cryonirsp/tasks/mixin/intermediate_frame.py +193 -0
- dkist_processing_cryonirsp/tasks/mixin/linearized_frame.py +309 -0
- dkist_processing_cryonirsp/tasks/mixin/shift_measurements.py +297 -0
- dkist_processing_cryonirsp/tasks/parse.py +281 -0
- dkist_processing_cryonirsp/tasks/quality_metrics.py +271 -0
- dkist_processing_cryonirsp/tasks/science_base.py +511 -0
- dkist_processing_cryonirsp/tasks/sp_beam_boundaries.py +270 -0
- dkist_processing_cryonirsp/tasks/sp_dispersion_axis_correction.py +484 -0
- dkist_processing_cryonirsp/tasks/sp_geometric.py +585 -0
- dkist_processing_cryonirsp/tasks/sp_science.py +299 -0
- dkist_processing_cryonirsp/tasks/sp_solar_gain.py +475 -0
- dkist_processing_cryonirsp/tasks/trial_output_data.py +61 -0
- dkist_processing_cryonirsp/tasks/write_l1.py +1033 -0
- dkist_processing_cryonirsp/tests/__init__.py +1 -0
- dkist_processing_cryonirsp/tests/conftest.py +456 -0
- dkist_processing_cryonirsp/tests/header_models.py +592 -0
- dkist_processing_cryonirsp/tests/local_trial_workflows/__init__.py +0 -0
- dkist_processing_cryonirsp/tests/local_trial_workflows/l0_cals_only.py +541 -0
- dkist_processing_cryonirsp/tests/local_trial_workflows/l0_to_l1.py +615 -0
- dkist_processing_cryonirsp/tests/local_trial_workflows/linearize_only.py +96 -0
- dkist_processing_cryonirsp/tests/local_trial_workflows/local_trial_helpers.py +592 -0
- dkist_processing_cryonirsp/tests/test_assemble_movie.py +144 -0
- dkist_processing_cryonirsp/tests/test_assemble_qualilty.py +517 -0
- dkist_processing_cryonirsp/tests/test_bad_pixel_maps.py +115 -0
- dkist_processing_cryonirsp/tests/test_ci_beam_boundaries.py +106 -0
- dkist_processing_cryonirsp/tests/test_ci_science.py +355 -0
- dkist_processing_cryonirsp/tests/test_corrections.py +126 -0
- dkist_processing_cryonirsp/tests/test_cryo_base.py +202 -0
- dkist_processing_cryonirsp/tests/test_cryo_constants.py +76 -0
- dkist_processing_cryonirsp/tests/test_dark.py +287 -0
- dkist_processing_cryonirsp/tests/test_gain.py +278 -0
- dkist_processing_cryonirsp/tests/test_instrument_polarization.py +531 -0
- dkist_processing_cryonirsp/tests/test_linearity_correction.py +245 -0
- dkist_processing_cryonirsp/tests/test_make_movie_frames.py +111 -0
- dkist_processing_cryonirsp/tests/test_parameters.py +266 -0
- dkist_processing_cryonirsp/tests/test_parse.py +1439 -0
- dkist_processing_cryonirsp/tests/test_quality.py +203 -0
- dkist_processing_cryonirsp/tests/test_sp_beam_boundaries.py +112 -0
- dkist_processing_cryonirsp/tests/test_sp_dispersion_axis_correction.py +155 -0
- dkist_processing_cryonirsp/tests/test_sp_geometric.py +319 -0
- dkist_processing_cryonirsp/tests/test_sp_make_movie_frames.py +121 -0
- dkist_processing_cryonirsp/tests/test_sp_science.py +483 -0
- dkist_processing_cryonirsp/tests/test_sp_solar.py +198 -0
- dkist_processing_cryonirsp/tests/test_trial_create_quality_report.py +79 -0
- dkist_processing_cryonirsp/tests/test_trial_output_data.py +251 -0
- dkist_processing_cryonirsp/tests/test_workflows.py +9 -0
- dkist_processing_cryonirsp/tests/test_write_l1.py +436 -0
- dkist_processing_cryonirsp/workflows/__init__.py +2 -0
- dkist_processing_cryonirsp/workflows/ci_l0_processing.py +77 -0
- dkist_processing_cryonirsp/workflows/sp_l0_processing.py +84 -0
- dkist_processing_cryonirsp/workflows/trial_workflows.py +190 -0
- dkist_processing_cryonirsp-1.3.4.dist-info/METADATA +194 -0
- dkist_processing_cryonirsp-1.3.4.dist-info/RECORD +111 -0
- dkist_processing_cryonirsp-1.3.4.dist-info/WHEEL +5 -0
- dkist_processing_cryonirsp-1.3.4.dist-info/top_level.txt +4 -0
- docs/Makefile +134 -0
- docs/bad_pixel_calibration.rst +47 -0
- docs/beam_angle_calculation.rst +53 -0
- docs/beam_boundary_computation.rst +88 -0
- docs/changelog.rst +7 -0
- docs/ci_science_calibration.rst +33 -0
- docs/conf.py +52 -0
- docs/index.rst +21 -0
- docs/l0_to_l1_cryonirsp_ci-full-trial.rst +10 -0
- docs/l0_to_l1_cryonirsp_ci.rst +10 -0
- docs/l0_to_l1_cryonirsp_sp-full-trial.rst +10 -0
- docs/l0_to_l1_cryonirsp_sp.rst +10 -0
- docs/linearization.rst +43 -0
- docs/make.bat +170 -0
- docs/requirements.txt +1 -0
- docs/requirements_table.rst +8 -0
- docs/scientific_changelog.rst +10 -0
- docs/sp_science_calibration.rst +59 -0
- licenses/LICENSE.rst +11 -0
docs/bad_pixel_calibration.rst
ADDED

@@ -0,0 +1,47 @@
Bad Pixel Calibration
============================

Introduction
------------

Both of the Cryo-NIRSP instrument cameras are known to have significant numbers of bad pixels.
The effects of these are made worse by the linearization algorithm, which can force some pixels
to be exactly zero. The algorithm described below is used to identify bad pixels and create a map
of their locations. The map is an integer array of the same size as the camera output array, with
zeros for pixels that are good and ones for pixels that are bad.

The Bad Pixel Map Algorithm
------------------------------

Compute Average Solar Gain Image
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

First we compute an average solar gain image. The solar gain is used because it has high flux
and the beam illumination pattern is the same as during normal observing.

Smooth the Average Gain Image
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

We can remove the effects of any bad pixels by smoothing the average gain image with a median filter,
filtering only along the spatial axis of the slit for SP data so as not to broaden the spectral lines, and
filtering along both the spatial and spectral axes for CI data. After the gain image has been smoothed,
the bad pixel areas should no longer be visible.

Threshold the Difference Image to Find Bad Pixels
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

Next, we subtract the smoothed image from the average gain image. Doing this allows us to identify all
pixels in the difference image whose absolute value is larger than a set threshold value times the standard
deviation of the average image (i.e., a difference larger than N standard deviations of the original image is
considered a bad pixel). The threshold value is a pipeline-settable parameter and was derived
empirically. With a too-low threshold, we start to pick up the solar spectrum in the bad pixel image. With a
too-large threshold, we start to miss bad pixels. Moreover, we currently use bad pixel corrections only for the
gain images and not for the observe images, so any potential impacts are limited. This may change in the
future.

Apply Bad Pixel Map
^^^^^^^^^^^^^^^^^^^

This bad pixel map is then applied to the lamp gain and solar gain by replacing the identified
bad pixels with the median value of the input image over a specified region. This prevents the
gain image from causing “hot” or “cold” pixels in the resulting gain-corrected input images.
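The smoothing-and-threshold logic described above maps naturally onto a few lines of NumPy/SciPy. The snippet below is an illustrative sketch only, not the implementation in dkist_processing_cryonirsp/tasks/bad_pixel_map.py; the filter size and the 5-sigma threshold are placeholder values standing in for the pipeline parameters, and the spatial (slit) axis is assumed to be axis 0.

.. code-block:: python

    import numpy as np
    from scipy.ndimage import median_filter

    def compute_bad_pixel_map(avg_solar_gain: np.ndarray, threshold: float = 5.0) -> np.ndarray:
        """Return an int map with 1 at bad pixels and 0 elsewhere (illustrative only)."""
        # Smooth along a single axis (assumed spatial axis 0) for SP-like data;
        # a CI-style map would use a 2D filter size such as (5, 5).
        smoothed = median_filter(avg_solar_gain, size=(5, 1))
        difference = avg_solar_gain - smoothed
        # Flag pixels deviating by more than `threshold` standard deviations
        # of the average gain image.
        bad = np.abs(difference) > threshold * np.std(avg_solar_gain)
        return bad.astype(int)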
docs/beam_angle_calculation.rst
ADDED

@@ -0,0 +1,53 @@
Beam Angle Computation
============================

Introduction
------------

As part of the geometric calibration, the angular rotation of the slit relative to the image axes
must be computed and then corrected. There is no fiducial mark or hairline present in the Cryo-NIRSP
images, so some other method must be used. The angular rotation is derived from the lamp gain images using
the algorithm described below. It is described for only a single beam, but is equally applicable to both
beams.

The Beam Angle Algorithm
------------------------------

Compute Average Lamp Gain Image
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

We start by using the normalized lamp gain image that is computed as part of the lamp gain calibration.
The image has been corrected for bad pixels (hot or cold) using the bad pixel map derived from average
solar gain images (see :doc:`bad pixel calibration <bad_pixel_calibration>`).

Compute the Normalized Spatial Gradient Image
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

The normalized lamp gain image contains some faint horizontal (i.e., along the spectral axis) lines. These are the result of slight
imperfections in the slit and in the grating optics, and they can be used to derive the rotation of the slit axis
relative to the image axes. By computing a normalized gradient along the spatial axis (along the slit),
we can make the faint horizontal lines more pronounced. The gradient is computed by shifting the
image along the spatial axis in both the + and - directions, computing the difference of the two images, and
normalizing by the sum of the two at each pixel position. The resulting gradient image emphasizes the horizontal
lines.

Compute the Angular Rotation
^^^^^^^^^^^^^^^^^^^^^^^^^^^^

Next we identify two long, narrow regions, or strips, along the spatial axis. The length of each strip
is one half of the slit length in pixels, and the strips are centered about the mid-point of the length of the slit
as seen on the sensor.

We then compute the median value along the spectral axis (or rows). This condenses each strip into a 1D signal
that represents the static fluctuations in the image along the spatial axis. The next step is to compute the
cross correlation of the right signal relative to the left signal (the reference). The result is the measured
shift of the right signal relative to the left. Using the measured shift between the signals and the known
separation along the spectral axis between the midpoints of the strips, we can then compute the angular
rotation using a simple arc-tangent.

Correcting the Rotational Offset
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

Finally, we remove this rotational offset so that the slit axis is aligned with the spatial axis of the image.
The angular rotation measured here using the lamp gain image is then used to correct all of the
observe images as part of the geometric corrections.
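The gradient, strip, and cross-correlation steps above can be illustrated with the NumPy/SciPy sketch below. It is a simplification and not the pipeline's shift-measurement code: axis 0 is assumed to be the spatial (slit) axis, the strip geometry and shift size are arbitrary placeholders, and the correlation peak is taken at integer-pixel precision with no sub-pixel refinement.

.. code-block:: python

    import numpy as np
    from scipy.signal import correlate

    def measure_beam_angle(lamp_gain: np.ndarray, shift_px: int = 2, strip_width: int = 50) -> float:
        """Estimate the slit rotation angle in radians (illustrative only)."""
        # Normalized spatial gradient: shift up and down along the slit,
        # difference over sum at each pixel.
        up = np.roll(lamp_gain, -shift_px, axis=0)
        down = np.roll(lamp_gain, shift_px, axis=0)
        gradient = (up - down) / (up + down)

        # Two strips, half the slit length long, centered on the slit midpoint
        # and separated along the spectral axis (axis 1).
        n_spatial, n_spectral = gradient.shape
        rows = slice(n_spatial // 4, 3 * n_spatial // 4)
        left_cols = slice(n_spectral // 4 - strip_width // 2, n_spectral // 4 + strip_width // 2)
        right_cols = slice(3 * n_spectral // 4 - strip_width // 2, 3 * n_spectral // 4 + strip_width // 2)

        # Median over the spectral axis condenses each strip to a 1D spatial signal.
        left_signal = np.median(gradient[rows, left_cols], axis=1)
        right_signal = np.median(gradient[rows, right_cols], axis=1)

        # Cross-correlate the right signal against the left (reference) signal.
        xcorr = correlate(right_signal - right_signal.mean(), left_signal - left_signal.mean(), mode="full")
        measured_shift = np.argmax(xcorr) - (left_signal.size - 1)

        # Known spectral separation between the strip midpoints -> angle via arctangent.
        strip_separation = n_spectral // 2
        return float(np.arctan2(measured_shift, strip_separation))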
docs/beam_boundary_computation.rst
ADDED

@@ -0,0 +1,88 @@
Beam Boundary Computation
============================

Introduction
------------

Raw Cryo-NIRSP frames are not fully illuminated; in some areas, there is no illumination at all.
These areas should not be used for science processing and
must be accounted for. Moreover, the illuminated region(s) will change slightly depending
on the optical alignment of the instrument. Hence, we cannot use a fixed set
of beam boundary constants from which to extract the illuminated regions. Instead we need to
compute these regions as part of the calibration process.

The Beam Boundary Algorithm
------------------------------

The basic idea of the algorithm is to use an average solar gain image and apply a segmentation
algorithm to it that separates the image into illuminated pixels and non-illuminated pixels.
(Note: We use solar gain images because they have larger flux than the lamp gain images,
and the lamp gain images do not have the same illumination pattern as the solar
gain images. Therefore, in order to make sure the beam boundaries match the on-sky data,
and are as correct as they can be, we must use solar gain images. We only use a single frame because the
illuminated portion of the sensor is always constant.)
The segmentation must be robust enough to have all pixels within a well-defined outer boundary
considered to be illuminated. This is sufficient to extract an illuminated region from a single
beam for the Context Imager (CI). For the Spectropolarimeter (SP), however, we need to identify
the separate beam regions and perform an initial alignment prior to computing the final individual
beam boundaries.

Compute the Average Solar Gain Image
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

We start by computing an average solar gain image. Because we are interested only in identifying
the regions of the sensor that are illuminated, we do not need to perform any dark correction.
Simply averaging the input solar gain images is sufficient.

Correct Bad Pixels Using the Median Filter
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

The effects of the bad pixels must be corrected; otherwise they might be mistaken for non-illuminated
regions. We correct these bad pixels using the :doc:`bad pixel calibration algorithm <bad_pixel_calibration>`
and by applying a median filter to only those pixels that are flagged as bad. We use the masked array feature
of numpy to achieve this filtering. For the SP, the correction algorithm is applied only in the spatial
direction along the slit. For the CI, it is applied along both axes. For this application, the
difference does not matter, as all we are doing is identifying illuminated pixels. However, later on, when
the solar gain array is computed, the correction must not broaden the spectral lines, and so the restriction
is important.

Segment the Image Into Illuminated and Non-Illuminated Regions
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

Next we smooth the entire array so the image segmentation algorithm does not recognize the absorption
lines as part of the non-illuminated region. The smoothed solar gain array is then used as the input to an
image segmentation algorithm. The `Scikit Image threshold minimum <https://scikit-image.org/docs/stable/api/skimage.filters.html#skimage.filters.threshold_minimum>`_
method is used to find the threshold between light and dark, and this threshold is then
used to generate a boolean map describing which pixels are illuminated (True) and which pixels are non-illuminated
(False). However, this map cannot be used to easily extract illuminated regions because it is not guaranteed to be
contiguous. Ideally, we would like to be able to identify a slice that can be used to extract the illuminated
region as a rectangular array.

Split Into Two Separate Beam Images
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

Now that we know the illuminated boundaries, we can identify and extract the largest inscribed rectangular
region within the illuminated map. This is done using the `largestinteriorrectangle package <https://pypi.org/project/largestinteriorrectangle/>`_.
We split the image into two beam images, avoiding a 10% spectral region
surrounding the beam boundary in the middle of the image.

Compute Relative Horizontal Shift of Right Beam to Left Beam
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

There is a relative shift between these two images, so we need to determine that shift and compute the
boundaries of each so that they overlap properly to within a single horizontal pixel. Note that the rotational
differences and any additional shifts required to align the images are computed later as part of the geometric
calibration. The horizontal shift of the right beam relative to the left is computed using the
`Scikit Image phase cross correlation <https://scikit-image.org/docs/stable/api/skimage.registration.html#skimage.registration.phase_cross_correlation>`_
method.

Compute Beam Boundaries for Identical Size Overlapping Regions
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

The final step is to use the shift to adjust the boundaries of each beam image such that both images
have the same horizontal dimension and represent essentially the same spatial and spectral regions
(as mentioned above, final adjustments will come later in the geometric calibration). Moreover, the beam
boundaries must be defined so that they represent slices into the original image and can then be used
later to extract each beam from any type of input image (dark, gain, polcal, observe).
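Because the page above names the third-party routines it relies on (scikit-image's threshold_minimum and phase_cross_correlation, and the largestinteriorrectangle package), the core of the computation can be sketched directly. The snippet below is a simplified illustration, not the pipeline's beam-boundaries task: it skips the smoothing step and the 10% margin around the beam split, and it assumes the largest-interior-rectangle result is ordered (x, y, width, height) as in that package's documentation.

.. code-block:: python

    import numpy as np
    import largestinteriorrectangle as lir
    from skimage.filters import threshold_minimum
    from skimage.registration import phase_cross_correlation

    def illuminated_rectangle(avg_solar_gain: np.ndarray) -> tuple[slice, slice]:
        """Largest rectangular region fully inside the illuminated area (illustrative)."""
        threshold = threshold_minimum(avg_solar_gain)   # light/dark threshold
        illuminated = avg_solar_gain > threshold        # boolean illumination map
        x, y, width, height = lir.lir(illuminated)      # assumed (x, y, w, h) ordering
        return slice(y, y + height), slice(x, x + width)

    def horizontal_beam_shift(left_beam: np.ndarray, right_beam: np.ndarray) -> float:
        """Shift of the right beam relative to the left along the spectral axis (axis 1)."""
        shift, _, _ = phase_cross_correlation(left_beam, right_beam)
        return float(shift[1])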
docs/ci_science_calibration.rst
ADDED

@@ -0,0 +1,33 @@
CI L1 Science Calibration
=========================

Introduction
------------

The `~dkist_processing_cryonirsp.tasks.ci_science` module takes L0 input science frames and fully calibrates
them into L1 science products. This page describes the basic steps in this process as well as important
features of the Cryo-NIRSP Context Imager (CI) algorithm that may not be obvious.

L1 Coordinate System
^^^^^^^^^^^^^^^^^^^^

The final step of the science pipeline places L1 data into a coordinate frame that matches the coordinates used by
SDO/HMI and Hinode-SP. Namely, -Q and +Q will be aligned parallel and perpendicular to the central meridian of the Sun,
respectively.

Algorithm
---------

Input CI science data is processed into L1 science data via the following steps:

#. Dark signals are subtracted from linearized input data.

#. :doc:`Bad Pixel correction <bad_pixel_calibration>` is done.

#. A solar gain calibration frame is divided from the data.

#. If the data are polarimetric, demodulation matrices are applied.

#. If the data are polarimetric, the Telescope Polarization is removed. This removes the polarization effects of all DKIST mirrors upstream
   of Cryo-NIRSP. This step also includes the rotation into the coordinate frame described above.
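The numbered steps above translate into a short calibration chain. The sketch below is a minimal stand-in, not the ci_science task itself: it assumes NumPy arrays, a (4, n_modstates) demodulation matrix, and a crude median-based bad-pixel replacement, and it omits the telescope polarization removal entirely.

.. code-block:: python

    import numpy as np

    def calibrate_ci_frames(linearized_frames, dark, bad_pixel_map, solar_gain, demod_matrix=None):
        """Apply dark, bad-pixel, gain, and (optionally) demodulation corrections (illustrative)."""
        corrected = []
        for frame in linearized_frames:
            frame = frame - dark                                   # 1. dark subtraction
            fill_value = np.nanmedian(frame)                       # 2. crude bad-pixel replacement
            frame = np.where(bad_pixel_map == 1, fill_value, frame)
            frame = frame / solar_gain                             # 3. solar gain division
            corrected.append(frame)
        stack = np.stack(corrected)                                # (n_modstates, ny, nx)
        if demod_matrix is not None:                               # 4. demodulation -> Stokes I, Q, U, V
            stack = np.tensordot(demod_matrix, stack, axes=([1], [0]))
        return stack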
docs/conf.py
ADDED
@@ -0,0 +1,52 @@
"""Configuration file for the Sphinx documentation builder."""
# -- stdlib imports ------------------------------------------------------------
import importlib
import sys
import warnings
from importlib.metadata import distribution

from dkist_sphinx_theme.conf import *
from dkist_sphinx_theme.create_intersphinx_mapping import create_intersphinx_mapping
from packaging.version import Version

# Need a name for the overall repo
# __name__ where this code executes is "builtins" so that is no help
repo_name = "dkist-processing-cryonirsp"
package_name = repo_name.replace("-", "_")

dist = distribution(package_name)
package = importlib.import_module(package_name)

# -- Check for docs dependencies ----------------------------------------------------
missing_requirements = missing_dependencies_by_extra(package_name, extras=["docs"])
if missing_requirements["docs"]:
    print(
        f"The {' '.join(missing_requirements['docs'])} package(s) could not be found and "
        "is needed to build the documentation, please install the 'docs' requirements."
    )
    sys.exit(1)

# auto api parameters that cannot be moved into the theme:
autoapi_dirs = [Path(package.__file__).parent]
# Uncomment this for debugging
# autoapi_keep_files = True

# -- Options for intersphinx extension -----------------------------------------
intersphinx_mapping = create_intersphinx_mapping(repo_name)
# Remaining sphinx settings are in dkist-sphinx-theme conf.py

# -- Project information -------------------------------------------------------
project = "DKIST-PROCESSING-CRYONIRSP"

# The full version, including alpha/beta/rc tags
dkist_version = Version(dist.version)
is_release = not (dkist_version.is_prerelease or dkist_version.is_devrelease)
# We want to ignore all warnings in a release version.
if is_release:
    warnings.simplefilter("ignore")

# Extensions so we can create the reqmts table and the workflow diagram
extensions += [
    "dkist_sphinx_theme.create_requirements_table",
    "dkist_sphinx_theme.create_workflow_diagram",
]
docs/index.rst
ADDED
@@ -0,0 +1,21 @@
.. include:: ../README.rst

.. toctree::
   :maxdepth: 2
   :hidden:

   self
   l0_to_l1_cryonirsp_ci
   l0_to_l1_cryonirsp_sp
   l0_to_l1_cryonirsp_ci-full-trial
   l0_to_l1_cryonirsp_sp-full-trial
   scientific_changelog
   linearization
   bad_pixel_calibration
   beam_boundary_computation
   beam_angle_calculation
   ci_science_calibration
   sp_science_calibration
   autoapi/index
   requirements_table
   changelog
docs/l0_to_l1_cryonirsp_ci-full-trial.rst
ADDED

@@ -0,0 +1,10 @@
l0_to_l1_cryonirsp_ci-full-trial
================================

This trial workflow is designed for pipeline testing internal to the DKIST Data Center (DC). It runs the full science
pipeline, but stops short of publishing the results or activating downstream DC services. The pipeline products
are transferred to an internal location where they can be examined by DC personnel or DKIST scientists.

For more detail on each workflow task, you can click on the task in the diagram.

.. workflow_diagram:: dkist_processing_cryonirsp.workflows.trial_workflows.full_trial_ci_pipeline

docs/l0_to_l1_cryonirsp_ci.rst
ADDED

@@ -0,0 +1,10 @@
l0_to_l1_cryonirsp_ci
=====================

In the normal Cryo-NIRSP Context Imager (CI) operating mode, raw Cryo-NIRSP data are gathered at the summit and
delivered to the Data Center. The Data Center then calibrates this data and prepares it for storage using the
following workflow.

For more detail on each workflow task, you can click on the task in the diagram.

.. workflow_diagram:: dkist_processing_cryonirsp.workflows.ci_l0_processing.l0_pipeline

docs/l0_to_l1_cryonirsp_sp-full-trial.rst
ADDED

@@ -0,0 +1,10 @@
l0_to_l1_cryonirsp_sp-full-trial
================================

This trial workflow is designed for pipeline testing internal to the DKIST Data Center (DC). It runs the full science
pipeline, but stops short of publishing the results or activating downstream DC services. The pipeline products
are transferred to an internal location where they can be examined by DC personnel or DKIST scientists.

For more detail on each workflow task, you can click on the task in the diagram.

.. workflow_diagram:: dkist_processing_cryonirsp.workflows.trial_workflows.full_trial_sp_pipeline

docs/l0_to_l1_cryonirsp_sp.rst
ADDED

@@ -0,0 +1,10 @@
l0_to_l1_cryonirsp_sp
=====================

In the normal Cryo-NIRSP Spectropolarimetric (SP) operating mode, raw Cryo-NIRSP data are gathered at the summit
and delivered to the Data Center. The Data Center then calibrates this data and prepares it for storage using
the following workflow.

For more detail on each workflow task, you can click on the task in the diagram.

.. workflow_diagram:: dkist_processing_cryonirsp.workflows.sp_l0_processing.l0_pipeline
docs/linearization.rst
ADDED
@@ -0,0 +1,43 @@
Linearization
=============

Introduction
------------

The Cryo-NIRSP camera has an H2RG detector, which has a non-linear response to light with
increasing exposure time. This can vary from pixel to pixel. Because of the non-linear response, the count
values at the final exposure do not accurately represent the light falling on the chip and therefore need to
be corrected. We call this correction ‘linearization’. During an exposure, the camera
reads out multiple frames and saves them while it continues to expose. The accumulated charge on the chip is
not erased when this happens, so these frames are referred to as Non-Destructive Readouts (NDRs). The NDRs are
saved at a pre-selected rate that is a fraction of the overall desired exposure time for a
complete NDR. A set of NDRs with exposure times varying from 0 to the final desired exposure time is
called a ramp. The linearization process reads all of the NDRs associated with a particular ramp and then
applies an algorithm to compute what the final count values at the desired exposure time would be if the
response of the detector were linear. Basically, it takes the ramp set and "linearizes" it to produce a single
NDR that has the correct counts at each pixel.

A ramp can have many frames (anywhere from 10 to 100 or more), and the linearization
algorithm is agnostic to just about everything except the final exposure time. So after linearization, we
have over 10x fewer frames to process, and the pipeline proceeds as though the linearized frames are raw input frames.

The Linearization Algorithm
---------------------------

The Algorithm
^^^^^^^^^^^^^

First, we identify all NDRs from a single exposure of the Cryo-NIRSP cameras (i.e., a ramp set). A ramp is
identified as all the files having the same DATE-OBS value. Note: If a ramp set contains only a single frame,
it is discarded.

Next, we need to identify the NDRs that fall in the “linear” portion of the chip response. These are the
NDRs whose pixel value is below a pre-computed threshold value; i.e., the threshold values define the upper limit
of the linear response of each pixel.

Then, we linearize this ramp set by normalizing the measured raw flux by the final exposure time of the ramp set.
Essentially, this removes the non-linear response of each pixel in the array. The resulting ramp set is approximately
linear in ADUs vs. exposure time.

Finally, we normalize the array by converting to counts per second and correct the counts for the Optical Density
filter used during observations.
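As a rough illustration of the thresholding and exposure-time normalization described above, a schematic NumPy version might look like the following. The real implementation lives in dkist_processing_cryonirsp/tasks/linearity_correction.py and is considerably more involved; here the per-pixel threshold handling, the simple averaging of count rates, and the optical-density correction factor are all simplifying assumptions for illustration.

.. code-block:: python

    import numpy as np

    def linearize_ramp(ndr_stack, exposure_times, thresholds, od_transmission=1.0):
        """Collapse one ramp of NDRs into a single frame in counts/second (illustrative).

        ndr_stack:       (n_ndr, ny, nx) raw NDRs sharing the same DATE-OBS.
        exposure_times:  (n_ndr,) cumulative exposure time of each NDR in seconds.
        thresholds:      (ny, nx) per-pixel upper limit of the linear regime in ADU.
        od_transmission: fractional transmission of the optical density filter in use.
        """
        # Drop the zero-exposure readout, then mask samples above the per-pixel
        # linear-regime threshold.
        frames, times = ndr_stack[1:], exposure_times[1:]
        in_linear_regime = frames < thresholds[np.newaxis, :, :]
        masked = np.where(in_linear_regime, frames, np.nan)

        # Normalize each valid sample by its exposure time and average, giving a
        # per-pixel count rate over the linear portion of the ramp.
        rates = masked / times[:, np.newaxis, np.newaxis]
        counts_per_second = np.nanmean(rates, axis=0)

        # Correct for the attenuation of the optical density filter.
        return counts_per_second / od_transmission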
docs/make.bat
ADDED
@@ -0,0 +1,170 @@
@ECHO OFF

REM Command file for Sphinx documentation

if "%SPHINXBUILD%" == "" (
    set SPHINXBUILD=sphinx-build
)
set BUILDDIR=_build
set ALLSPHINXOPTS=-d %BUILDDIR%/doctrees %SPHINXOPTS% .
if NOT "%PAPER%" == "" (
    set ALLSPHINXOPTS=-D latex_paper_size=%PAPER% %ALLSPHINXOPTS%
)

if "%1" == "" goto help

if "%1" == "help" (
    :help
    echo.Please use `make ^<target^>` where ^<target^> is one of
    echo.  html        to make standalone HTML files
    echo.  dirhtml     to make HTML files named index.html in directories
    echo.  singlehtml  to make a single large HTML file
    echo.  pickle      to make pickle files
    echo.  json        to make JSON files
    echo.  htmlhelp    to make HTML files and a HTML help project
    echo.  qthelp      to make HTML files and a qthelp project
    echo.  devhelp     to make HTML files and a Devhelp project
    echo.  epub        to make an epub
    echo.  latex       to make LaTeX files, you can set PAPER=a4 or PAPER=letter
    echo.  text        to make text files
    echo.  man         to make manual pages
    echo.  changes     to make an overview over all changed/added/deprecated items
    echo.  linkcheck   to check all external links for integrity
    echo.  doctest     to run all doctests embedded in the documentation if enabled
    goto end
)

if "%1" == "clean" (
    for /d %%i in (%BUILDDIR%\*) do rmdir /q /s %%i
    del /q /s %BUILDDIR%\*
    goto end
)

if "%1" == "html" (
    %SPHINXBUILD% -b html %ALLSPHINXOPTS% %BUILDDIR%/html
    if errorlevel 1 exit /b 1
    echo.
    echo.Build finished. The HTML pages are in %BUILDDIR%/html.
    goto end
)

if "%1" == "dirhtml" (
    %SPHINXBUILD% -b dirhtml %ALLSPHINXOPTS% %BUILDDIR%/dirhtml
    if errorlevel 1 exit /b 1
    echo.
    echo.Build finished. The HTML pages are in %BUILDDIR%/dirhtml.
    goto end
)

if "%1" == "singlehtml" (
    %SPHINXBUILD% -b singlehtml %ALLSPHINXOPTS% %BUILDDIR%/singlehtml
    if errorlevel 1 exit /b 1
    echo.
    echo.Build finished. The HTML pages are in %BUILDDIR%/singlehtml.
    goto end
)

if "%1" == "pickle" (
    %SPHINXBUILD% -b pickle %ALLSPHINXOPTS% %BUILDDIR%/pickle
    if errorlevel 1 exit /b 1
    echo.
    echo.Build finished; now you can process the pickle files.
    goto end
)

if "%1" == "json" (
    %SPHINXBUILD% -b json %ALLSPHINXOPTS% %BUILDDIR%/json
    if errorlevel 1 exit /b 1
    echo.
    echo.Build finished; now you can process the JSON files.
    goto end
)

if "%1" == "htmlhelp" (
    %SPHINXBUILD% -b htmlhelp %ALLSPHINXOPTS% %BUILDDIR%/htmlhelp
    if errorlevel 1 exit /b 1
    echo.
    echo.Build finished; now you can run HTML Help Workshop with the ^
.hhp project file in %BUILDDIR%/htmlhelp.
    goto end
)

if "%1" == "qthelp" (
    %SPHINXBUILD% -b qthelp %ALLSPHINXOPTS% %BUILDDIR%/qthelp
    if errorlevel 1 exit /b 1
    echo.
    echo.Build finished; now you can run "qcollectiongenerator" with the ^
.qhcp project file in %BUILDDIR%/qthelp, like this:
    echo.^> qcollectiongenerator %BUILDDIR%\qthelp\Astropy.qhcp
    echo.To view the help file:
    echo.^> assistant -collectionFile %BUILDDIR%\qthelp\Astropy.ghc
    goto end
)

if "%1" == "devhelp" (
    %SPHINXBUILD% -b devhelp %ALLSPHINXOPTS% %BUILDDIR%/devhelp
    if errorlevel 1 exit /b 1
    echo.
    echo.Build finished.
    goto end
)

if "%1" == "epub" (
    %SPHINXBUILD% -b epub %ALLSPHINXOPTS% %BUILDDIR%/epub
    if errorlevel 1 exit /b 1
    echo.
    echo.Build finished. The epub file is in %BUILDDIR%/epub.
    goto end
)

if "%1" == "latex" (
    %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex
    if errorlevel 1 exit /b 1
    echo.
    echo.Build finished; the LaTeX files are in %BUILDDIR%/latex.
    goto end
)

if "%1" == "text" (
    %SPHINXBUILD% -b text %ALLSPHINXOPTS% %BUILDDIR%/text
    if errorlevel 1 exit /b 1
    echo.
    echo.Build finished. The text files are in %BUILDDIR%/text.
    goto end
)

if "%1" == "man" (
    %SPHINXBUILD% -b man %ALLSPHINXOPTS% %BUILDDIR%/man
    if errorlevel 1 exit /b 1
    echo.
    echo.Build finished. The manual pages are in %BUILDDIR%/man.
    goto end
)

if "%1" == "changes" (
    %SPHINXBUILD% -b changes %ALLSPHINXOPTS% %BUILDDIR%/changes
    if errorlevel 1 exit /b 1
    echo.
    echo.The overview file is in %BUILDDIR%/changes.
    goto end
)

if "%1" == "linkcheck" (
    %SPHINXBUILD% -b linkcheck %ALLSPHINXOPTS% %BUILDDIR%/linkcheck
    if errorlevel 1 exit /b 1
    echo.
    echo.Link check complete; look for any errors in the above output ^
or in %BUILDDIR%/linkcheck/output.txt.
    goto end
)

if "%1" == "doctest" (
    %SPHINXBUILD% -b doctest %ALLSPHINXOPTS% %BUILDDIR%/doctest
    if errorlevel 1 exit /b 1
    echo.
    echo.Testing of doctests in the sources finished, look at the ^
results in %BUILDDIR%/doctest/output.txt.
    goto end
)

:end
docs/requirements.txt
ADDED
@@ -0,0 +1 @@
# git deps for read the docs

docs/requirements_table.rst
ADDED

@@ -0,0 +1,8 @@
Package Requirements
====================

This table shows the main Python packages and their respective
versions that were used to build the calibration codes documented
here. All of these packages are available on `PyPI <https://pypi.org>`_.

.. requirements_table:: dkist-processing-cryonirsp

docs/scientific_changelog.rst
ADDED

@@ -0,0 +1,10 @@
Scientific Changelog
####################

This page distills the verbosity of the :doc:`full Changelog <changelog>` into only those changes that affect the
scientific quality of the L1 data.

.. changelog::
   :towncrier:
   :towncrier-skip-if-empty:
   :changelog_file: ../SCIENCE_CHANGELOG.rst
docs/sp_science_calibration.rst
ADDED

@@ -0,0 +1,59 @@
SP L1 Science Calibration
=========================

Introduction
------------

The `~dkist_processing_cryonirsp.tasks.sp_science` module takes L0 input science frames and fully calibrates
them into L1 science products. This page describes the basic steps in this process as well as important
features of the Cryo-NIRSP Spectropolarimetric (SP) algorithm that may not be obvious.

Important Features
------------------

Beam Combination
^^^^^^^^^^^^^^^^

Apart from the order in which the basic corrections are applied (described below), it is important to state how the two
polarimetric beams of Cryo-NIRSP are combined to produce a single L1 data frame. After demodulation, the 4 Stokes components of
the two beams are combined as follows:

.. math::

    I_{comb} &= (I_1 + I_2) / 2 \\
    Q_{comb} &= I_{comb} \left(\frac{Q_1}{I_1} + \frac{Q_2}{I_2}\right) / 2 \\
    U_{comb} &= I_{comb} \left(\frac{U_1}{I_1} + \frac{U_2}{I_2}\right) / 2 \\
    V_{comb} &= I_{comb} \left(\frac{V_1}{I_1} + \frac{V_2}{I_2}\right) / 2,

where numbered subscripts correspond to beam number. This combination scheme improves the signal-to-noise of the data
and mitigates residual polarization artifacts caused by temporal modulation (e.g., atmospheric seeing).

L1 Coordinate System
^^^^^^^^^^^^^^^^^^^^

The final step of the science pipeline places L1 data into a coordinate frame that matches the coordinates used by
SDO/HMI and Hinode-SP. Namely, -Q and +Q will be aligned parallel and perpendicular to the central meridian of the Sun,
respectively.

Algorithm
---------

Input SP science data is processed into L1 science data via the following steps:

#. Dark signals are subtracted from linearized input data.

#. :doc:`Bad Pixel correction <bad_pixel_calibration>` is done.

#. A solar gain calibration frame is divided from the data.

#. If the data are polarimetric, demodulation matrices are applied.

#. Geometric distortions (spectral rotation, x/y shift, spectral curvature) are removed via interpolation.
   This step aligns the dispersion axis with a pixel axis, places both beams on the same pixel grid, and
   straightens the spectra so that a single spectral pixel corresponds to the same physical wavelength for
   all locations along the slit.

#. The beams are combined as described above. For non-polarimetric data, the combination is a simple average.

#. If the data are polarimetric, the Telescope Polarization is removed. This removes the polarization effects of all DKIST mirrors upstream
   of Cryo-NIRSP. This step also includes the rotation into the coordinate frame described above.
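The beam-combination equations in docs/sp_science_calibration.rst translate directly into array operations. The sketch below assumes each demodulated beam is a (4, ny, nx) array ordered (I, Q, U, V); the ordering and shapes are assumptions for illustration, not the sp_science task's actual data structures.

.. code-block:: python

    import numpy as np

    def combine_beams(beam1: np.ndarray, beam2: np.ndarray) -> np.ndarray:
        """Combine two demodulated (4, ny, nx) Stokes cubes per the documented equations."""
        i1, i2 = beam1[0], beam2[0]
        i_comb = (i1 + i2) / 2.0
        combined = [i_comb]
        for s in (1, 2, 3):  # Q, U, V
            combined.append(i_comb * (beam1[s] / i1 + beam2[s] / i2) / 2.0)
        return np.stack(combined)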
licenses/LICENSE.rst
ADDED
@@ -0,0 +1,11 @@
Copyright 2021 National Solar Observatory

Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:

Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.

Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.

Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.