pyreduce-astro 0.7a4 cp314-cp314-win_amd64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (182)
  1. pyreduce/__init__.py +67 -0
  2. pyreduce/__main__.py +322 -0
  3. pyreduce/cli.py +342 -0
  4. pyreduce/clib/Release/_slitfunc_2d.cp311-win_amd64.exp +0 -0
  5. pyreduce/clib/Release/_slitfunc_2d.cp311-win_amd64.lib +0 -0
  6. pyreduce/clib/Release/_slitfunc_2d.cp312-win_amd64.exp +0 -0
  7. pyreduce/clib/Release/_slitfunc_2d.cp312-win_amd64.lib +0 -0
  8. pyreduce/clib/Release/_slitfunc_2d.cp313-win_amd64.exp +0 -0
  9. pyreduce/clib/Release/_slitfunc_2d.cp313-win_amd64.lib +0 -0
  10. pyreduce/clib/Release/_slitfunc_2d.cp314-win_amd64.exp +0 -0
  11. pyreduce/clib/Release/_slitfunc_2d.cp314-win_amd64.lib +0 -0
  12. pyreduce/clib/Release/_slitfunc_2d.obj +0 -0
  13. pyreduce/clib/Release/_slitfunc_bd.cp311-win_amd64.exp +0 -0
  14. pyreduce/clib/Release/_slitfunc_bd.cp311-win_amd64.lib +0 -0
  15. pyreduce/clib/Release/_slitfunc_bd.cp312-win_amd64.exp +0 -0
  16. pyreduce/clib/Release/_slitfunc_bd.cp312-win_amd64.lib +0 -0
  17. pyreduce/clib/Release/_slitfunc_bd.cp313-win_amd64.exp +0 -0
  18. pyreduce/clib/Release/_slitfunc_bd.cp313-win_amd64.lib +0 -0
  19. pyreduce/clib/Release/_slitfunc_bd.cp314-win_amd64.exp +0 -0
  20. pyreduce/clib/Release/_slitfunc_bd.cp314-win_amd64.lib +0 -0
  21. pyreduce/clib/Release/_slitfunc_bd.obj +0 -0
  22. pyreduce/clib/__init__.py +0 -0
  23. pyreduce/clib/_slitfunc_2d.cp311-win_amd64.pyd +0 -0
  24. pyreduce/clib/_slitfunc_2d.cp312-win_amd64.pyd +0 -0
  25. pyreduce/clib/_slitfunc_2d.cp313-win_amd64.pyd +0 -0
  26. pyreduce/clib/_slitfunc_2d.cp314-win_amd64.pyd +0 -0
  27. pyreduce/clib/_slitfunc_bd.cp311-win_amd64.pyd +0 -0
  28. pyreduce/clib/_slitfunc_bd.cp312-win_amd64.pyd +0 -0
  29. pyreduce/clib/_slitfunc_bd.cp313-win_amd64.pyd +0 -0
  30. pyreduce/clib/_slitfunc_bd.cp314-win_amd64.pyd +0 -0
  31. pyreduce/clib/build_extract.py +75 -0
  32. pyreduce/clib/slit_func_2d_xi_zeta_bd.c +1313 -0
  33. pyreduce/clib/slit_func_2d_xi_zeta_bd.h +55 -0
  34. pyreduce/clib/slit_func_bd.c +362 -0
  35. pyreduce/clib/slit_func_bd.h +17 -0
  36. pyreduce/clipnflip.py +147 -0
  37. pyreduce/combine_frames.py +861 -0
  38. pyreduce/configuration.py +191 -0
  39. pyreduce/continuum_normalization.py +329 -0
  40. pyreduce/cwrappers.py +404 -0
  41. pyreduce/datasets.py +238 -0
  42. pyreduce/echelle.py +413 -0
  43. pyreduce/estimate_background_scatter.py +130 -0
  44. pyreduce/extract.py +1362 -0
  45. pyreduce/extraction_width.py +77 -0
  46. pyreduce/instruments/__init__.py +0 -0
  47. pyreduce/instruments/aj.py +9 -0
  48. pyreduce/instruments/aj.yaml +51 -0
  49. pyreduce/instruments/andes.py +102 -0
  50. pyreduce/instruments/andes.yaml +72 -0
  51. pyreduce/instruments/common.py +711 -0
  52. pyreduce/instruments/common.yaml +57 -0
  53. pyreduce/instruments/crires_plus.py +103 -0
  54. pyreduce/instruments/crires_plus.yaml +101 -0
  55. pyreduce/instruments/filters.py +195 -0
  56. pyreduce/instruments/harpn.py +203 -0
  57. pyreduce/instruments/harpn.yaml +140 -0
  58. pyreduce/instruments/harps.py +312 -0
  59. pyreduce/instruments/harps.yaml +144 -0
  60. pyreduce/instruments/instrument_info.py +140 -0
  61. pyreduce/instruments/jwst_miri.py +29 -0
  62. pyreduce/instruments/jwst_miri.yaml +53 -0
  63. pyreduce/instruments/jwst_niriss.py +98 -0
  64. pyreduce/instruments/jwst_niriss.yaml +60 -0
  65. pyreduce/instruments/lick_apf.py +35 -0
  66. pyreduce/instruments/lick_apf.yaml +60 -0
  67. pyreduce/instruments/mcdonald.py +123 -0
  68. pyreduce/instruments/mcdonald.yaml +56 -0
  69. pyreduce/instruments/metis_ifu.py +45 -0
  70. pyreduce/instruments/metis_ifu.yaml +62 -0
  71. pyreduce/instruments/metis_lss.py +45 -0
  72. pyreduce/instruments/metis_lss.yaml +62 -0
  73. pyreduce/instruments/micado.py +45 -0
  74. pyreduce/instruments/micado.yaml +62 -0
  75. pyreduce/instruments/models.py +257 -0
  76. pyreduce/instruments/neid.py +156 -0
  77. pyreduce/instruments/neid.yaml +61 -0
  78. pyreduce/instruments/nirspec.py +215 -0
  79. pyreduce/instruments/nirspec.yaml +63 -0
  80. pyreduce/instruments/nte.py +42 -0
  81. pyreduce/instruments/nte.yaml +55 -0
  82. pyreduce/instruments/uves.py +46 -0
  83. pyreduce/instruments/uves.yaml +65 -0
  84. pyreduce/instruments/xshooter.py +39 -0
  85. pyreduce/instruments/xshooter.yaml +63 -0
  86. pyreduce/make_shear.py +607 -0
  87. pyreduce/masks/mask_crires_plus_det1.fits.gz +0 -0
  88. pyreduce/masks/mask_crires_plus_det2.fits.gz +0 -0
  89. pyreduce/masks/mask_crires_plus_det3.fits.gz +0 -0
  90. pyreduce/masks/mask_ctio_chiron.fits.gz +0 -0
  91. pyreduce/masks/mask_elodie.fits.gz +0 -0
  92. pyreduce/masks/mask_feros3.fits.gz +0 -0
  93. pyreduce/masks/mask_flames_giraffe.fits.gz +0 -0
  94. pyreduce/masks/mask_harps_blue.fits.gz +0 -0
  95. pyreduce/masks/mask_harps_red.fits.gz +0 -0
  96. pyreduce/masks/mask_hds_blue.fits.gz +0 -0
  97. pyreduce/masks/mask_hds_red.fits.gz +0 -0
  98. pyreduce/masks/mask_het_hrs_2x5.fits.gz +0 -0
  99. pyreduce/masks/mask_jwst_miri_lrs_slitless.fits.gz +0 -0
  100. pyreduce/masks/mask_jwst_niriss_gr700xd.fits.gz +0 -0
  101. pyreduce/masks/mask_lick_apf_.fits.gz +0 -0
  102. pyreduce/masks/mask_mcdonald.fits.gz +0 -0
  103. pyreduce/masks/mask_nes.fits.gz +0 -0
  104. pyreduce/masks/mask_nirspec_nirspec.fits.gz +0 -0
  105. pyreduce/masks/mask_sarg.fits.gz +0 -0
  106. pyreduce/masks/mask_sarg_2x2a.fits.gz +0 -0
  107. pyreduce/masks/mask_sarg_2x2b.fits.gz +0 -0
  108. pyreduce/masks/mask_subaru_hds_red.fits.gz +0 -0
  109. pyreduce/masks/mask_uves_blue.fits.gz +0 -0
  110. pyreduce/masks/mask_uves_blue_binned_2_2.fits.gz +0 -0
  111. pyreduce/masks/mask_uves_middle.fits.gz +0 -0
  112. pyreduce/masks/mask_uves_middle_2x2_split.fits.gz +0 -0
  113. pyreduce/masks/mask_uves_middle_binned_2_2.fits.gz +0 -0
  114. pyreduce/masks/mask_uves_red.fits.gz +0 -0
  115. pyreduce/masks/mask_uves_red_2x2.fits.gz +0 -0
  116. pyreduce/masks/mask_uves_red_2x2_split.fits.gz +0 -0
  117. pyreduce/masks/mask_uves_red_binned_2_2.fits.gz +0 -0
  118. pyreduce/masks/mask_xshooter_nir.fits.gz +0 -0
  119. pyreduce/pipeline.py +619 -0
  120. pyreduce/rectify.py +138 -0
  121. pyreduce/reduce.py +2065 -0
  122. pyreduce/settings/settings_AJ.json +19 -0
  123. pyreduce/settings/settings_ANDES.json +89 -0
  124. pyreduce/settings/settings_CRIRES_PLUS.json +89 -0
  125. pyreduce/settings/settings_HARPN.json +73 -0
  126. pyreduce/settings/settings_HARPS.json +69 -0
  127. pyreduce/settings/settings_JWST_MIRI.json +55 -0
  128. pyreduce/settings/settings_JWST_NIRISS.json +55 -0
  129. pyreduce/settings/settings_LICK_APF.json +62 -0
  130. pyreduce/settings/settings_MCDONALD.json +58 -0
  131. pyreduce/settings/settings_METIS_IFU.json +77 -0
  132. pyreduce/settings/settings_METIS_LSS.json +77 -0
  133. pyreduce/settings/settings_MICADO.json +78 -0
  134. pyreduce/settings/settings_NEID.json +73 -0
  135. pyreduce/settings/settings_NIRSPEC.json +58 -0
  136. pyreduce/settings/settings_NTE.json +60 -0
  137. pyreduce/settings/settings_UVES.json +54 -0
  138. pyreduce/settings/settings_XSHOOTER.json +78 -0
  139. pyreduce/settings/settings_pyreduce.json +184 -0
  140. pyreduce/settings/settings_schema.json +850 -0
  141. pyreduce/tools/__init__.py +0 -0
  142. pyreduce/tools/combine.py +117 -0
  143. pyreduce/trace.py +979 -0
  144. pyreduce/util.py +1366 -0
  145. pyreduce/wavecal/MICADO_HK_3arcsec_chip5.npz +0 -0
  146. pyreduce/wavecal/atlas/thar.fits +4946 -13
  147. pyreduce/wavecal/atlas/thar_list.txt +4172 -0
  148. pyreduce/wavecal/atlas/une.fits +0 -0
  149. pyreduce/wavecal/convert.py +38 -0
  150. pyreduce/wavecal/crires_plus_J1228_Open_det1.npz +0 -0
  151. pyreduce/wavecal/crires_plus_J1228_Open_det2.npz +0 -0
  152. pyreduce/wavecal/crires_plus_J1228_Open_det3.npz +0 -0
  153. pyreduce/wavecal/harpn_harpn_2D.npz +0 -0
  154. pyreduce/wavecal/harps_blue_2D.npz +0 -0
  155. pyreduce/wavecal/harps_blue_pol_2D.npz +0 -0
  156. pyreduce/wavecal/harps_red_2D.npz +0 -0
  157. pyreduce/wavecal/harps_red_pol_2D.npz +0 -0
  158. pyreduce/wavecal/mcdonald.npz +0 -0
  159. pyreduce/wavecal/metis_lss_l_2D.npz +0 -0
  160. pyreduce/wavecal/metis_lss_m_2D.npz +0 -0
  161. pyreduce/wavecal/nirspec_K2.npz +0 -0
  162. pyreduce/wavecal/uves_blue_360nm_2D.npz +0 -0
  163. pyreduce/wavecal/uves_blue_390nm_2D.npz +0 -0
  164. pyreduce/wavecal/uves_blue_437nm_2D.npz +0 -0
  165. pyreduce/wavecal/uves_middle_2x2_2D.npz +0 -0
  166. pyreduce/wavecal/uves_middle_565nm_2D.npz +0 -0
  167. pyreduce/wavecal/uves_middle_580nm_2D.npz +0 -0
  168. pyreduce/wavecal/uves_middle_600nm_2D.npz +0 -0
  169. pyreduce/wavecal/uves_middle_665nm_2D.npz +0 -0
  170. pyreduce/wavecal/uves_middle_860nm_2D.npz +0 -0
  171. pyreduce/wavecal/uves_red_580nm_2D.npz +0 -0
  172. pyreduce/wavecal/uves_red_600nm_2D.npz +0 -0
  173. pyreduce/wavecal/uves_red_665nm_2D.npz +0 -0
  174. pyreduce/wavecal/uves_red_760nm_2D.npz +0 -0
  175. pyreduce/wavecal/uves_red_860nm_2D.npz +0 -0
  176. pyreduce/wavecal/xshooter_nir.npz +0 -0
  177. pyreduce/wavelength_calibration.py +1871 -0
  178. pyreduce_astro-0.7a4.dist-info/METADATA +106 -0
  179. pyreduce_astro-0.7a4.dist-info/RECORD +182 -0
  180. pyreduce_astro-0.7a4.dist-info/WHEEL +4 -0
  181. pyreduce_astro-0.7a4.dist-info/entry_points.txt +2 -0
  182. pyreduce_astro-0.7a4.dist-info/licenses/LICENSE +674 -0
pyreduce/reduce.py ADDED
@@ -0,0 +1,2065 @@
+ """
+ REDUCE script for spectrograph data
+
+ Authors
+ -------
+ Ansgar Wehrhahn (ansgar.wehrhahn@physics.uu.se)
+ Thomas Marquart (thomas.marquart@physics.uu.se)
+ Alexis Lavail (alexis.lavail@physics.uu.se)
+ Nikolai Piskunov (nikolai.piskunov@physics.uu.se)
+
+ Version
+ -------
+ 1.0 - Initial PyReduce
+
+ License
+ --------
+ ...
+
+ """
+
+ import logging
+ import os.path
+ import warnings
+ from itertools import product
+ from os.path import join
+
+ import joblib
+ import matplotlib.pyplot as plt
+ import numpy as np
+ from astropy.io import fits
+ from astropy.io.fits.verify import VerifyWarning
+
+ warnings.simplefilter("ignore", category=VerifyWarning)
+
+
+ from tqdm import tqdm
+
+ # PyReduce subpackages
+ from . import __version__, echelle, instruments, util
+ from .combine_frames import (
+     combine_bias,
+     combine_calibrate,
+     combine_polynomial,
+ )
+ from .configuration import load_config
+ from .continuum_normalization import continuum_normalize, splice_orders
+ from .estimate_background_scatter import estimate_background_scatter
+ from .extract import extract
+ from .make_shear import Curvature as CurvatureModule
+ from .rectify import merge_images, rectify_image
+ from .trace import trace as mark_orders
+ from .wavelength_calibration import LineList, WavelengthCalibrationComb
+ from .wavelength_calibration import WavelengthCalibration as WavelengthCalibrationModule
+ from .wavelength_calibration import (
+     WavelengthCalibrationInitialize as WavelengthCalibrationInitializeModule,
+ )
+
+ # TODO Naming of functions and modules
+ # TODO License
+
+ # TODO automatic determination of the extraction width
+ logger = logging.getLogger(__name__)
+
+
+ def main(
+     instrument,
+     target,
+     night=None,
+     arms=None,
+     steps="all",
+     base_dir=None,
+     input_dir=None,
+     output_dir=None,
+     configuration=None,
+     order_range=None,
+     allow_calibration_only=False,
+     skip_existing=False,
+     plot=0,
+     plot_dir=None,
+ ):
+     r"""
+     Main entry point for REDUCE scripts.
+
+     Default values can be changed as required if reduce is used as a script.
+     Finds input directories, and loops over observation nights and instrument arms.
+
+     .. deprecated::
+         Use :meth:`Pipeline.from_instrument` instead.
+
+     Parameters
+     ----------
+     instrument : str, list[str]
+         instrument used for the observation (e.g. UVES, HARPS)
+     target : str, list[str]
+         the observed star, as named in the folder structure/fits headers
+     night : str, list[str]
+         the observation nights to reduce, as named in the folder structure. Accepts bash wildcards (i.e. \*, ?), but then relies on the folder structure for restricting the nights
+     arms : str, list[str], dict[{instrument}:list], None, optional
+         the instrument arms to use; if None, all known arms of the current instrument are used. See instruments for possible options
+     steps : tuple(str), "all", optional
+         which steps of the reduction process to perform.
+         The possible steps are: "bias", "flat", "orders", "norm_flat", "wavecal", "science".
+         Alternatively set steps to "all", which is equivalent to setting all steps.
+         Note that later steps require the intermediary products of the earlier steps to exist and raise an exception otherwise
+     base_dir : str, optional
+         base data directory that Reduce should work in, is prefixed to input_dir and output_dir (default: use settings_pyreduce.json)
+     input_dir : str, optional
+         input directory containing raw files. Can contain placeholders {instrument}, {target}, {night}, {arm} as well as wildcards. If relative, base_dir is used as the root (default: use settings_pyreduce.json)
+     output_dir : str, optional
+         output directory for intermediary and final results. Can contain placeholders {instrument}, {target}, {night}, {arm}, but no wildcards. If relative, base_dir is used as the root (default: use settings_pyreduce.json)
+     configuration : dict[str:obj], str, list[str], dict[{instrument}:dict,str], optional
+         configuration for the current run, containing parameters for the different parts of reduce. Can be a path to a json file, or a dict with configurations for the different instruments. When a list, the order must be the same as instruments (default: settings_{instrument.upper()}.json)
+     """
+     warnings.warn(
+         "pyreduce.reduce.main() is deprecated. Use Pipeline.from_instrument() instead:\n"
+         "  from pyreduce.pipeline import Pipeline\n"
+         "  result = Pipeline.from_instrument(instrument, target, ...).run()",
+         DeprecationWarning,
+         stacklevel=2,
+     )
+
+     if target is None or np.isscalar(target):
+         target = [target]
+     if night is None or np.isscalar(night):
+         night = [night]
+
+     output = []
+
+     # Loop over everything
+
+     # settings: default settings of PyReduce
+     # config: parameters for the current reduction
+     # info: constant, instrument specific parameters
+     config = load_config(configuration, instrument, 0)
+
+     # Environment variable overrides for plot (useful for headless runs)
+     if "PYREDUCE_PLOT" in os.environ:
+         plot = int(os.environ["PYREDUCE_PLOT"])
+     if "PYREDUCE_PLOT_DIR" in os.environ:
+         plot_dir = os.environ["PYREDUCE_PLOT_DIR"]
+
+     # Set global plot directory for util.show_or_save()
+     util.set_plot_dir(plot_dir)
+
+     if isinstance(instrument, str):
+         instrument = instruments.instrument_info.load_instrument(instrument)
+     info = instrument.info
+
+     # load default settings from settings_pyreduce.json
+     if base_dir is None:
+         base_dir = config["reduce"]["base_dir"]
+     if input_dir is None:
+         input_dir = config["reduce"]["input_dir"]
+     if output_dir is None:
+         output_dir = config["reduce"]["output_dir"]
+
+     input_dir = join(base_dir, input_dir)
+     output_dir = join(base_dir, output_dir)
+
+     if arms is None:
+         arms = info["arms"]
+     if np.isscalar(arms):
+         arms = [arms]
+
+     for t, n, a in product(target, night, arms):
+         log_file = join(
+             base_dir.format(instrument=str(instrument), arm=a, target=t),
+             f"logs/{t}.log",
+         )
+         util.start_logging(log_file)
+         # find input files and sort them by type
+         files = instrument.sort_files(
+             input_dir,
+             t,
+             n,
+             arm=a,
+             **config["instrument"],
+             allow_calibration_only=allow_calibration_only,
+         )
+         if len(files) == 0:
+             logger.warning(
+                 "No files found for instrument: %s, target: %s, night: %s, arm: %s in folder: %s",
+                 instrument,
+                 t,
+                 n,
+                 a,
+                 input_dir,
+             )
+             continue
+         for k, f in files:
+             logger.info("Settings:")
+             for key, value in k.items():
+                 logger.info("%s: %s", key, value)
+             logger.debug("Files:\n%s", f)
+
+             from .pipeline import Pipeline
+
+             pipe = Pipeline.from_files(
+                 files=f,
+                 output_dir=output_dir,
+                 target=k.get("target"),
+                 instrument=instrument,
+                 arm=a,
+                 night=k.get("night"),
+                 config=config,
+                 order_range=order_range,
+                 steps=steps,
+                 plot=plot,
+                 plot_dir=plot_dir,
+             )
+             data = pipe.run(skip_existing=skip_existing)
+             output.append(data)
+     return output
+
+
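As the deprecation warning above spells out, new code should drive the reduction through the Pipeline API rather than this function. A minimal sketch of the migration; the instrument, target, and night values are placeholders, and any keyword beyond the first two positional arguments is an assumption about from_instrument's signature, which is not shown in this diff:

    from pyreduce.pipeline import Pipeline

    # Hypothetical values; a real run needs matching raw frames on disk.
    pipe = Pipeline.from_instrument("UVES", "HD132205", night="2010-04-01")
    result = pipe.run()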
+ class Step:
+     """Parent class for all steps"""
+
+     def __init__(
+         self, instrument, arm, target, night, output_dir, order_range, **config
+     ):
+         self._dependsOn = []
+         self._loadDependsOn = []
+         #:str: Name of the instrument
+         self.instrument = instrument
+         #:str: Name of the instrument arm
+         self.arm = arm
+         #:str: Name of the observation target
+         self.target = target
+         #:str: Date of the observation (as a string)
+         self.night = night
+         #:tuple(int, int): First and last(+1) order to process
+         self.order_range = order_range
+         #:bool: Whether to plot the results or the progress of this step
+         self.plot = config.get("plot", False)
+         #:str: Title used in the plots, if any
+         self.plot_title = config.get("plot_title", None)
+         self._output_dir = output_dir
+
+     def run(self, files, *args):  # pragma: no cover
+         """Execute the current step
+
+         This should fail if files are missing or anything else goes wrong.
+         If the user does not want to run this step, they should not specify it in steps.
+
+         Parameters
+         ----------
+         files : list(str)
+             data files required for this step
+
+         Raises
+         ------
+         NotImplementedError
+             needs to be implemented for each step
+         """
+         raise NotImplementedError
+
+     def save(self, *args):  # pragma: no cover
+         """Save the results of this step
+
+         Parameters
+         ----------
+         *args : obj
+             things to save
+
+         Raises
+         ------
+         NotImplementedError
+             needs to be implemented for each step
+         """
+         raise NotImplementedError
+
+     def load(self):  # pragma: no cover
+         """Load results from a previous execution
+
+         If this raises a FileNotFoundError, run() will be used instead.
+         For calibration steps, however, it is preferred to print a warning
+         and return None; other modules can then use a default value instead.
+
+         Raises
+         ------
+         NotImplementedError
+             needs to be implemented for each step
+         """
+         raise NotImplementedError
+
+     @property
+     def dependsOn(self):
+         """list(str): Steps that are required before running this step"""
+         return list(set(self._dependsOn))
+
+     @property
+     def loadDependsOn(self):
+         """list(str): Steps that are required before loading data from this step"""
+         return list(set(self._loadDependsOn))
+
+     @property
+     def output_dir(self):
+         """str: output directory, may contain tags {instrument}, {night}, {target}, {arm}"""
+         return self._output_dir.format(
+             instrument=self.instrument.name.upper(),
+             target=self.target,
+             night=self.night,
+             arm=self.arm,
+         )
+
+     @property
+     def prefix(self):
+         """str: temporary file prefix"""
+         i = self.instrument.name.lower()
+         if self.arm is not None and self.arm != "":
+             a = self.arm.lower()
+             return f"{i}_{a}"
+         else:
+             return i
+
+
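The run/save/load contract above, together with the _dependsOn list, is everything a concrete step implements. A hedged sketch of a hypothetical custom step under these conventions (the step itself is illustrative and not part of the package; it reuses the module-level np, fits, join, and logger):

    class MedianLevel(Step):
        """Hypothetical step: measure the median level of each input frame."""

        def __init__(self, *args, **config):
            super().__init__(*args, **config)
            self._dependsOn += ["mask"]  # ask the pipeline for the bad pixel mask

        @property
        def savefile(self):
            return join(self.output_dir, self.prefix + ".median_level.npz")

        def run(self, files, mask):
            levels = np.array(
                [np.ma.median(np.ma.masked_array(fits.getdata(f), mask=mask)) for f in files]
            )
            self.save(levels)
            return levels

        def save(self, levels):
            np.savez(self.savefile, levels=levels)
            logger.info("Created median level file: %s", self.savefile)

        def load(self):
            return np.load(self.savefile)["levels"]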
+ class CalibrationStep(Step):
+     def __init__(self, *args, **config):
+         super().__init__(*args, **config)
+         self._dependsOn += ["mask", "bias"]
+
+         #:{'number_of_files', 'exposure_time', 'mean', 'median', 'none'}: how to adjust for differences between the bias and flat field exposure times
+         self.bias_scaling = config["bias_scaling"]
+         #:{'divide', 'none'}: how to apply the normalized flat field
+         self.norm_scaling = config["norm_scaling"]
+
+     def calibrate(self, files, mask, bias=None, norm_flat=None):
+         bias, bhead = bias if bias is not None else (None, None)
+         norm, blaze = norm_flat if norm_flat is not None else (None, None)
+         orig, thead = combine_calibrate(
+             files,
+             self.instrument,
+             self.arm,
+             mask,
+             bias=bias,
+             bhead=bhead,
+             norm=norm,
+             bias_scaling=self.bias_scaling,
+             norm_scaling=self.norm_scaling,
+             plot=self.plot,
+             plot_title=self.plot_title,
+         )
+
+         return orig, thead
+
+
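Both attributes are read straight from the step's config section, so the choice lives in the instrument's settings JSON. A sketch of the relevant fragment as a Python dict; the values are examples, not the package defaults:

    config = {
        "bias_scaling": "exposure_time",  # or: number_of_files, mean, median, none
        "norm_scaling": "divide",         # or: none
    }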
+ class ExtractionStep(Step):
+     def __init__(self, *args, **config):
+         super().__init__(*args, **config)
+         self._dependsOn += [
+             "orders",
+         ]
+
+         #:{'arc', 'optimal'}: Extraction method to use
+         self.extraction_method = config["extraction_method"]
+         if self.extraction_method == "arc":
+             #:dict: arguments for the extraction
+             self.extraction_kwargs = {
+                 "extraction_width": config["extraction_width"],
+                 "sigma_cutoff": config["extraction_cutoff"],
+                 "collapse_function": config["collapse_function"],
+             }
+         elif self.extraction_method == "optimal":
+             self.extraction_kwargs = {
+                 "extraction_width": config["extraction_width"],
+                 "lambda_sf": config["smooth_slitfunction"],
+                 "lambda_sp": config["smooth_spectrum"],
+                 "osample": config["oversampling"],
+                 "swath_width": config["swath_width"],
+                 "sigma_cutoff": config["extraction_cutoff"],
+                 "maxiter": config["maxiter"],
+             }
+         else:
+             raise ValueError(
+                 f"Extraction method {self.extraction_method} not supported for step 'wavecal'"
+             )
+
+     def extract(self, img, head, orders, curvature, scatter=None):
+         orders, column_range = orders if orders is not None else (None, None)
+         tilt, shear = curvature if curvature is not None else (None, None)
+
+         data, unc, slitfu, cr = extract(
+             img,
+             orders,
+             gain=head["e_gain"],
+             readnoise=head["e_readn"],
+             dark=head["e_drk"],
+             column_range=column_range,
+             extraction_type=self.extraction_method,
+             order_range=self.order_range,
+             plot=self.plot,
+             plot_title=self.plot_title,
+             tilt=tilt,
+             shear=shear,
+             scatter=scatter,
+             **self.extraction_kwargs,
+         )
+         return data, unc, slitfu, cr
+
+
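The two branches expect different config keys. A sketch of both shapes with placeholder values (the numbers are illustrative, not the shipped defaults):

    arc_config = {
        "extraction_method": "arc",
        "extraction_width": 0.5,        # fraction of the order separation
        "extraction_cutoff": 5,         # sigma clipping threshold
        "collapse_function": "median",
    }

    optimal_config = {
        "extraction_method": "optimal",
        "extraction_width": 0.5,
        "smooth_slitfunction": 1.0,     # becomes lambda_sf
        "smooth_spectrum": 0.0,         # becomes lambda_sp
        "oversampling": 10,             # becomes osample
        "swath_width": 300,
        "extraction_cutoff": 5,
        "maxiter": 20,
    }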
+ class FitsIOStep(Step):
+     def __init__(self, *args, **kwargs):
+         super().__init__(*args, **kwargs)
+         self._loadDependsOn += ["mask"]
+         self.allow_failure = True
+
+     def save(self, data, head, dtype=None):
+         """
+         Save the data to a FITS file
+
+         Parameters
+         ----------
+         data : array of shape (nrow, ncol)
+             image data
+         head : FITS header
+             image header
+         """
+         if dtype is not None:
+             data = np.asarray(data, dtype=dtype)
+
+         fits.writeto(
+             self.savefile,
+             data=data,
+             header=head,
+             overwrite=True,
+             output_verify="silentfix+ignore",
+         )
+         logger.info("Created data file: %s", self.savefile)
+
+     def load(self, mask):
+         """
+         Load the data from a previous run
+
+         Parameters
+         ----------
+         mask : array of shape (nrow, ncol)
+             Bad pixel mask
+
+         Returns
+         -------
+         data : masked array of shape (nrow, ncol)
+             data with the bad pixel mask applied
+         head : FITS header
+             header of the data file
+         """
+         try:
+             with fits.open(self.savefile, memmap=False) as hdu:
+                 data, head = hdu[0].data, hdu[0].header
+                 data = np.ma.masked_array(data, mask=mask)
+             logger.info("Data file: %s", self.savefile)
+         except FileNotFoundError as ex:
+             if self.allow_failure:
+                 logger.warning("No data file found")
+                 data, head = None, None
+             else:
+                 raise ex
+         return data, head
+
+
+ class Mask(Step):
+     """Load the bad pixel mask for the given instrument/arm"""
+
+     def __init__(self, *args, **config):
+         super().__init__(*args, **config)
+
+     def run(self):
+         """Load the mask file from disk
+
+         Returns
+         -------
+         mask : array of shape (nrow, ncol)
+             Bad pixel mask for this setting
+         """
+         return self.load()
+
+     def load(self):
+         """Load the mask file from disk
+
+         Returns
+         -------
+         mask : array of shape (nrow, ncol)
+             Bad pixel mask for this setting
+         """
+         mask_file = self.instrument.get_mask_filename(arm=self.arm)
+         try:
+             mask, _ = self.instrument.load_fits(mask_file, self.arm, extension=0)
+             mask = ~mask.data.astype(bool)  # REDUCE masks are inverted relative to numpy masks
+             logger.info("Bad pixel mask file: %s", mask_file)
+         except (FileNotFoundError, ValueError):
+             logger.error(
+                 "Bad pixel mask datafile %s not found. Using all pixels instead.",
+                 mask_file,
+             )
+             mask = False
+         return mask
+
+
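The inversion in load() matters because REDUCE mask files flag good pixels, while numpy masked arrays treat True as bad. A quick illustration of the numpy convention the rest of the pipeline relies on:

    data = np.array([1.0, 2.0, 3.0])
    bad = np.array([False, True, False])        # True marks a bad pixel
    masked = np.ma.masked_array(data, mask=bad)
    print(masked.mean())                        # 2.0 -- the bad pixel is ignored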
+ class Bias(Step):
+     """Calculates the master bias"""
+
+     def __init__(self, *args, **config):
+         super().__init__(*args, **config)
+         self._dependsOn += ["mask"]
+         self._loadDependsOn += ["mask"]
+
+         #:int: polynomial degree of the fit between exposure time and pixel values
+         self.degree = config["degree"]
+
+     @property
+     def savefile(self):
+         """str: Name of the master bias FITS file"""
+         return join(self.output_dir, self.prefix + ".bias.fits")
+
+     def run(self, files, mask):
+         """Calculate the master bias
+
+         Parameters
+         ----------
+         files : list(str)
+             bias files
+         mask : array of shape (nrow, ncol)
+             bad pixel map
+
+         Returns
+         -------
+         bias : masked array of shape (nrow, ncol)
+             master bias data, with the bad pixel mask applied
+         bhead : FITS header
+             header of the master bias
+         """
+         logger.info("Bias Files: %s", files)
+
+         if self.degree == 0:
+             # If the degree is 0, we just combine all images into a single master bias.
+             # This works well if we assume there is no dark current at exposure time 0
+             bias, bhead = combine_bias(
+                 files,
+                 self.instrument,
+                 self.arm,
+                 mask=mask,
+                 plot=self.plot,
+                 plot_title=self.plot_title,
+             )
+         else:
+             # Otherwise we fit a polynomial to each pixel in the image, with
+             # the pixel value versus the exposure time. The constant coefficients
+             # are then the bias, and the others are used to scale with the
+             # exposure time
+             bias, bhead = combine_polynomial(
+                 files,
+                 self.instrument,
+                 self.arm,
+                 mask=mask,
+                 degree=self.degree,
+                 plot=self.plot,
+                 plot_title=self.plot_title,
+             )
+
+         self.save(bias.data, bhead)
+         return bias, bhead
+
+     def save(self, bias, bhead):
+         """Save the master bias to a FITS file
+
+         Parameters
+         ----------
+         bias : array of shape (nrow, ncol)
+             bias data
+         bhead : FITS header
+             bias header
+         """
+         bias = np.asarray(bias, dtype=np.float32)
+
+         if self.degree == 0:
+             hdus = [fits.PrimaryHDU(data=bias, header=bhead, scale_back=False)]
+         else:
+             hdus = [fits.PrimaryHDU(data=bias[0], header=bhead, scale_back=False)]
+             for i in range(1, len(bias)):
+                 hdus += [fits.ImageHDU(data=bias[i])]
+         hdus = fits.HDUList(hdus)
+
+         hdus[0].header["BZERO"] = 0
+         hdus.writeto(
+             self.savefile,
+             overwrite=True,
+             output_verify="silentfix+ignore",
+         )
+         logger.info("Created master bias file: %s", self.savefile)
+
+     def load(self, mask):
+         """Load the master bias from a previous run
+
+         Parameters
+         ----------
+         mask : array of shape (nrow, ncol)
+             Bad pixel mask
+
+         Returns
+         -------
+         bias : masked array of shape (nrow, ncol)
+             master bias data, with the bad pixel mask applied
+         bhead : FITS header
+             header of the master bias
+         """
+         try:
+             logger.info("Master bias file: %s", self.savefile)
+             with fits.open(self.savefile, memmap=False) as hdu:
+                 degree = len(hdu) - 1
+                 if degree == 0:
+                     bias, bhead = hdu[0].data, hdu[0].header
+                     bias = np.ma.masked_array(bias, mask=mask)
+                 else:
+                     bhead = hdu[0].header
+                     bias = np.array([h.data for h in hdu])
+                     bias = np.ma.masked_array(
+                         bias, mask=[mask for _ in range(len(hdu))]
+                     )
+         except FileNotFoundError:
+             logger.warning("No intermediate bias file found. Using Bias = 0 instead.")
+             bias, bhead = None, None
+         return bias, bhead
+
+
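The else-branch comment describes a per-pixel fit of counts against exposure time. A toy numpy sketch of that idea for a linear fit; the three frames and their exposure times are hypothetical, and combine_polynomial itself lives in combine_frames.py, not shown here:

    texp = np.array([0.0, 10.0, 20.0])             # exposure times in seconds
    frames = np.stack([f0, f1, f2])                # (3, nrow, ncol), hypothetical frames
    pixels = frames.reshape(3, -1)                 # one column per pixel
    coef = np.polyfit(texp, pixels, deg=1)         # (2, npix): slope and constant term
    bias = coef[1].reshape(frames.shape[1:])       # constant term = bias level
    rate = coef[0].reshape(frames.shape[1:])       # slope = counts per second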
+ class Flat(CalibrationStep):
+     """Calculates the master flat"""
+
+     def __init__(self, *args, **config):
+         super().__init__(*args, **config)
+         self._loadDependsOn += ["mask"]
+
+     @property
+     def savefile(self):
+         """str: Name of the master flat FITS file"""
+         return join(self.output_dir, self.prefix + ".flat.fits")
+
+     def save(self, flat, fhead):
+         """Save the master flat to a FITS file
+
+         Parameters
+         ----------
+         flat : array of shape (nrow, ncol)
+             master flat data
+         fhead : FITS header
+             master flat header
+         """
+         flat = np.asarray(flat, dtype=np.float32)
+         fits.writeto(
+             self.savefile,
+             data=flat,
+             header=fhead,
+             overwrite=True,
+             output_verify="silentfix+ignore",
+         )
+         logger.info("Created master flat file: %s", self.savefile)
+
+     def run(self, files, bias, mask):
+         """Calculate the master flat, with the bias already subtracted
+
+         Parameters
+         ----------
+         files : list(str)
+             flat files
+         bias : tuple(array of shape (nrow, ncol), FITS header)
+             master bias and header
+         mask : array of shape (nrow, ncol)
+             Bad pixel mask
+
+         Returns
+         -------
+         flat : masked array of shape (nrow, ncol)
+             Master flat with bad pixel map applied
+         fhead : FITS header
+             Master flat FITS header
+         """
+         logger.info("Flat files: %s", files)
+         # This is just the calibration of images
+         flat, fhead = self.calibrate(files, mask, bias, None)
+         # And then save it
+         self.save(flat.data, fhead)
+         return flat, fhead
+
+     def load(self, mask):
+         """Load master flat from disk
+
+         Parameters
+         ----------
+         mask : array of shape (nrow, ncol)
+             Bad pixel mask
+
+         Returns
+         -------
+         flat : masked array of shape (nrow, ncol)
+             Master flat with bad pixel map applied
+         fhead : FITS header
+             Master flat FITS header
+         """
+         try:
+             with fits.open(self.savefile, memmap=False) as hdu:
+                 flat, fhead = hdu[0].data, hdu[0].header
+                 flat = np.ma.masked_array(flat, mask=mask)
+             logger.info("Master flat file: %s", self.savefile)
+         except FileNotFoundError:
+             logger.warning(
+                 "No intermediate file for the flat field found. Using Flat = 1 instead"
+             )
+             flat, fhead = None, None
+         return flat, fhead
+
+
+ class OrderTracing(CalibrationStep):
+     """Determine the polynomial fits describing the pixel locations of each order"""
+
+     def __init__(self, *args, **config):
+         super().__init__(*args, **config)
+
+         #:int: Minimum size of each cluster to be included in further processing
+         self.min_cluster = config["min_cluster"]
+         #:int, float: Minimum width of each cluster after merging
+         self.min_width = config["min_width"]
+         #:int: Smoothing width along x-axis (dispersion direction)
+         self.filter_x = config.get("filter_x", 0)
+         #:int: Smoothing width along y-axis (cross-dispersion direction)
+         self.filter_y = config["filter_y"]
+         #:str: Type of smoothing filter (boxcar, gaussian, whittaker)
+         self.filter_type = config.get("filter_type", "boxcar")
+         #:int: Background noise value threshold
+         self.noise = config["noise"]
+         #:int: Polynomial degree of the fit to each order
+         self.fit_degree = config["degree"]
+
+         self.degree_before_merge = config["degree_before_merge"]
+         self.regularization = config["regularization"]
+         self.closing_shape = config["closing_shape"]
+         self.opening_shape = config["opening_shape"]
+         self.auto_merge_threshold = config["auto_merge_threshold"]
+         self.merge_min_threshold = config["merge_min_threshold"]
+         self.sigma = config["split_sigma"]
+         #:int: Number of pixels at the edge of the detector to ignore
+         self.border_width = config["border_width"]
+         #:bool: Whether to use manual alignment
+         self.manual = config["manual"]
+
+     @property
+     def savefile(self):
+         """str: Name of the order tracing file"""
+         return join(self.output_dir, self.prefix + ".ord_default.npz")
+
+     def run(self, files, mask, bias):
+         """Determine polynomial coefficients describing order locations
+
+         Parameters
+         ----------
+         files : list(str)
+             Observation used for order tracing (should only have one element)
+         mask : array of shape (nrow, ncol)
+             Bad pixel mask
+         bias : tuple or None
+             Bias correction
+
+         Returns
+         -------
+         orders : array of shape (nord, ndegree+1)
+             polynomial coefficients for each order
+         column_range : array of shape (nord, 2)
+             first and last(+1) column that carries signal in each order
+         """
+
+         logger.info("Order tracing files: %s", files)
+
+         order_img, ohead = self.calibrate(files, mask, bias, None)
+
+         orders, column_range = mark_orders(
+             order_img,
+             min_cluster=self.min_cluster,
+             min_width=self.min_width,
+             filter_x=self.filter_x,
+             filter_y=self.filter_y,
+             filter_type=self.filter_type,
+             noise=self.noise,
+             degree=self.fit_degree,
+             degree_before_merge=self.degree_before_merge,
+             regularization=self.regularization,
+             closing_shape=self.closing_shape,
+             opening_shape=self.opening_shape,
+             border_width=self.border_width,
+             manual=self.manual,
+             auto_merge_threshold=self.auto_merge_threshold,
+             merge_min_threshold=self.merge_min_threshold,
+             sigma=self.sigma,
+             plot=self.plot,
+             plot_title=self.plot_title,
+         )
+
+         self.save(orders, column_range)
+
+         return orders, column_range
+
+     def save(self, orders, column_range):
+         """Save order tracing results to disk
+
+         Parameters
+         ----------
+         orders : array of shape (nord, ndegree+1)
+             polynomial coefficients
+         column_range : array of shape (nord, 2)
+             first and last(+1) column that carry signal in each order
+         """
+         np.savez(self.savefile, orders=orders, column_range=column_range)
+         logger.info("Created order tracing file: %s", self.savefile)
+
+     def load(self):
+         """Load order tracing results
+
+         Returns
+         -------
+         orders : array of shape (nord, ndegree+1)
+             polynomial coefficients for each order
+         column_range : array of shape (nord, 2)
+             first and last(+1) column that carries signal in each order
+         """
+         logger.info("Order tracing file: %s", self.savefile)
+         data = np.load(self.savefile, allow_pickle=True)
+         orders = data["orders"]
+         column_range = data["column_range"]
+         return orders, column_range
+
+
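The saved arrays are plain polynomial coefficients plus a per-order validity range, so downstream code can recover a trace with numpy alone. A sketch, assuming np.polyval coefficient ordering (highest degree first) and a hypothetical file name:

    data = np.load("uves_red.ord_default.npz", allow_pickle=True)
    orders, column_range = data["orders"], data["column_range"]

    i = 0                                                  # first traced order
    x = np.arange(column_range[i, 0], column_range[i, 1])
    y = np.polyval(orders[i], x)                           # row of the order centre per column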
+ class BackgroundScatter(CalibrationStep):
+     """Determine the background scatter"""
+
+     def __init__(self, *args, **config):
+         super().__init__(*args, **config)
+         self._dependsOn += ["orders"]
+
+         #:tuple(int, int): Polynomial degrees for the background scatter fit, in row, column direction
+         self.scatter_degree = config["scatter_degree"]
+         self.extraction_width = config["extraction_width"]
+         self.sigma_cutoff = config["scatter_cutoff"]
+         self.border_width = config["border_width"]
+
+     @property
+     def savefile(self):
+         """str: Name of the scatter file"""
+         return join(self.output_dir, self.prefix + ".scatter.npz")
+
+     def run(self, files, mask, bias, orders):
+         logger.info("Background scatter files: %s", files)
+
+         scatter_img, shead = self.calibrate(files, mask, bias)
+
+         orders, column_range = orders
+         scatter = estimate_background_scatter(
+             scatter_img,
+             orders,
+             column_range=column_range,
+             extraction_width=self.extraction_width,
+             scatter_degree=self.scatter_degree,
+             sigma_cutoff=self.sigma_cutoff,
+             border_width=self.border_width,
+             plot=self.plot,
+             plot_title=self.plot_title,
+         )
+
+         self.save(scatter)
+         return scatter
+
+     def save(self, scatter):
+         """Save scatter results to disk
+
+         Parameters
+         ----------
+         scatter : array
+             scatter coefficients
+         """
+         np.savez(self.savefile, scatter=scatter)
+         logger.info("Created background scatter file: %s", self.savefile)
+
+     def load(self):
+         """Load scatter results from disk
+
+         Returns
+         -------
+         scatter : array
+             scatter coefficients
+         """
+         try:
+             data = np.load(self.savefile, allow_pickle=True)
+             logger.info("Background scatter file: %s", self.savefile)
+         except FileNotFoundError:
+             logger.warning(
+                 "No intermediate files found for the scatter. Using scatter = 0 instead."
+             )
+             data = {"scatter": None}
+         scatter = data["scatter"]
+         return scatter
+
+
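Since scatter_degree is a pair of degrees in the row and column directions, the saved coefficients describe a smooth 2D surface over the detector. A sketch of evaluating and removing such a surface, assuming numpy's polyval2d coefficient layout; the actual consumer is the extraction code, which is not part of this file:

    from numpy.polynomial import polynomial as P

    rows, cols = np.indices(img.shape)             # img: hypothetical calibrated frame
    background = P.polyval2d(rows, cols, scatter)  # scatter: coefficient matrix from this step
    corrected = img - background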
+ class NormalizeFlatField(Step):
+     """Calculate the 'normalized' flat field image"""
+
+     def __init__(self, *args, **config):
+         super().__init__(*args, **config)
+         self._dependsOn += ["flat", "orders", "scatter", "curvature"]
+
+         #:{'normalize'}: Extraction method to use
+         self.extraction_method = config["extraction_method"]
+         if self.extraction_method == "normalize":
+             #:dict: arguments for the extraction
+             self.extraction_kwargs = {
+                 "extraction_width": config["extraction_width"],
+                 "lambda_sf": config["smooth_slitfunction"],
+                 "lambda_sp": config["smooth_spectrum"],
+                 "osample": config["oversampling"],
+                 "swath_width": config["swath_width"],
+                 "sigma_cutoff": config["extraction_cutoff"],
+                 "maxiter": config["maxiter"],
+             }
+         else:
+             raise ValueError(
+                 f"Extraction method {self.extraction_method} not supported for step 'norm_flat'"
+             )
+         #:float: Threshold of the normalized flat field (values below it are set to 1); values of at most 1 are interpreted as a percentile fraction of the flat
+         self.threshold = config["threshold"]
+         self.threshold_lower = config["threshold_lower"]
+
+     @property
+     def savefile(self):
+         """str: Name of the blaze file"""
+         return join(self.output_dir, self.prefix + ".flat_norm.npz")
+
+     def run(self, flat, orders, scatter, curvature):
+         """Calculate the 'normalized' flat field
+
+         Parameters
+         ----------
+         flat : tuple(array, header)
+             Master flat, and its FITS header
+         orders : tuple(array, array)
+             Polynomial coefficients for each order, and the first and last(+1) column containing signal
+         scatter : array or None
+             background scatter coefficients, if any
+         curvature : tuple(array, array)
+             tilt and shear of the slit
+
+         Returns
+         -------
+         norm : array of shape (nrow, ncol)
+             normalized flat field
+         blaze : array of shape (nord, ncol)
+             Continuum level as determined from the flat field for each order
+         """
+         flat, fhead = flat
+         orders, column_range = orders
+         tilt, shear = curvature
+
+         # if the threshold is at most 1, interpret it as a percentile fraction
+         if self.threshold <= 1:
+             threshold = np.percentile(flat, self.threshold * 100)
+         else:
+             threshold = self.threshold
+
+         norm, _, blaze, _ = extract(
+             flat,
+             orders,
+             gain=fhead["e_gain"],
+             readnoise=fhead["e_readn"],
+             dark=fhead["e_drk"],
+             order_range=self.order_range,
+             column_range=column_range,
+             scatter=scatter,
+             threshold=threshold,
+             threshold_lower=self.threshold_lower,
+             extraction_type=self.extraction_method,
+             tilt=tilt,
+             shear=shear,
+             plot=self.plot,
+             plot_title=self.plot_title,
+             **self.extraction_kwargs,
+         )
+
+         blaze = np.ma.filled(blaze, 0)
+         norm = np.ma.filled(norm, 1)
+         norm = np.nan_to_num(norm, nan=1)
+         self.save(norm, blaze)
+         return norm, blaze
+
+     def save(self, norm, blaze):
+         """Save normalized flat field results to disk
+
+         Parameters
+         ----------
+         norm : array of shape (nrow, ncol)
+             normalized flat field
+         blaze : array of shape (nord, ncol)
+             Continuum level as determined from the flat field for each order
+         """
+         np.savez(self.savefile, blaze=blaze, norm=norm)
+         logger.info("Created normalized flat file: %s", self.savefile)
+
+     def load(self):
+         """Load normalized flat field results from disk
+
+         Returns
+         -------
+         norm : array of shape (nrow, ncol)
+             normalized flat field
+         blaze : array of shape (nord, ncol)
+             Continuum level as determined from the flat field for each order
+         """
+         try:
+             data = np.load(self.savefile, allow_pickle=True)
+             logger.info("Normalized flat file: %s", self.savefile)
+         except FileNotFoundError:
+             logger.warning(
+                 "No intermediate files found for the normalized flat field. Using flat = 1 instead."
+             )
+             data = {"blaze": None, "norm": None}
+         blaze = data["blaze"]
+         norm = data["norm"]
+         return norm, blaze
+
+
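Downstream, the two products separate concerns: norm removes pixel-to-pixel sensitivity from 2D images, while blaze keeps the per-order continuum shape for the 1D spectra. Conceptually, with i an order index (the real division happens inside combine_calibrate when norm_scaling is 'divide'):

    science_img = raw_img / norm           # pixel response removed
    flat_spectrum = spectrum / blaze[i]    # blaze shape removed from order i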
+ class WavelengthCalibrationMaster(CalibrationStep, ExtractionStep):
+     """Create wavelength calibration master image"""
+
+     def __init__(self, *args, **config):
+         super().__init__(*args, **config)
+         self._dependsOn += ["norm_flat", "curvature", "bias"]
+
+     @property
+     def savefile(self):
+         """str: Name of the wavelength echelle file"""
+         return join(self.output_dir, self.prefix + ".thar_master.fits")
+
+     def run(self, files, orders, mask, curvature, bias, norm_flat):
+         """Create the wavelength calibration master spectrum
+
+         This consists of combining and calibrating the input files and
+         extracting the wavelength calibration spectrum; fitting a polynomial
+         to the known spectral lines happens in the finalize step.
+
+         Parameters
+         ----------
+         files : list(str)
+             wavelength calibration files
+         orders : tuple(array, array)
+             Polynomial coefficients of each order, and columns with signal of each order
+         mask : array of shape (nrow, ncol)
+             Bad pixel mask
+
+         Returns
+         -------
+         thar : array of shape (nord, ncol)
+             extracted wavelength calibration spectrum
+         thead : FITS header
+             header of the master wavelength calibration frame
+         """
+         if len(files) == 0:
+             raise FileNotFoundError("No files found for wavelength calibration")
+         logger.info("Wavelength calibration files: %s", files)
+         # Load wavecal image
+         orig, thead = self.calibrate(files, mask, bias, norm_flat)
+         # Extract wavecal spectrum
+         thar, _, _, _ = self.extract(orig, thead, orders, curvature)
+         self.save(thar, thead)
+         return thar, thead
+
+     def save(self, thar, thead):
+         """Save the master wavelength calibration to a FITS file
+
+         Parameters
+         ----------
+         thar : array of shape (nrow, ncol)
+             master wavelength calibration data
+         thead : FITS header
+             master wavelength calibration header
+         """
+         thar = np.asarray(thar, dtype=np.float64)
+         fits.writeto(
+             self.savefile,
+             data=thar,
+             header=thead,
+             overwrite=True,
+             output_verify="silentfix+ignore",
+         )
+         logger.info("Created wavelength calibration spectrum file: %s", self.savefile)
+
+     def load(self):
+         """Load master wavelength calibration from disk
+
+         Returns
+         -------
+         thar : masked array of shape (nrow, ncol)
+             Master wavecal with bad pixel map applied
+         thead : FITS header
+             Master wavecal FITS header
+         """
+         with fits.open(self.savefile, memmap=False) as hdu:
+             thar, thead = hdu[0].data, hdu[0].header
+         logger.info("Wavelength calibration spectrum file: %s", self.savefile)
+         return thar, thead
+
+
+ class WavelengthCalibrationInitialize(Step):
+     """Create the initial wavelength solution file"""
+
+     def __init__(self, *args, **config):
+         super().__init__(*args, **config)
+         self._dependsOn += ["wavecal_master"]
+         self._loadDependsOn += ["config", "wavecal_master"]
+
+         #:tuple(int, int): Polynomial degree of the wavelength calibration in order, column direction
+         self.degree = config["degree"]
+         #:float: wavelength range around the initial guess to explore
+         self.wave_delta = config["wave_delta"]
+         #:int: number of walkers in the MCMC
+         self.nwalkers = config["nwalkers"]
+         #:int: number of steps in the MCMC
+         self.steps = config["steps"]
+         #:float: residual range within which a peak is accepted as a match with the atlas, in m/s
+         self.resid_delta = config["resid_delta"]
+         #:str: element for the atlas to use
+         self.element = config["element"]
+         #:str: medium of the instrument, 'air' or 'vac'
+         self.medium = config["medium"]
+         #:float: Gaussian smoothing parameter applied to the observed spectrum in pixel scale; set to 0 to disable smoothing
+         self.smoothing = config["smoothing"]
+         #:float: Minimum height of spectral lines in the normalized spectrum; values of 1 and above are interpreted as percentiles of the spectrum; set to 0 to disable the cutoff
+         self.cutoff = config["cutoff"]
+
+     @property
+     def savefile(self):
+         """str: Name of the wavelength echelle file"""
+         return join(self.output_dir, self.prefix + ".linelist.npz")
+
+     def run(self, wavecal_master):
+         thar, thead = wavecal_master
+
+         # Get the initial wavelength guess from the instrument
+         wave_range = self.instrument.get_wavelength_range(thead, self.arm)
+         if wave_range is None:
+             raise ValueError(
+                 "This instrument is missing an initial wavelength guess for wavecal_init"
+             )
+
+         module = WavelengthCalibrationInitializeModule(
+             plot=self.plot,
+             plot_title=self.plot_title,
+             degree=self.degree,
+             wave_delta=self.wave_delta,
+             nwalkers=self.nwalkers,
+             steps=self.steps,
+             resid_delta=self.resid_delta,
+             element=self.element,
+             medium=self.medium,
+             smoothing=self.smoothing,
+             cutoff=self.cutoff,
+         )
+         linelist = module.execute(thar, wave_range)
+         self.save(linelist)
+         return linelist
+
+     def save(self, linelist):
+         linelist.save(self.savefile)
+         logger.info("Created wavelength calibration linelist file: %s", self.savefile)
+
+     def load(self, config, wavecal_master):
+         thar, thead = wavecal_master
+         try:
+             # Try loading the custom reference file
+             reference = self.savefile
+             linelist = LineList.load(reference)
+         except FileNotFoundError:
+             # If that fails, load the file provided by PyReduce;
+             # this is the usual case, since most runs rely on the bundled file
+             reference = self.instrument.get_wavecal_filename(
+                 thead, self.arm, **config["instrument"]
+             )
+             # This should fail if there is no file provided by PyReduce
+             linelist = LineList.load(reference)
+         logger.info("Wavelength calibration linelist file: %s", reference)
+         return linelist
+
+
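resid_delta is a velocity-space tolerance: a detected peak counts as a match to an atlas line when the wavelength residual, expressed in m/s, stays within it. A sketch of that criterion; the wave_peak and wave_atlas arrays are hypothetical:

    from scipy.constants import c                       # speed of light in m/s

    resid = (wave_peak - wave_atlas) / wave_atlas * c   # residual in m/s
    matched = np.abs(resid) < resid_delta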
+ class WavelengthCalibrationFinalize(Step):
+     """Perform wavelength calibration"""
+
+     def __init__(self, *args, **config):
+         super().__init__(*args, **config)
+         self._dependsOn += ["wavecal_master", "wavecal_init"]
+
+         #:tuple(int, int): Polynomial degree of the wavelength calibration in order, column direction
+         self.degree = config["degree"]
+         #:bool: Whether to use manual alignment instead of cross correlation
+         self.manual = config["manual"]
+         #:float: residual threshold in m/s
+         self.threshold = config["threshold"]
+         #:int: Number of iterations in the remove lines, auto id cycle
+         self.iterations = config["iterations"]
+         #:{'1D', '2D'}: Whether to use 1d or 2d polynomials
+         self.dimensionality = config["dimensionality"]
+         #:int: Number of detector offset steps, due to detector design
+         self.nstep = config["nstep"]
+         #:int: How many columns to use in the 2D cross correlation alignment. 0 means all pixels (slow).
+         self.correlate_cols = config["correlate_cols"]
+         #:float: fraction of columns within which individual orders are allowed to shift
+         self.shift_window = config["shift_window"]
+         #:str: elements of the spectral lamp
+         self.element = config["element"]
+         #:str: medium of the detector, 'vac' or 'air'
+         self.medium = config["medium"]
+
+     @property
+     def savefile(self):
+         """str: Name of the wavelength echelle file"""
+         return join(self.output_dir, self.prefix + ".thar.npz")
+
+     def run(self, wavecal_master, wavecal_init):
+         """Perform wavelength calibration
+
+         This consists of fitting a polynomial to the known spectral lines
+         in the extracted wavelength calibration spectrum.
+
+         Parameters
+         ----------
+         wavecal_master : tuple
+             results of the wavecal_master step, containing the master wavecal image
+             and its header
+         wavecal_init : LineList
+             the initial LineList guess with the positions and wavelengths of lines
+
+         Returns
+         -------
+         wave : array of shape (nord, ncol)
+             wavelength for each point in the spectrum
+         coef : array of shape (*ndegrees,)
+             polynomial coefficients of the wavelength fit
+         linelist : record array of shape (nlines,)
+             Updated line information for all lines
+         """
+         thar, thead = wavecal_master
+         linelist = wavecal_init
+
+         module = WavelengthCalibrationModule(
+             plot=self.plot,
+             plot_title=self.plot_title,
+             manual=self.manual,
+             degree=self.degree,
+             threshold=self.threshold,
+             iterations=self.iterations,
+             dimensionality=self.dimensionality,
+             nstep=self.nstep,
+             correlate_cols=self.correlate_cols,
+             shift_window=self.shift_window,
+             element=self.element,
+             medium=self.medium,
+         )
+         wave, coef, linelist = module.execute(thar, linelist)
+         self.save(wave, coef, linelist)
+         return wave, coef, linelist
+
+     def save(self, wave, coef, linelist):
+         """Save the results of the wavelength calibration
+
+         Parameters
+         ----------
+         wave : array of shape (nord, ncol)
+             wavelength for each point in the spectrum
+         coef : array of shape (ndegrees,)
+             polynomial coefficients of the wavelength fit
+         linelist : record array of shape (nlines,)
+             Updated line information for all lines
+         """
+         np.savez(self.savefile, wave=wave, coef=coef, linelist=linelist)
+         logger.info("Created wavelength calibration file: %s", self.savefile)
+
+     def load(self):
+         """Load the results of the wavelength calibration
+
+         Returns
+         -------
+         wave : array of shape (nord, ncol)
+             wavelength for each point in the spectrum
+         coef : array of shape (*ndegrees,)
+             polynomial coefficients of the wavelength fit
+         linelist : record array of shape (nlines,)
+             Updated line information for all lines
+         """
+         data = np.load(self.savefile, allow_pickle=True)
+         logger.info("Wavelength calibration file: %s", self.savefile)
+         wave = data["wave"]
+         coef = data["coef"]
+         linelist = data["linelist"]
+         return wave, coef, linelist
+
+
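With dimensionality set to '2D', the saved coef describes wavelength as a single polynomial in (order, column). A sketch of evaluating such a solution, assuming numpy's polyval2d coefficient layout; the package's own evaluation helper lives in wavelength_calibration.py and is not shown here:

    from numpy.polynomial import polynomial as P

    nord, ncol = 20, 2048                  # hypothetical detector format
    ords, cols = np.indices((nord, ncol))
    wave = P.polyval2d(ords, cols, coef)   # wavelength per order and column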
+ class LaserFrequencyCombMaster(CalibrationStep, ExtractionStep):
+     """Create a laser frequency comb (or similar) master image"""
+
+     def __init__(self, *args, **config):
+         super().__init__(*args, **config)
+         self._dependsOn += ["norm_flat", "curvature"]
+
+     @property
+     def savefile(self):
+         """str: Name of the wavelength echelle file"""
+         return join(self.output_dir, self.prefix + ".comb_master.fits")
+
+     def run(self, files, orders, mask, curvature, bias, norm_flat):
+         """Create the laser frequency comb (or similar) master spectrum
+
+         Parameters
+         ----------
+         files : list(str)
+             observation files
+         orders : tuple
+             results from the order tracing step
+         mask : array of shape (nrow, ncol)
+             Bad pixel mask
+         curvature : tuple
+             results from the curvature step
+         bias : tuple
+             results from the bias step
+         norm_flat : tuple
+             results from the flat field normalization step
+
+         Returns
+         -------
+         comb : array of shape (nord, ncol)
+             extracted frequency comb spectrum
+         chead : Header
+             FITS header of the combined image
+         """
+
+         if len(files) == 0:
+             raise FileNotFoundError("No files for Laser Frequency Comb found")
+         logger.info("Frequency comb files: %s", files)
+
+         # Combine the input files and calibrate
+         orig, chead = self.calibrate(files, mask, bias, norm_flat)
+         # Extract the spectrum
+         comb, _, _, _ = self.extract(orig, chead, orders, curvature)
+         self.save(comb, chead)
+         return comb, chead
+
+     def save(self, comb, chead):
+         """Save the master comb to a FITS file
+
+         Parameters
+         ----------
+         comb : array of shape (nrow, ncol)
+             master comb data
+         chead : FITS header
+             master comb header
+         """
+         comb = np.asarray(comb, dtype=np.float64)
+         fits.writeto(
+             self.savefile,
+             data=comb,
+             header=chead,
+             overwrite=True,
+             output_verify="silentfix+ignore",
+         )
+         logger.info("Created frequency comb master spectrum: %s", self.savefile)
+
+     def load(self):
+         """Load master comb from disk
+
+         Returns
+         -------
+         comb : masked array of shape (nrow, ncol)
+             Master comb with bad pixel map applied
+         chead : FITS header
+             Master comb FITS header
+         """
+         with fits.open(self.savefile, memmap=False) as hdu:
+             comb, chead = hdu[0].data, hdu[0].header
+         logger.info("Frequency comb master spectrum: %s", self.savefile)
+         return comb, chead
+
+
+ class LaserFrequencyCombFinalize(Step):
+     """Improve the precision of the wavelength calibration with a laser frequency comb"""
+
+     def __init__(self, *args, **config):
+         super().__init__(*args, **config)
+         self._dependsOn += ["freq_comb_master", "wavecal"]
+         self._loadDependsOn += ["wavecal"]
+
+         #:tuple(int, int): polynomial degree of the wavelength fit
+         self.degree = config["degree"]
+         #:float: residual threshold in m/s above which to remove lines
+         self.threshold = config["threshold"]
+         #:{'1D', '2D'}: Whether to use 1D or 2D polynomials
+         self.dimensionality = config["dimensionality"]
+         self.nstep = config["nstep"]
+         #:int: Width of the peaks for finding them in the spectrum
+         self.lfc_peak_width = config["lfc_peak_width"]
+
+     @property
+     def savefile(self):
+         """str: Name of the wavelength echelle file"""
+         return join(self.output_dir, self.prefix + ".comb.npz")
+
+     def run(self, freq_comb_master, wavecal):
+         """Improve the wavelength calibration with a laser frequency comb (or similar)
+
+         Parameters
+         ----------
+         freq_comb_master : tuple
+             results from the freq_comb_master step, i.e. the extracted comb
+             spectrum and its header
+         wavecal : tuple
+             results from the wavelength calibration step
+
+         Returns
+         -------
+         wave : array of shape (nord, ncol)
+             improved wavelength solution
+         """
+         comb, chead = freq_comb_master
+         wave, coef, linelist = wavecal
+
+         module = WavelengthCalibrationComb(
+             plot=self.plot,
+             plot_title=self.plot_title,
+             degree=self.degree,
+             threshold=self.threshold,
+             dimensionality=self.dimensionality,
+             nstep=self.nstep,
+             lfc_peak_width=self.lfc_peak_width,
+         )
+         wave = module.execute(comb, wave, linelist)
+
+         self.save(wave)
+         return wave
+
+     def save(self, wave):
+         """Save the results of the frequency comb improvement
+
+         Parameters
+         ----------
+         wave : array of shape (nord, ncol)
+             improved wavelength solution
+         """
+         np.savez(self.savefile, wave=wave)
+         logger.info("Created frequency comb wavecal file: %s", self.savefile)
+
+     def load(self, wavecal):
+         """Load the results of the frequency comb improvement if possible,
+         otherwise fall back to the normal wavelength solution
+
+         Parameters
+         ----------
+         wavecal : tuple
+             results from the wavelength calibration step
+
+         Returns
+         -------
+         wave : array of shape (nord, ncol)
+             improved wavelength solution
+         """
+         try:
+             data = np.load(self.savefile, allow_pickle=True)
+             logger.info("Frequency comb wavecal file: %s", self.savefile)
+         except FileNotFoundError:
+             logger.warning(
+                 "No data for Laser Frequency Comb found, using regular wavelength calibration instead"
+             )
+             wave, coef, linelist = wavecal
+             data = {"wave": wave}
+         wave = data["wave"]
+         return wave
+
+
1482
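# The improved solution is a single array in a plain .npz file; a sketch of
# reading it back (filename illustrative), with the same fallback idea as
# load() above:
import numpy as np

try:
    wave = np.load("harps_night1.comb.npz", allow_pickle=True)["wave"]
except FileNotFoundError:
    wave = None  # fall back to the regular wavelength calibration result
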
class SlitCurvatureDetermination(CalibrationStep, ExtractionStep):
    """Determine the curvature of the slit"""

    def __init__(self, *args, **config):
        super().__init__(*args, **config)

        #:float: how many sigma of bad lines to cut away
        self.sigma_cutoff = config["curvature_cutoff"]
        #:float: width of the orders in the extraction
        self.extraction_width = config["extraction_width"]
        #:int: Polynomial degree of the overall fit
        self.fit_degree = config["degree"]
        #:int: Order of the curvature to fit, currently supports only 1 and 2
        self.curv_degree = config["curv_degree"]
        #:{'1D', '2D'}: Whether to use 1D or 2D polynomials
        self.curvature_mode = config["dimensionality"]
        #:float: peak finding noise threshold
        self.peak_threshold = config["peak_threshold"]
        #:int: peak width
        self.peak_width = config["peak_width"]
        #:float: window width to search for peaks in each row
        self.window_width = config["window_width"]
        #:str: Function shape that is fit to individual peaks
        self.peak_function = config["peak_function"]

    @property
    def savefile(self):
        """str: Name of the tilt/shear save file"""
        return join(self.output_dir, self.prefix + ".shear.npz")

    def run(self, files, orders, mask, bias):
        """Determine the curvature of the slit

        Parameters
        ----------
        files : list(str)
            files to use for the curvature determination
        orders : tuple
            results of the order tracing
        mask : array of shape (nrow, ncol)
            Bad pixel mask
        bias : tuple
            results from the bias step

        Returns
        -------
        tilt : array of shape (nord, ncol)
            first order slit curvature at each point
        shear : array of shape (nord, ncol)
            second order slit curvature at each point
        """

        logger.info("Slit curvature files: %s", files)

        orig, thead = self.calibrate(files, mask, bias, None)
        extracted, _, _, _ = self.extract(orig, thead, orders, None)

        orders, column_range = orders
        module = CurvatureModule(
            orders,
            column_range=column_range,
            extraction_width=self.extraction_width,
            order_range=self.order_range,
            fit_degree=self.fit_degree,
            curv_degree=self.curv_degree,
            sigma_cutoff=self.sigma_cutoff,
            mode=self.curvature_mode,
            peak_threshold=self.peak_threshold,
            peak_width=self.peak_width,
            window_width=self.window_width,
            peak_function=self.peak_function,
            plot=self.plot,
            plot_title=self.plot_title,
        )
        tilt, shear = module.execute(extracted, orig)
        self.save(tilt, shear)
        return tilt, shear

    def save(self, tilt, shear):
        """Save results from the curvature determination

        Parameters
        ----------
        tilt : array of shape (nord, ncol)
            first order slit curvature at each point
        shear : array of shape (nord, ncol)
            second order slit curvature at each point
        """
        np.savez(self.savefile, tilt=tilt, shear=shear)
        logger.info("Created slit curvature file: %s", self.savefile)

    def load(self):
        """Load the curvature if possible, otherwise return None, None, i.e. use vertical extraction

        Returns
        -------
        tilt : array of shape (nord, ncol)
            first order slit curvature at each point
        shear : array of shape (nord, ncol)
            second order slit curvature at each point
        """
        try:
            data = np.load(self.savefile, allow_pickle=True)
            logger.info("Slit curvature file: %s", self.savefile)
        except FileNotFoundError:
            logger.warning("No data for slit curvature found, setting it to 0.")
            data = {"tilt": None, "shear": None}

        tilt = data["tilt"]
        shear = data["shear"]
        return tilt, shear

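# A sketch of consuming the saved curvature product (filename illustrative).
# The docstrings above describe tilt and shear as the first and second order
# curvature, which suggests a column shift of roughly tilt*dy + shear*dy**2
# for the slit image at vertical offset dy from the order trace.
import numpy as np

data = np.load("harps_night1.shear.npz", allow_pickle=True)
tilt, shear = data["tilt"], data["shear"]  # each of shape (nord, ncol)
dy = 3.0                                   # rows above the order trace
dx = tilt * dy + shear * dy**2             # approximate column shift
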
class RectifyImage(Step):
    """Create a 2D image of the rectified orders"""

    def __init__(self, *args, **config):
        super().__init__(*args, **config)
        self._dependsOn += ["files", "orders", "curvature", "mask", "freq_comb"]
        self._loadDependsOn += ["files"]

        self.extraction_width = config["extraction_width"]
        self.input_files = config["input_files"]

    def filename(self, name):
        """str: Name of the rectified image file for a given input file"""
        return util.swap_extension(name, ".rectify.fits", path=self.output_dir)

    def run(self, files, orders, curvature, mask, freq_comb):
        """Rectify the traced orders of each input image and merge them into one 2D spectrum"""
        orders, column_range = orders
        tilt, shear = curvature
        wave = freq_comb

        files = files[self.input_files]

        rectified = {}
        for fname in tqdm(files, desc="Files"):
            img, head = self.instrument.load_fits(
                fname, self.arm, mask=mask, dtype="f8"
            )

            images, cr, xwd = rectify_image(
                img,
                orders,
                column_range,
                self.extraction_width,
                self.order_range,
                tilt,
                shear,
            )
            wavelength, image = merge_images(images, wave, cr, xwd)

            self.save(fname, image, wavelength, header=head)
            rectified[fname] = (wavelength, image)

        return rectified

    def save(self, fname, image, wavelength, header=None):
        """Save the rectified image and its wavelength axis to a FITS file"""
        # Change filename
        fname = self.filename(fname)
        # Create the HDU list: one image extension and one wavelength table
        primary = fits.PrimaryHDU(header=header)
        secondary = fits.ImageHDU(data=image)
        column = fits.Column(name="wavelength", array=wavelength, format="D")
        tertiary = fits.BinTableHDU.from_columns([column])
        hdus = fits.HDUList([primary, secondary, tertiary])
        # Save data to file
        hdus.writeto(fname, overwrite=True, output_verify="silentfix")

    def load(self, files):
        """Load the rectified images from disk"""
        files = files[self.input_files]

        rectified = {}
        for orig_fname in files:
            fname = self.filename(orig_fname)
            with fits.open(fname, memmap=False) as hdu:
                img = hdu[1].data
                wave = hdu[2].data["wavelength"]
            rectified[orig_fname] = (wave, img)

        return rectified

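# Reading one rectified product back by hand (filename illustrative): the
# image sits in the first extension and the wavelength axis in a binary
# table in the second, exactly as written by save() above.
from astropy.io import fits

with fits.open("night1_0001.rectify.fits", memmap=False) as hdu:
    image = hdu[1].data                     # rectified 2D image
    wavelength = hdu[2].data["wavelength"]  # wavelength for each column
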
class ScienceExtraction(CalibrationStep, ExtractionStep):
    """Extract the science spectra"""

    def __init__(self, *args, **config):
        super().__init__(*args, **config)
        self._dependsOn += ["norm_flat", "curvature", "scatter"]
        self._loadDependsOn += ["files"]

    def science_file(self, name):
        """Name of the science file on disk, based on the input file

        Parameters
        ----------
        name : str
            name of the observation file

        Returns
        -------
        name : str
            science file name
        """
        return util.swap_extension(name, ".science.fits", path=self.output_dir)

    def run(self, files, bias, orders, norm_flat, curvature, scatter, mask):
        """Extract the science spectra from the observations

        Parameters
        ----------
        files : list(str)
            list of observations
        bias : tuple
            results from the master bias step
        orders : tuple
            results from the order tracing step
        norm_flat : tuple
            results from the flat normalization
        curvature : tuple
            results from the slit curvature step
        scatter : tuple
            results from the background scatter step
        mask : array of shape (nrow, ncol)
            bad pixel map

        Returns
        -------
        heads : list(FITS header)
            FITS headers of each observation
        specs : list(array of shape (nord, ncol))
            extracted spectra
        sigmas : list(array of shape (nord, ncol))
            uncertainties of the extracted spectra
        slitfus : list(array of shape (nord, (extr_height * oversample + 1) + 1))
            slit illumination functions
        columns : list(array of shape (nord, 2))
            column ranges for each spectrum
        """
        heads, specs, sigmas, slitfus, columns = [], [], [], [], []
        for fname in tqdm(files, desc="Files"):
            logger.info("Science file: %s", fname)
            # Calibrate the input image
            im, head = self.calibrate([fname], mask, bias, norm_flat)
            # Optimally extract the science spectrum
            spec, sigma, slitfu, cr = self.extract(
                im, head, orders, curvature, scatter=scatter
            )

            # Save the spectrum to disk
            self.save(fname, head, spec, sigma, slitfu, cr)
            heads.append(head)
            specs.append(spec)
            sigmas.append(sigma)
            slitfus.append(slitfu)
            columns.append(cr)

        return heads, specs, sigmas, slitfus, columns

    def save(self, fname, head, spec, sigma, slitfu, column_range):
        """Save the results of one extraction

        Parameters
        ----------
        fname : str
            filename to save to
        head : FITS header
            FITS header
        spec : array of shape (nord, ncol)
            extracted spectrum
        sigma : array of shape (nord, ncol)
            uncertainties of the extracted spectrum
        slitfu : array of shape (nord, (extr_height * oversample + 1) + 1)
            slit illumination function
        column_range : array of shape (nord, 2)
            range of columns that contain spectrum
        """
        nameout = self.science_file(fname)
        echelle.save(
            nameout, head, spec=spec, sig=sigma, slitfu=slitfu, columns=column_range
        )
        logger.info("Created science file: %s", nameout)

    def load(self, files):
        """Load all science spectra from disk

        Returns
        -------
        heads : list(FITS header)
            FITS headers of each observation
        specs : list(array of shape (nord, ncol))
            extracted spectra
        sigmas : list(array of shape (nord, ncol))
            uncertainties of the extracted spectra
        slitfus : None
            slit illumination functions are not read back from disk
        columns : list(array of shape (nord, 2))
            column ranges for each spectrum
        """
        files = files["science"]
        files = [self.science_file(fname) for fname in files]

        if len(files) == 0:
            raise FileNotFoundError("No science files found to load")

        logger.info("Science files: %s", files)

        heads, specs, sigmas, columns = [], [], [], []
        for fname in files:
            science = echelle.read(
                fname,
                continuum_normalization=False,
                barycentric_correction=False,
                radial_velociy_correction=False,
            )
            heads.append(science.header)
            specs.append(science["spec"])
            sigmas.append(science["sig"])
            columns.append(science["columns"])

        return heads, specs, sigmas, None, columns

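# A sketch of loading a single science product with the package's echelle
# module, mirroring load() above (the filename is illustrative and the
# keyword spelling follows the call in load()):
from pyreduce import echelle

science = echelle.read(
    "night1_0001.science.fits",
    continuum_normalization=False,
    barycentric_correction=False,
    radial_velociy_correction=False,
)
spec, sig = science["spec"], science["sig"]  # each of shape (nord, ncol)
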
class ContinuumNormalization(Step):
    """Determine the continuum of each observation"""

    def __init__(self, *args, **config):
        super().__init__(*args, **config)
        self._dependsOn += ["science", "freq_comb", "norm_flat"]
        self._loadDependsOn += ["norm_flat", "science"]

    @property
    def savefile(self):
        """str: savefile name"""
        return join(self.output_dir, self.prefix + ".cont.npz")

    def run(self, science, freq_comb, norm_flat):
        """Determine the continuum of each observation
        and splice the orders together

        Parameters
        ----------
        science : tuple
            results from the science extraction step
        freq_comb : tuple
            results from the freq_comb step (or wavecal if those don't exist)
        norm_flat : tuple
            results from the normalized flatfield step

        Returns
        -------
        heads : list(FITS header)
            FITS headers of each observation
        specs : list(array of shape (nord, ncol))
            extracted spectra
        sigmas : list(array of shape (nord, ncol))
            uncertainties of the extracted spectra
        conts : list(array of shape (nord, ncol))
            continuum for each spectrum
        columns : list(array of shape (nord, 2))
            column ranges for each spectrum
        """
        wave = freq_comb
        heads, specs, sigmas, _, columns = science
        norm, blaze = norm_flat

        logger.info("Continuum normalization")
        conts = [None for _ in specs]
        for j, (spec, sigma) in enumerate(zip(specs, sigmas, strict=False)):
            logger.info("Splicing orders")
            specs[j], wave, blaze, sigmas[j] = splice_orders(
                spec,
                wave,
                blaze,
                sigma,
                scaling=True,
                plot=self.plot,
                plot_title=self.plot_title,
            )
            logger.info("Normalizing continuum")
            conts[j] = continuum_normalize(
                specs[j],
                wave,
                blaze,
                sigmas[j],
                plot=self.plot,
                plot_title=self.plot_title,
            )

        self.save(heads, specs, sigmas, conts, columns)
        return heads, specs, sigmas, conts, columns

    def save(self, heads, specs, sigmas, conts, columns):
        """Save the results from the continuum normalization

        Parameters
        ----------
        heads : list(FITS header)
            FITS headers of each observation
        specs : list(array of shape (nord, ncol))
            extracted spectra
        sigmas : list(array of shape (nord, ncol))
            uncertainties of the extracted spectra
        conts : list(array of shape (nord, ncol))
            continuum for each spectrum
        columns : list(array of shape (nord, 2))
            column ranges for each spectrum
        """
        value = {
            "heads": heads,
            "specs": specs,
            "sigmas": sigmas,
            "conts": conts,
            "columns": columns,
        }
        joblib.dump(value, self.savefile)
        logger.info("Created continuum normalization file: %s", self.savefile)

    def load(self, norm_flat, science):
        """Load the results from the continuum normalization

        Returns
        -------
        heads : list(FITS header)
            FITS headers of each observation
        specs : list(array of shape (nord, ncol))
            extracted spectra
        sigmas : list(array of shape (nord, ncol))
            uncertainties of the extracted spectra
        conts : list(array of shape (nord, ncol))
            continuum for each spectrum
        columns : list(array of shape (nord, 2))
            column ranges for each spectrum
        """
        try:
            data = joblib.load(self.savefile)
            logger.info("Continuum normalization file: %s", self.savefile)
        except FileNotFoundError:
            # Fall back to the unnormalized science spectra, with the blaze as continuum
            logger.warning(
                "No continuum normalized data found. Using unnormalized results instead."
            )
            heads, specs, sigmas, _, columns = science
            norm, blaze = norm_flat
            conts = [blaze for _ in specs]
            data = {
                "heads": heads,
                "specs": specs,
                "sigmas": sigmas,
                "conts": conts,
                "columns": columns,
            }
        heads = data["heads"]
        specs = data["specs"]
        sigmas = data["sigmas"]
        conts = data["conts"]
        columns = data["columns"]
        return heads, specs, sigmas, conts, columns

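# Despite the .npz extension, the continuum products are one dict persisted
# with joblib, as written by save() above. A sketch of reading them back and
# applying the continuum (filename illustrative):
import joblib

data = joblib.load("harps_night1.cont.npz")
normalized = [
    spec / cont
    for spec, cont in zip(data["specs"], data["conts"])
]  # continuum-normalized, order-spliced spectra
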
class Finalize(Step):
    """Create the final output files"""

    def __init__(self, *args, **config):
        super().__init__(*args, **config)
        self._dependsOn += ["continuum", "freq_comb", "config"]
        self.filename = config["filename"]

    def output_file(self, number, name):
        """str: output file name"""
        out = self.filename.format(
            instrument=self.instrument.name,
            night=self.night,
            arm=self.arm,
            number=number,
            input=name,
        )
        return join(self.output_dir, out)

    def save_config_to_header(self, head, config, prefix="PR"):
        """Flatten the (nested) configuration into HIERARCH header cards"""
        for key, value in config.items():
            if isinstance(value, dict):
                head = self.save_config_to_header(
                    head, value, prefix=f"{prefix} {key.upper()}"
                )
            else:
                if key in ["plot", "$schema", "__skip_existing__"]:
                    # Skip values that are not relevant to the file product
                    continue
                if value is None:
                    value = "null"
                elif not np.isscalar(value):
                    value = str(value)
                head[f"HIERARCH {prefix} {key.upper()}"] = value
        return head

    def run(self, continuum, freq_comb, config):
        """Create the final output files

        This includes:
         - the heliocentric correction
         - creating one echelle file per observation

        Parameters
        ----------
        continuum : tuple
            results from the continuum normalization
        freq_comb : tuple
            results from the frequency comb step (or wavelength calibration)
        config : dict
            the reduction configuration, stored in the output headers

        Returns
        -------
        fnames : list(str)
            names of the output files
        """
        heads, specs, sigmas, conts, columns = continuum
        wave = freq_comb

        fnames = []
        # Combine science with wavecal and continuum
        for i, (head, spec, sigma, blaze, column) in enumerate(
            zip(heads, specs, sigmas, conts, columns, strict=False)
        ):
            head["e_erscle"] = ("absolute", "error scale")

            # Add heliocentric correction
            try:
                rv_corr, bjd = util.helcorr(
                    head["e_obslon"],
                    head["e_obslat"],
                    head["e_obsalt"],
                    head["e_ra"],
                    head["e_dec"],
                    head["e_jd"],
                )

                logger.debug("Heliocentric correction: %f km/s", rv_corr)
                logger.debug("Heliocentric Julian Date: %s", str(bjd))
            except KeyError:
                logger.warning("Could not calculate heliocentric correction")
                rv_corr = 0
                bjd = head["e_jd"]

            head["barycorr"] = rv_corr
            head["e_jd"] = bjd
            head["HIERARCH PR_version"] = __version__

            head = self.save_config_to_header(head, config)

            if self.plot:
                plt.plot(wave.T, (spec / blaze).T)
                if self.plot_title is not None:
                    plt.title(self.plot_title)
                util.show_or_save(f"finalize_{i}")

            fname = self.save(i, head, spec, sigma, blaze, wave, column)
            fnames.append(fname)
        return fnames

    def save(self, i, head, spec, sigma, cont, wave, columns):
        """Save one output spectrum to disk

        Parameters
        ----------
        i : int
            running number of the file
        head : FITS header
            FITS header
        spec : array of shape (nord, ncol)
            final spectrum
        sigma : array of shape (nord, ncol)
            final uncertainties
        cont : array of shape (nord, ncol)
            final continuum scales
        wave : array of shape (nord, ncol)
            wavelength solution
        columns : array of shape (nord, 2)
            columns that carry signal

        Returns
        -------
        out_file : str
            name of the output file
        """
        original_name = os.path.splitext(head["e_input"])[0]
        out_file = self.output_file(i, original_name)
        echelle.save(
            out_file, head, spec=spec, sig=sigma, cont=cont, wave=wave, columns=columns
        )
        logger.info("Final science file: %s", out_file)
        return out_file
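
# A self-contained sketch of the header flattening performed by
# save_config_to_header above (flatten_config is an illustrative standalone
# copy; the skip-list for keys like "plot" is omitted): nested config
# sections become "HIERARCH PR <SECTION> <KEY>" cards, None becomes "null",
# and non-scalar values are stringified.
import numpy as np
from astropy.io import fits

def flatten_config(head, config, prefix="PR"):
    for key, value in config.items():
        if isinstance(value, dict):
            head = flatten_config(head, value, prefix=f"{prefix} {key.upper()}")
        else:
            if value is None:
                value = "null"
            elif not np.isscalar(value):
                value = str(value)
            head[f"HIERARCH {prefix} {key.upper()}"] = value
    return head

head = flatten_config(fits.Header(), {"science": {"oversampling": 8, "degree": [4, 2]}})
# head now contains, e.g.:
#   HIERARCH PR SCIENCE OVERSAMPLING = 8
#   HIERARCH PR SCIENCE DEGREE = '[4, 2]'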