pyreduce-astro 0.6.0b5__cp311-cp311-win_amd64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (158)
  1. pyreduce/__init__.py +67 -0
  2. pyreduce/__main__.py +106 -0
  3. pyreduce/clib/Release/_slitfunc_2d.cp311-win_amd64.exp +0 -0
  4. pyreduce/clib/Release/_slitfunc_2d.cp311-win_amd64.lib +0 -0
  5. pyreduce/clib/Release/_slitfunc_2d.obj +0 -0
  6. pyreduce/clib/Release/_slitfunc_bd.cp311-win_amd64.exp +0 -0
  7. pyreduce/clib/Release/_slitfunc_bd.cp311-win_amd64.lib +0 -0
  8. pyreduce/clib/Release/_slitfunc_bd.obj +0 -0
  9. pyreduce/clib/__init__.py +0 -0
  10. pyreduce/clib/_slitfunc_2d.cp311-win_amd64.pyd +0 -0
  11. pyreduce/clib/_slitfunc_bd.cp311-win_amd64.pyd +0 -0
  12. pyreduce/clib/build_extract.py +75 -0
  13. pyreduce/clib/slit_func_2d_xi_zeta_bd.c +1313 -0
  14. pyreduce/clib/slit_func_2d_xi_zeta_bd.h +55 -0
  15. pyreduce/clib/slit_func_bd.c +362 -0
  16. pyreduce/clib/slit_func_bd.h +17 -0
  17. pyreduce/clipnflip.py +147 -0
  18. pyreduce/combine_frames.py +855 -0
  19. pyreduce/configuration.py +186 -0
  20. pyreduce/continuum_normalization.py +329 -0
  21. pyreduce/cwrappers.py +404 -0
  22. pyreduce/datasets.py +231 -0
  23. pyreduce/echelle.py +413 -0
  24. pyreduce/estimate_background_scatter.py +129 -0
  25. pyreduce/extract.py +1361 -0
  26. pyreduce/extraction_width.py +77 -0
  27. pyreduce/instruments/__init__.py +0 -0
  28. pyreduce/instruments/andes.json +61 -0
  29. pyreduce/instruments/andes.py +102 -0
  30. pyreduce/instruments/common.json +46 -0
  31. pyreduce/instruments/common.py +675 -0
  32. pyreduce/instruments/crires_plus.json +63 -0
  33. pyreduce/instruments/crires_plus.py +103 -0
  34. pyreduce/instruments/filters.py +195 -0
  35. pyreduce/instruments/harpn.json +136 -0
  36. pyreduce/instruments/harpn.py +201 -0
  37. pyreduce/instruments/harps.json +155 -0
  38. pyreduce/instruments/harps.py +310 -0
  39. pyreduce/instruments/instrument_info.py +140 -0
  40. pyreduce/instruments/instrument_schema.json +221 -0
  41. pyreduce/instruments/jwst_miri.json +53 -0
  42. pyreduce/instruments/jwst_miri.py +29 -0
  43. pyreduce/instruments/jwst_niriss.json +52 -0
  44. pyreduce/instruments/jwst_niriss.py +98 -0
  45. pyreduce/instruments/lick_apf.json +53 -0
  46. pyreduce/instruments/lick_apf.py +35 -0
  47. pyreduce/instruments/mcdonald.json +59 -0
  48. pyreduce/instruments/mcdonald.py +123 -0
  49. pyreduce/instruments/metis_ifu.json +63 -0
  50. pyreduce/instruments/metis_ifu.py +45 -0
  51. pyreduce/instruments/metis_lss.json +65 -0
  52. pyreduce/instruments/metis_lss.py +45 -0
  53. pyreduce/instruments/micado.json +53 -0
  54. pyreduce/instruments/micado.py +45 -0
  55. pyreduce/instruments/neid.json +51 -0
  56. pyreduce/instruments/neid.py +154 -0
  57. pyreduce/instruments/nirspec.json +56 -0
  58. pyreduce/instruments/nirspec.py +215 -0
  59. pyreduce/instruments/nte.json +47 -0
  60. pyreduce/instruments/nte.py +42 -0
  61. pyreduce/instruments/uves.json +59 -0
  62. pyreduce/instruments/uves.py +46 -0
  63. pyreduce/instruments/xshooter.json +66 -0
  64. pyreduce/instruments/xshooter.py +39 -0
  65. pyreduce/make_shear.py +606 -0
  66. pyreduce/masks/mask_crires_plus_det1.fits.gz +0 -0
  67. pyreduce/masks/mask_crires_plus_det2.fits.gz +0 -0
  68. pyreduce/masks/mask_crires_plus_det3.fits.gz +0 -0
  69. pyreduce/masks/mask_ctio_chiron.fits.gz +0 -0
  70. pyreduce/masks/mask_elodie.fits.gz +0 -0
  71. pyreduce/masks/mask_feros3.fits.gz +0 -0
  72. pyreduce/masks/mask_flames_giraffe.fits.gz +0 -0
  73. pyreduce/masks/mask_harps_blue.fits.gz +0 -0
  74. pyreduce/masks/mask_harps_red.fits.gz +0 -0
  75. pyreduce/masks/mask_hds_blue.fits.gz +0 -0
  76. pyreduce/masks/mask_hds_red.fits.gz +0 -0
  77. pyreduce/masks/mask_het_hrs_2x5.fits.gz +0 -0
  78. pyreduce/masks/mask_jwst_miri_lrs_slitless.fits.gz +0 -0
  79. pyreduce/masks/mask_jwst_niriss_gr700xd.fits.gz +0 -0
  80. pyreduce/masks/mask_lick_apf_.fits.gz +0 -0
  81. pyreduce/masks/mask_mcdonald.fits.gz +0 -0
  82. pyreduce/masks/mask_nes.fits.gz +0 -0
  83. pyreduce/masks/mask_nirspec_nirspec.fits.gz +0 -0
  84. pyreduce/masks/mask_sarg.fits.gz +0 -0
  85. pyreduce/masks/mask_sarg_2x2a.fits.gz +0 -0
  86. pyreduce/masks/mask_sarg_2x2b.fits.gz +0 -0
  87. pyreduce/masks/mask_subaru_hds_red.fits.gz +0 -0
  88. pyreduce/masks/mask_uves_blue.fits.gz +0 -0
  89. pyreduce/masks/mask_uves_blue_binned_2_2.fits.gz +0 -0
  90. pyreduce/masks/mask_uves_middle.fits.gz +0 -0
  91. pyreduce/masks/mask_uves_middle_2x2_split.fits.gz +0 -0
  92. pyreduce/masks/mask_uves_middle_binned_2_2.fits.gz +0 -0
  93. pyreduce/masks/mask_uves_red.fits.gz +0 -0
  94. pyreduce/masks/mask_uves_red_2x2.fits.gz +0 -0
  95. pyreduce/masks/mask_uves_red_2x2_split.fits.gz +0 -0
  96. pyreduce/masks/mask_uves_red_binned_2_2.fits.gz +0 -0
  97. pyreduce/masks/mask_xshooter_nir.fits.gz +0 -0
  98. pyreduce/rectify.py +138 -0
  99. pyreduce/reduce.py +2205 -0
  100. pyreduce/settings/settings_ANDES.json +89 -0
  101. pyreduce/settings/settings_CRIRES_PLUS.json +89 -0
  102. pyreduce/settings/settings_HARPN.json +73 -0
  103. pyreduce/settings/settings_HARPS.json +69 -0
  104. pyreduce/settings/settings_JWST_MIRI.json +55 -0
  105. pyreduce/settings/settings_JWST_NIRISS.json +55 -0
  106. pyreduce/settings/settings_LICK_APF.json +62 -0
  107. pyreduce/settings/settings_MCDONALD.json +58 -0
  108. pyreduce/settings/settings_METIS_IFU.json +77 -0
  109. pyreduce/settings/settings_METIS_LSS.json +77 -0
  110. pyreduce/settings/settings_MICADO.json +78 -0
  111. pyreduce/settings/settings_NEID.json +73 -0
  112. pyreduce/settings/settings_NIRSPEC.json +58 -0
  113. pyreduce/settings/settings_NTE.json +60 -0
  114. pyreduce/settings/settings_UVES.json +54 -0
  115. pyreduce/settings/settings_XSHOOTER.json +78 -0
  116. pyreduce/settings/settings_pyreduce.json +178 -0
  117. pyreduce/settings/settings_schema.json +827 -0
  118. pyreduce/tools/__init__.py +0 -0
  119. pyreduce/tools/combine.py +117 -0
  120. pyreduce/trace_orders.py +645 -0
  121. pyreduce/util.py +1288 -0
  122. pyreduce/wavecal/MICADO_HK_3arcsec_chip5.npz +0 -0
  123. pyreduce/wavecal/atlas/thar.fits +4946 -13
  124. pyreduce/wavecal/atlas/thar_list.txt +4172 -0
  125. pyreduce/wavecal/atlas/une.fits +0 -0
  126. pyreduce/wavecal/convert.py +38 -0
  127. pyreduce/wavecal/crires_plus_J1228_Open_det1.npz +0 -0
  128. pyreduce/wavecal/crires_plus_J1228_Open_det2.npz +0 -0
  129. pyreduce/wavecal/crires_plus_J1228_Open_det3.npz +0 -0
  130. pyreduce/wavecal/harpn_harpn_2D.npz +0 -0
  131. pyreduce/wavecal/harps_blue_2D.npz +0 -0
  132. pyreduce/wavecal/harps_blue_pol_2D.npz +0 -0
  133. pyreduce/wavecal/harps_red_2D.npz +0 -0
  134. pyreduce/wavecal/harps_red_pol_2D.npz +0 -0
  135. pyreduce/wavecal/mcdonald.npz +0 -0
  136. pyreduce/wavecal/metis_lss_l_2D.npz +0 -0
  137. pyreduce/wavecal/metis_lss_m_2D.npz +0 -0
  138. pyreduce/wavecal/nirspec_K2.npz +0 -0
  139. pyreduce/wavecal/uves_blue_360nm_2D.npz +0 -0
  140. pyreduce/wavecal/uves_blue_390nm_2D.npz +0 -0
  141. pyreduce/wavecal/uves_blue_437nm_2D.npz +0 -0
  142. pyreduce/wavecal/uves_middle_2x2_2D.npz +0 -0
  143. pyreduce/wavecal/uves_middle_565nm_2D.npz +0 -0
  144. pyreduce/wavecal/uves_middle_580nm_2D.npz +0 -0
  145. pyreduce/wavecal/uves_middle_600nm_2D.npz +0 -0
  146. pyreduce/wavecal/uves_middle_665nm_2D.npz +0 -0
  147. pyreduce/wavecal/uves_middle_860nm_2D.npz +0 -0
  148. pyreduce/wavecal/uves_red_580nm_2D.npz +0 -0
  149. pyreduce/wavecal/uves_red_600nm_2D.npz +0 -0
  150. pyreduce/wavecal/uves_red_665nm_2D.npz +0 -0
  151. pyreduce/wavecal/uves_red_760nm_2D.npz +0 -0
  152. pyreduce/wavecal/uves_red_860nm_2D.npz +0 -0
  153. pyreduce/wavecal/xshooter_nir.npz +0 -0
  154. pyreduce/wavelength_calibration.py +1873 -0
  155. pyreduce_astro-0.6.0b5.dist-info/METADATA +113 -0
  156. pyreduce_astro-0.6.0b5.dist-info/RECORD +158 -0
  157. pyreduce_astro-0.6.0b5.dist-info/WHEEL +4 -0
  158. pyreduce_astro-0.6.0b5.dist-info/licenses/LICENSE +674 -0
pyreduce/reduce.py ADDED
@@ -0,0 +1,2205 @@
1
+ """
2
+ REDUCE script for spectrograph data
3
+
4
+ Authors
5
+ -------
6
+ Ansgar Wehrhahn (ansgar.wehrhahn@physics.uu.se)
7
+ Thomas Marquart (thomas.marquart@physics.uu.se)
8
+ Alexis Lavail (alexis.lavail@physics.uu.se)
9
+ Nikolai Piskunov (nikolai.piskunov@physics.uu.se)
10
+
11
+ Version
12
+ -------
13
+ 1.0 - Initial PyReduce
14
+
15
+ License
16
+ --------
17
+ ...
18
+
19
+ """
20
+
21
+ import glob
22
+ import logging
23
+ import os.path
24
+ import warnings
25
+ from itertools import product
26
+ from os.path import join
27
+
28
+ import joblib
29
+ import matplotlib.pyplot as plt
30
+ import numpy as np
31
+ from astropy.io import fits
32
+ from astropy.io.fits.verify import VerifyWarning
33
+
34
+ warnings.simplefilter("ignore", category=VerifyWarning)
35
+
36
+
37
+ from tqdm import tqdm
38
+
39
+ # PyReduce subpackages
40
+ from . import __version__, echelle, instruments, util
41
+ from .combine_frames import (
42
+ combine_bias,
43
+ combine_calibrate,
44
+ combine_polynomial,
45
+ )
46
+ from .configuration import load_config
47
+ from .continuum_normalization import continuum_normalize, splice_orders
48
+ from .estimate_background_scatter import estimate_background_scatter
49
+ from .extract import extract
50
+ from .instruments.instrument_info import load_instrument
51
+ from .make_shear import Curvature as CurvatureModule
52
+ from .rectify import merge_images, rectify_image
53
+ from .trace_orders import mark_orders
54
+ from .wavelength_calibration import LineList, WavelengthCalibrationComb
55
+ from .wavelength_calibration import WavelengthCalibration as WavelengthCalibrationModule
56
+ from .wavelength_calibration import (
57
+ WavelengthCalibrationInitialize as WavelengthCalibrationInitializeModule,
58
+ )
59
+
60
+ # TODO Naming of functions and modules
61
+ # TODO License
62
+
63
+ # TODO automatic determination of the extraction width
64
+ logger = logging.getLogger(__name__)
65
+
66
+
67
+ def main(
68
+ instrument,
69
+ target,
70
+ night=None,
71
+ modes=None,
72
+ steps="all",
73
+ base_dir=None,
74
+ input_dir=None,
75
+ output_dir=None,
76
+ configuration=None,
77
+ order_range=None,
78
+ allow_calibration_only=False,
79
+ skip_existing=False,
80
+ ):
81
+ r"""
82
+ Main entry point for REDUCE scripts.
83
+ Default values can be changed as required if reduce is used as a script.
84
+ Finds input directories and loops over observation nights and instrument modes; a usage sketch follows the function.
85
+
86
+ Parameters
87
+ ----------
88
+ instrument : str, list[str]
89
+ instrument used for the observation (e.g. UVES, HARPS)
90
+ target : str, list[str]
91
+ the observed star, as named in the folder structure/fits headers
92
+ night : str, list[str]
93
+ the observation nights to reduce, as named in the folder structure. Accepts bash wildcards (e.g. \*, ?), but then relies on the folder structure to restrict the nights
94
+ modes : str, list[str], dict[{instrument}:list], None, optional
95
+ the instrument modes to use, if None will use all known modes for the current instrument. See instruments for possible options
96
+ steps : tuple(str), "all", optional
97
+ which steps of the reduction process to perform
98
+ the possible steps are: "bias", "flat", "orders", "norm_flat", "wavecal", "science"
99
+ alternatively set steps to "all", which is equivalent to setting all steps
100
+ Note that the later steps require the previous intermediary products to exist and raise an exception otherwise
101
+ base_dir : str, optional
102
+ base data directory that Reduce should work in, is prefixed to input_dir and output_dir (default: use settings_pyreduce.json)
103
+ input_dir : str, optional
104
+ input directory containing raw files. Can contain placeholders {instrument}, {target}, {night}, {mode} as well as wildcards. If relative will use base_dir as root (default: use settings_pyreduce.json)
105
+ output_dir : str, optional
106
+ output directory for intermediary and final results. Can contain placeholders {instrument}, {target}, {night}, {mode}, but no wildcards. If relative will use base_dir as root (default: use settings_pyreduce.json)
107
+ configuration : dict[str:obj], str, list[str], dict[{instrument}:dict,str], optional
108
+ configuration file for the current run, contains parameters for different parts of reduce. Can be a path to a json file, or a dict with configurations for the different instruments. When a list, the order must be the same as instruments (default: settings_{instrument.upper()}.json)
109
+ """
110
+ if target is None or np.isscalar(target):
111
+ target = [target]
112
+ if night is None or np.isscalar(night):
113
+ night = [night]
114
+
115
+ output = []
116
+
117
+ # Loop over everything
118
+
119
+ # settings: default settings of PyReduce
120
+ # config: parameters for the current reduction
121
+ # info: constant, instrument specific parameters
122
+ config = load_config(configuration, instrument, 0)
123
+ if isinstance(instrument, str):
124
+ instrument = instruments.instrument_info.load_instrument(instrument)
125
+ info = instrument.info
126
+
127
+ # load default settings from settings_pyreduce.json
128
+ if base_dir is None:
129
+ base_dir = config["reduce"]["base_dir"]
130
+ if input_dir is None:
131
+ input_dir = config["reduce"]["input_dir"]
132
+ if output_dir is None:
133
+ output_dir = config["reduce"]["output_dir"]
134
+
135
+ input_dir = join(base_dir, input_dir)
136
+ output_dir = join(base_dir, output_dir)
137
+
138
+ if modes is None:
139
+ modes = info["modes"]
140
+ if np.isscalar(modes):
141
+ modes = [modes]
142
+
143
+ for t, n, m in product(target, night, modes):
144
+ log_file = join(
145
+ base_dir.format(instrument=str(instrument), mode=m, night=n, target=t),
146
+ f"logs/{t}.log",
147
+ )
148
+ util.start_logging(log_file)
149
+ # find input files and sort them by type
150
+ files = instrument.sort_files(
151
+ input_dir,
152
+ t,
153
+ n,
154
+ mode=m,
155
+ **config["instrument"],
156
+ allow_calibration_only=allow_calibration_only,
157
+ )
158
+ if len(files) == 0:
159
+ logger.warning(
160
+ "No files found for instrument: %s, target: %s, night: %s, mode: %s in folder: %s",
161
+ instrument,
162
+ t,
163
+ n,
164
+ m,
165
+ input_dir,
166
+ )
167
+ continue
168
+ for k, f in files:
169
+ logger.info("Settings:")
170
+ for key, value in k.items():
171
+ logger.info("%s: %s", key, value)
172
+ logger.debug("Files:\n%s", f)
173
+
174
+ reducer = Reducer(
175
+ f,
176
+ output_dir,
177
+ k.get("target"),
178
+ instrument,
179
+ m,
180
+ k.get("night"),
181
+ config,
182
+ order_range=order_range,
183
+ skip_existing=skip_existing,
184
+ )
185
+ # try:
186
+ data = reducer.run_steps(steps=steps)
187
+ output.append(data)
188
+ # except Exception as e:
189
+ # logger.error("Reduction failed with error message: %s", str(e))
190
+ # logger.info("------------")
191
+ return output
192
+
193
+
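Editor's sketch (not part of the wheel): a minimal call to the main() entry point defined above; the instrument, target, and night values are placeholders for the example, and the directories fall back to settings_pyreduce.json as described in the docstring.

    from pyreduce.reduce import main

    # "UVES", "HD132205", and the night are illustrative values only
    output = main(
        "UVES",
        "HD132205",
        night="2010-04-01",
        modes="middle",
        steps=("bias", "flat", "orders", "wavecal", "science"),
    )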
194
+ class Step:
195
+ """Parent class for all steps"""
196
+
197
+ def __init__(
198
+ self, instrument, mode, target, night, output_dir, order_range, **config
199
+ ):
200
+ self._dependsOn = []
201
+ self._loadDependsOn = []
202
+ #:str: Name of the instrument
203
+ self.instrument = instrument
204
+ #:str: Name of the instrument mode
205
+ self.mode = mode
206
+ #:str: Name of the observation target
207
+ self.target = target
208
+ #:str: Date of the observation (as a string)
209
+ self.night = night
210
+ #:tuple(int, int): First and Last(+1) order to process
211
+ self.order_range = order_range
212
+ #:bool: Whether to plot the results or the progress of this step
213
+ self.plot = config.get("plot", False)
214
+ #:str: Title used in the plots, if any
215
+ self.plot_title = config.get("plot_title", None)
216
+ self._output_dir = output_dir
217
+
218
+ def run(self, files, *args): # pragma: no cover
219
+ """Execute the current step
220
+
221
+ This should fail if files are missing or anything else goes wrong.
222
+ If the user does not want to run this step, they should not specify it in steps.
223
+
224
+ Parameters
225
+ ----------
226
+ files : list(str)
227
+ data files required for this step
228
+
229
+ Raises
230
+ ------
231
+ NotImplementedError
232
+ needs to be implemented for each step
233
+ """
234
+ raise NotImplementedError
235
+
236
+ def save(self, *args): # pragma: no cover
237
+ """Save the results of this step
238
+
239
+ Parameters
240
+ ----------
241
+ *args : obj
242
+ things to save
243
+
244
+ Raises
245
+ ------
246
+ NotImplementedError
247
+ Needs to be implemented for each step
248
+ """
249
+ raise NotImplementedError
250
+
251
+ def load(self): # pragma: no cover
252
+ """Load results from a previous execution
253
+
254
+ If this raises a FileNotFoundError, run() will be used instead
255
+ For calibration steps it is preferred however to print a warning
256
+ and return None. Other modules can then use a default value instead.
257
+
258
+ Raises
259
+ ------
260
+ NotImplementedError
261
+ Needs to be implemented for each step
262
+ """
263
+ raise NotImplementedError
264
+
265
+ @property
266
+ def dependsOn(self):
267
+ """list(str): Steps that are required before running this step"""
268
+ return list(set(self._dependsOn))
269
+
270
+ @property
271
+ def loadDependsOn(self):
272
+ """list(str): Steps that are required before loading data from this step"""
273
+ return list(set(self._loadDependsOn))
274
+
275
+ @property
276
+ def output_dir(self):
277
+ """str: output directory, may contain tags {instrument}, {night}, {target}, {mode}"""
278
+ return self._output_dir.format(
279
+ instrument=self.instrument.name.upper(),
280
+ target=self.target,
281
+ night=self.night,
282
+ mode=self.mode,
283
+ )
284
+
285
+ @property
286
+ def prefix(self):
287
+ """str: temporary file prefix"""
288
+ i = self.instrument.name.lower()
289
+ if self.mode is not None and self.mode != "":
290
+ m = self.mode.lower()
291
+ return f"{i}_{m}"
292
+ else:
293
+ return i
294
+
295
+
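Editor's sketch of how a concrete step plugs into this machinery (DummyStep is hypothetical): the results of the steps listed in _dependsOn are passed to run(), and load() either recovers a previous result or raises FileNotFoundError so that run() is used instead.

    class DummyStep(Step):
        def __init__(self, *args, **config):
            super().__init__(*args, **config)
            self._dependsOn += ["mask"]  # run() will receive the mask result

        def run(self, files, mask):
            # compute and return this step's product
            return len(files)

        def load(self):
            # this step keeps no intermediate product on disk
            raise FileNotFoundError("DummyStep stores no intermediate result")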
296
+ class CalibrationStep(Step):
297
+ def __init__(self, *args, **config):
298
+ super().__init__(*args, **config)
299
+ self._dependsOn += ["mask", "bias"]
300
+
301
+ #:{'number_of_files', 'exposure_time', 'mean', 'median', 'none'}: how to adjust for differences between the bias and flat field exposure times
302
+ self.bias_scaling = config["bias_scaling"]
303
+ #:{'divide', 'none'}: how to apply the normalized flat field
304
+ self.norm_scaling = config["norm_scaling"]
305
+
306
+ def calibrate(self, files, mask, bias=None, norm_flat=None):
307
+ bias, bhead = bias if bias is not None else (None, None)
308
+ norm, blaze = norm_flat if norm_flat is not None else (None, None)
309
+ orig, thead = combine_calibrate(
310
+ files,
311
+ self.instrument,
312
+ self.mode,
313
+ mask,
314
+ bias=bias,
315
+ bhead=bhead,
316
+ norm=norm,
317
+ bias_scaling=self.bias_scaling,
318
+ norm_scaling=self.norm_scaling,
319
+ plot=self.plot,
320
+ plot_title=self.plot_title,
321
+ )
322
+
323
+ return orig, thead
324
+
325
+
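Editor's sketch of what an exposure-time bias_scaling conceptually amounts to; the authoritative logic lives in combine_frames.combine_calibrate, and the numbers here are purely illustrative.

    import numpy as np

    bias, bias_texp = np.full((4, 4), 100.0), 10.0   # master bias and its exposure time
    flat, flat_texp = np.full((4, 4), 1000.0), 20.0  # raw frame and its exposure time
    # scale the bias to the exposure time of the frame being calibrated
    calibrated = flat - bias * (flat_texp / bias_texp)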
326
+ class ExtractionStep(Step):
327
+ def __init__(self, *args, **config):
328
+ super().__init__(*args, **config)
329
+ self._dependsOn += [
330
+ "orders",
331
+ ]
332
+
333
+ #:{'arc', 'optimal'}: Extraction method to use
334
+ self.extraction_method = config["extraction_method"]
335
+ if self.extraction_method == "arc":
336
+ #:dict: arguments for the extraction
337
+ self.extraction_kwargs = {
338
+ "extraction_width": config["extraction_width"],
339
+ "sigma_cutoff": config["extraction_cutoff"],
340
+ "collapse_function": config["collapse_function"],
341
+ }
342
+ elif self.extraction_method == "optimal":
343
+ self.extraction_kwargs = {
344
+ "extraction_width": config["extraction_width"],
345
+ "lambda_sf": config["smooth_slitfunction"],
346
+ "lambda_sp": config["smooth_spectrum"],
347
+ "osample": config["oversampling"],
348
+ "swath_width": config["swath_width"],
349
+ "sigma_cutoff": config["extraction_cutoff"],
350
+ "maxiter": config["maxiter"],
351
+ }
352
+ else:
353
+ raise ValueError(
354
+ f"Extraction method {self.extraction_method} not supported for step 'wavecal'"
355
+ )
356
+
357
+ def extract(self, img, head, orders, curvature, scatter=None):
358
+ orders, column_range = orders if orders is not None else (None, None)
359
+ tilt, shear = curvature if curvature is not None else (None, None)
360
+
361
+ data, unc, slitfu, cr = extract(
362
+ img,
363
+ orders,
364
+ gain=head["e_gain"],
365
+ readnoise=head["e_readn"],
366
+ dark=head["e_drk"],
367
+ column_range=column_range,
368
+ extraction_type=self.extraction_method,
369
+ order_range=self.order_range,
370
+ plot=self.plot,
371
+ plot_title=self.plot_title,
372
+ tilt=tilt,
373
+ shear=shear,
374
+ scatter=scatter,
375
+ **self.extraction_kwargs,
376
+ )
377
+ return data, unc, slitfu, cr
378
+
379
+
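The two branches above expect different configuration keys; an editor's sketch of matching config fragments follows (the values are illustrative, not the package defaults).

    arc_config = {
        "extraction_method": "arc",
        "extraction_width": 0.5,       # fraction of the order separation
        "extraction_cutoff": 25,       # sigma clipping threshold
        "collapse_function": "median",
    }
    optimal_config = {
        "extraction_method": "optimal",
        "extraction_width": 0.5,
        "smooth_slitfunction": 0.1,    # becomes lambda_sf
        "smooth_spectrum": 1e-6,       # becomes lambda_sp
        "oversampling": 10,            # becomes osample
        "swath_width": 300,
        "extraction_cutoff": 25,
        "maxiter": 20,
    }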
380
+ class FitsIOStep(Step):
381
+ def __init__(self, *args, **kwargs):
382
+ super().__init__(*args, **kwargs)
383
+ self._loadDependsOn += ["mask"]
384
+ self.allow_failure = True
385
+
386
+ def save(self, data, head, dtype=None):
387
+ """
388
+ Save the data to a FITS file
389
+
390
+ Parameters
391
+ ----------
392
+ data : array of shape (nrow, ncol)
393
+ data to save
394
+ head : FITS header
395
+ header to save with the data
396
+ """
397
+ if dtype is not None:
398
+ data = np.asarray(data, dtype=dtype)
399
+
400
+ fits.writeto(
401
+ self.savefile,
402
+ data=data,
403
+ header=head,
404
+ overwrite=True,
405
+ output_verify="silentfix+ignore",
406
+ )
407
+ logger.info("Created data file: %s", self.savefile)
408
+
409
+ def load(self, mask):
410
+ """
411
+ Load the data from a previous run
412
+
413
+ Parameters
414
+ ----------
415
+ mask : array of shape (nrow, ncol)
416
+ Bad pixel mask
417
+
418
+ Returns
419
+ -------
420
+ data : masked array of shape (nrow, ncol)
421
+ stored data, with the bad pixel mask applied
422
+ head : FITS header
423
+ header of the data file
424
+ """
425
+ try:
426
+ with fits.open(self.savefile, memmap=False) as hdu:
427
+ data, head = hdu[0].data, hdu[0].header
428
+ data = np.ma.masked_array(data, mask=mask)
429
+ logger.info("Data file: %s", self.savefile)
430
+ except FileNotFoundError as ex:
431
+ if self.allow_failure:
432
+ logger.warning("No data file found")
433
+ data, head = None, None
434
+ else:
435
+ raise ex
436
+ return data, head
437
+
438
+
439
+ class Mask(Step):
440
+ """Load the bad pixel mask for the given instrument/mode"""
441
+
442
+ def __init__(self, *args, **config):
443
+ super().__init__(*args, **config)
444
+
445
+ def run(self):
446
+ """Load the mask file from disk
447
+
448
+ Returns
449
+ -------
450
+ mask : array of shape (nrow, ncol)
451
+ Bad pixel mask for this setting
452
+ """
453
+ return self.load()
454
+
455
+ def load(self):
456
+ """Load the mask file from disk
457
+
458
+ Returns
459
+ -------
460
+ mask : array of shape (nrow, ncol)
461
+ Bad pixel mask for this setting
462
+ """
463
+ mask_file = self.instrument.get_mask_filename(mode=self.mode)
464
+ try:
465
+ mask, _ = self.instrument.load_fits(mask_file, self.mode, extension=0)
466
+ mask = ~mask.data.astype(bool)  # REDUCE masks are inverted relative to numpy masks
467
+ logger.info("Bad pixel mask file: %s", mask_file)
468
+ except (FileNotFoundError, ValueError):
469
+ logger.error(
470
+ "Bad Pixel Mask datafile %s not found. Using all pixels instead.",
471
+ mask_file,
472
+ )
473
+ mask = False
474
+ return mask
475
+
476
+
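Editor's sketch of the mask-convention inversion performed in Mask.load: REDUCE mask files flag good pixels with 1, whereas numpy.ma treats True as "masked".

    import numpy as np

    reduce_mask = np.array([[1, 1], [1, 0]])  # 0 marks the single bad pixel
    np_mask = ~reduce_mask.astype(bool)       # True where a pixel is bad
    img = np.ma.masked_array(np.ones((2, 2)), mask=np_mask)
    assert img.count() == 3                   # three good pixels remain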
477
+ class Bias(Step):
478
+ """Calculates the master bias"""
479
+
480
+ def __init__(self, *args, **config):
481
+ super().__init__(*args, **config)
482
+ self._dependsOn += ["mask"]
483
+ self._loadDependsOn += ["mask"]
484
+
485
+ #:int: polynomial degree of the fit between exposure time and pixel values
486
+ self.degree = config["degree"]
487
+
488
+ @property
489
+ def savefile(self):
490
+ """str: Name of master bias fits file"""
491
+ return join(self.output_dir, self.prefix + ".bias.fits")
492
+
493
+ def run(self, files, mask):
494
+ """Calculate the master bias
495
+
496
+ Parameters
497
+ ----------
498
+ files : list(str)
499
+ bias files
500
+ mask : array of shape (nrow, ncol)
501
+ bad pixel map
502
+
503
+ Returns
504
+ -------
505
+ bias : masked array of shape (nrow, ncol)
506
+ master bias data, with the bad pixel mask applied
507
+ bhead : FITS header
508
+ header of the master bias
509
+ """
510
+ logger.info("Bias Files: %s", files)
511
+
512
+ if self.degree == 0:
513
+ # If the degree is 0, we just combine all images into a single master bias
514
+ # this is valid if we assume there is no dark current at exposure time 0
515
+ bias, bhead = combine_bias(
516
+ files,
517
+ self.instrument,
518
+ self.mode,
519
+ mask=mask,
520
+ plot=self.plot,
521
+ plot_title=self.plot_title,
522
+ )
523
+ else:
524
+ # Otherwise we fit a polynomial to each pixel in the image, with
525
+ # the pixel value versus the exposure time. The constant coefficients
526
+ # are then the bias, and the others are used to scale with the
527
+ # exposure time
528
+ bias, bhead = combine_polynomial(
529
+ files,
530
+ self.instrument,
531
+ self.mode,
532
+ mask=mask,
533
+ degree=self.degree,
534
+ plot=self.plot,
535
+ plot_title=self.plot_title,
536
+ )
537
+
538
+ self.save(bias.data, bhead)
539
+ return bias, bhead
540
+
541
+ def save(self, bias, bhead):
542
+ """Save the master bias to a FITS file
543
+
544
+ Parameters
545
+ ----------
546
+ bias : array of shape (nrow, ncol)
547
+ bias data
548
+ bhead : FITS header
549
+ bias header
550
+ """
551
+ bias = np.asarray(bias, dtype=np.float32)
552
+
553
+ if self.degree == 0:
554
+ hdus = [fits.PrimaryHDU(data=bias, header=bhead, scale_back=False)]
555
+ else:
556
+ hdus = [fits.PrimaryHDU(data=bias[0], header=bhead, scale_back=False)]
557
+ for i in range(1, len(bias)):
558
+ hdus += [fits.ImageHDU(data=bias[i])]
559
+ hdus = fits.HDUList(hdus)
560
+
561
+ hdus[0].header["BZERO"] = 0
562
+ hdus.writeto(
563
+ self.savefile,
564
+ overwrite=True,
565
+ output_verify="silentfix+ignore",
566
+ )
567
+ logger.info("Created master bias file: %s", self.savefile)
568
+
569
+ def load(self, mask):
570
+ """Load the master bias from a previous run
571
+
572
+ Parameters
573
+ ----------
574
+ mask : array of shape (nrow, ncol)
575
+ Bad pixel mask
576
+
577
+ Returns
578
+ -------
579
+ bias : masked array of shape (nrow, ncol)
580
+ master bias data, with the bad pixel mask applied
581
+ bhead : FITS header
582
+ header of the master bias
583
+ """
584
+ try:
585
+ logger.info("Master bias file: %s", self.savefile)
586
+ with fits.open(self.savefile, memmap=False) as hdu:
587
+ degree = len(hdu) - 1
588
+ if degree == 0:
589
+ bias, bhead = hdu[0].data, hdu[0].header
590
+ bias = np.ma.masked_array(bias, mask=mask)
591
+ else:
592
+ bhead = hdu[0].header
593
+ bias = np.array([h.data for h in hdu])
594
+ bias = np.ma.masked_array(
595
+ bias, mask=[mask for _ in range(len(hdu))]
596
+ )
597
+ except FileNotFoundError:
598
+ logger.warning("No intermediate bias file found. Using Bias = 0 instead.")
599
+ bias, bhead = None, None
600
+ return bias, bhead
601
+
602
+
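Editor's sketch of the per-pixel polynomial idea behind the degree > 0 branch of Bias.run; the real implementation is combine_frames.combine_polynomial, and the synthetic frames here simply grow linearly with exposure time.

    import numpy as np

    texp = np.array([0.0, 10.0, 20.0])                     # exposure times
    frames = 5.0 + 0.2 * texp[:, None, None] + np.zeros((3, 4, 4))
    # fit value = c1 * t + c0 for every pixel; c0 is the bias at t = 0
    coef = np.polyfit(texp, frames.reshape(len(texp), -1), deg=1)
    bias = coef[-1].reshape(4, 4)                          # constant coefficients
    assert np.allclose(bias, 5.0)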
603
+ class Flat(CalibrationStep):
604
+ """Calculates the master flat"""
605
+
606
+ def __init__(self, *args, **config):
607
+ super().__init__(*args, **config)
608
+ self._loadDependsOn += ["mask"]
609
+
610
+ @property
611
+ def savefile(self):
612
+ """str: Name of master bias fits file"""
613
+ return join(self.output_dir, self.prefix + ".flat.fits")
614
+
615
+ def save(self, flat, fhead):
616
+ """Save the master flat to a FITS file
617
+
618
+ Parameters
619
+ ----------
620
+ flat : array of shape (nrow, ncol)
621
+ master flat data
622
+ fhead : FITS header
623
+ master flat header
624
+ """
625
+ flat = np.asarray(flat, dtype=np.float32)
626
+ fits.writeto(
627
+ self.savefile,
628
+ data=flat,
629
+ header=fhead,
630
+ overwrite=True,
631
+ output_verify="silentfix+ignore",
632
+ )
633
+ logger.info("Created master flat file: %s", self.savefile)
634
+
635
+ def run(self, files, bias, mask):
636
+ """Calculate the master flat, with the bias already subtracted
637
+
638
+ Parameters
639
+ ----------
640
+ files : list(str)
641
+ flat files
642
+ bias : tuple(array of shape (nrow, ncol), FITS header)
643
+ master bias and header
644
+ mask : array of shape (nrow, ncol)
645
+ Bad pixel mask
646
+
647
+ Returns
648
+ -------
649
+ flat : masked array of shape (nrow, ncol)
650
+ Master flat with bad pixel map applied
651
+ fhead : FITS header
652
+ Master flat FITS header
653
+ """
654
+ logger.info("Flat files: %s", files)
655
+ # This is just the calibration of images
656
+ flat, fhead = self.calibrate(files, mask, bias, None)
657
+ # And then save it
658
+ self.save(flat.data, fhead)
659
+ return flat, fhead
660
+
661
+ def load(self, mask):
662
+ """Load master flat from disk
663
+
664
+ Parameters
665
+ ----------
666
+ mask : array of shape (nrow, ncol)
667
+ Bad pixel mask
668
+
669
+ Returns
670
+ -------
671
+ flat : masked array of shape (nrow, ncol)
672
+ Master flat with bad pixel map applied
673
+ fhead : FITS header
674
+ Master flat FITS header
675
+ """
676
+ try:
677
+ with fits.open(self.savefile, memmap=False) as hdu:
678
+ flat, fhead = hdu[0].data, hdu[0].header
679
+ flat = np.ma.masked_array(flat, mask=mask)
680
+ logger.info("Master flat file: %s", self.savefile)
681
+ except FileNotFoundError:
682
+ logger.warning(
683
+ "No intermediate file for the flat field found. Using Flat = 1 instead"
684
+ )
685
+ flat, fhead = None, None
686
+ return flat, fhead
687
+
688
+
689
+ class OrderTracing(CalibrationStep):
690
+ """Determine the polynomial fits describing the pixel locations of each order"""
691
+
692
+ def __init__(self, *args, **config):
693
+ super().__init__(*args, **config)
694
+
695
+ #:int: Minimum size of each cluster to be included in further processing
696
+ self.min_cluster = config["min_cluster"]
697
+ #:int, float: Minimum width of each cluster after merging
698
+ self.min_width = config["min_width"]
699
+ #:int: Size of the gaussian filter for smoothing
700
+ self.filter_size = config["filter_size"]
701
+ #:int: Background noise value threshold
702
+ self.noise = config["noise"]
703
+ #:int: Polynomial degree of the fit to each order
704
+ self.fit_degree = config["degree"]
705
+
706
+ self.degree_before_merge = config["degree_before_merge"]
707
+ self.regularization = config["regularization"]
708
+ self.closing_shape = config["closing_shape"]
709
+ self.auto_merge_threshold = config["auto_merge_threshold"]
710
+ self.merge_min_threshold = config["merge_min_threshold"]
711
+ self.sigma = config["split_sigma"]
712
+ #:int: Number of pixels at the edge of the detector to ignore
713
+ self.border_width = config["border_width"]
714
+ #:bool: Whether to use manual alignment
715
+ self.manual = config["manual"]
716
+
717
+ @property
718
+ def savefile(self):
719
+ """str: Name of the order tracing file"""
720
+ return join(self.output_dir, self.prefix + ".ord_default.npz")
721
+
722
+ def run(self, files, mask, bias):
723
+ """Determine polynomial coefficients describing order locations
724
+
725
+ Parameters
726
+ ----------
727
+ files : list(str)
728
+ Observation used for order tracing (should only have one element)
729
+ mask : array of shape (nrow, ncol)
730
+ Bad pixel mask
731
+
732
+ Returns
733
+ -------
734
+ orders : array of shape (nord, ndegree+1)
735
+ polynomial coefficients for each order
736
+ column_range : array of shape (nord, 2)
737
+ first and last(+1) column that carries signal in each order
738
+ """
739
+
740
+ logger.info("Order tracing files: %s", files)
741
+
742
+ order_img, ohead = self.calibrate(files, mask, bias, None)
743
+
744
+ orders, column_range = mark_orders(
745
+ order_img,
746
+ min_cluster=self.min_cluster,
747
+ min_width=self.min_width,
748
+ filter_size=self.filter_size,
749
+ noise=self.noise,
750
+ opower=self.fit_degree,
751
+ degree_before_merge=self.degree_before_merge,
752
+ regularization=self.regularization,
753
+ closing_shape=self.closing_shape,
754
+ border_width=self.border_width,
755
+ manual=self.manual,
756
+ auto_merge_threshold=self.auto_merge_threshold,
757
+ merge_min_threshold=self.merge_min_threshold,
758
+ sigma=self.sigma,
759
+ plot=self.plot,
760
+ plot_title=self.plot_title,
761
+ )
762
+
763
+ self.save(orders, column_range)
764
+
765
+ return orders, column_range
766
+
767
+ def save(self, orders, column_range):
768
+ """Save order tracing results to disk
769
+
770
+ Parameters
771
+ ----------
772
+ orders : array of shape (nord, ndegree+1)
773
+ polynomial coefficients
774
+ column_range : array of shape (nord, 2)
775
+ first and last(+1) column that carry signal in each order
776
+ """
777
+ np.savez(self.savefile, orders=orders, column_range=column_range)
778
+ logger.info("Created order tracing file: %s", self.savefile)
779
+
780
+ def load(self):
781
+ """Load order tracing results
782
+
783
+ Returns
784
+ -------
785
+ orders : array of shape (nord, ndegree+1)
786
+ polynomial coefficients for each order
787
+ column_range : array of shape (nord, 2)
788
+ first and last(+1) column that carries signal in each order
789
+ """
790
+ logger.info("Order tracing file: %s", self.savefile)
791
+ data = np.load(self.savefile, allow_pickle=True)
792
+ orders = data["orders"]
793
+ column_range = data["column_range"]
794
+ return orders, column_range
795
+
796
+
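Editor's sketch of how the traced orders are typically used downstream: evaluate each order's polynomial over its column range to recover the row position of the order center (the coefficients are illustrative).

    import numpy as np

    orders = np.array([[1e-5, 0.01, 250.0]])  # (nord, degree + 1), highest degree first
    column_range = np.array([[50, 1950]])     # first and last(+1) column with signal
    x = np.arange(column_range[0, 0], column_range[0, 1])
    y_center = np.polyval(orders[0], x)       # row of order 0 at each column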
797
+ class BackgroundScatter(CalibrationStep):
798
+ """Determine the background scatter"""
799
+
800
+ def __init__(self, *args, **config):
801
+ super().__init__(*args, **config)
802
+ self._dependsOn += ["orders"]
803
+
804
+ #:tuple(int, int): Polynomial degrees for the background scatter fit, in row, column direction
805
+ self.scatter_degree = config["scatter_degree"]
806
+ self.extraction_width = config["extraction_width"]
807
+ self.sigma_cutoff = config["scatter_cutoff"]
808
+ self.border_width = config["border_width"]
809
+
810
+ @property
811
+ def savefile(self):
812
+ """str: Name of the scatter file"""
813
+ return join(self.output_dir, self.prefix + ".scatter.npz")
814
+
815
+ def run(self, files, mask, bias, orders):
816
+ logger.info("Background scatter files: %s", files)
817
+
818
+ scatter_img, shead = self.calibrate(files, mask, bias)
819
+
820
+ orders, column_range = orders
821
+ scatter = estimate_background_scatter(
822
+ scatter_img,
823
+ orders,
824
+ column_range=column_range,
825
+ extraction_width=self.extraction_width,
826
+ scatter_degree=self.scatter_degree,
827
+ sigma_cutoff=self.sigma_cutoff,
828
+ border_width=self.border_width,
829
+ plot=self.plot,
830
+ plot_title=self.plot_title,
831
+ )
832
+
833
+ self.save(scatter)
834
+ return scatter
835
+
836
+ def save(self, scatter):
837
+ """Save scatter results to disk
838
+
839
+ Parameters
840
+ ----------
841
+ scatter : array
842
+ scatter coefficients
843
+ """
844
+ np.savez(self.savefile, scatter=scatter)
845
+ logger.info("Created background scatter file: %s", self.savefile)
846
+
847
+ def load(self):
848
+ """Load scatter results from disk
849
+
850
+ Returns
851
+ -------
852
+ scatter : array
853
+ scatter coefficients
854
+ """
855
+ try:
856
+ data = np.load(self.savefile, allow_pickle=True)
857
+ logger.info("Background scatter file: %s", self.savefile)
858
+ except FileNotFoundError:
859
+ logger.warning(
860
+ "No intermediate files found for the scatter. Using scatter = 0 instead."
861
+ )
862
+ data = {"scatter": None}
863
+ scatter = data["scatter"]
864
+ return scatter
865
+
866
+
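Editor's sketch of evaluating the kind of two-dimensional background model that scatter_degree parametrizes; the fit itself happens in estimate_background_scatter, and the coefficients here are illustrative.

    import numpy as np

    scatter_coef = np.array([[5.0, 1e-3], [2e-3, 0.0]])  # degree (1, 1) in row/column
    rows, cols = np.indices((16, 16))
    background = np.polynomial.polynomial.polyval2d(rows, cols, scatter_coef)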
867
+ class NormalizeFlatField(Step):
868
+ """Calculate the 'normalized' flat field image"""
869
+
870
+ def __init__(self, *args, **config):
871
+ super().__init__(*args, **config)
872
+ self._dependsOn += ["flat", "orders", "scatter", "curvature"]
873
+
874
+ #:{'normalize'}: Extraction method to use
875
+ self.extraction_method = config["extraction_method"]
876
+ if self.extraction_method == "normalize":
877
+ #:dict: arguments for the extraction
878
+ self.extraction_kwargs = {
879
+ "extraction_width": config["extraction_width"],
880
+ "lambda_sf": config["smooth_slitfunction"],
881
+ "lambda_sp": config["smooth_spectrum"],
882
+ "osample": config["oversampling"],
883
+ "swath_width": config["swath_width"],
884
+ "sigma_cutoff": config["extraction_cutoff"],
885
+ "maxiter": config["maxiter"],
886
+ }
887
+ else:
888
+ raise ValueError(
889
+ f"Extraction method {self.extraction_method} not supported for step 'norm_flat'"
890
+ )
891
+ #:int: Threshold of the normalized flat field (values below this are just 1)
892
+ self.threshold = config["threshold"]
893
+ self.threshold_lower = config["threshold_lower"]
894
+
895
+ @property
896
+ def savefile(self):
897
+ """str: Name of the blaze file"""
898
+ return join(self.output_dir, self.prefix + ".flat_norm.npz")
899
+
900
+ def run(self, flat, orders, scatter, curvature):
901
+ """Calculate the 'normalized' flat field
902
+
903
+ Parameters
904
+ ----------
905
+ flat : tuple(array, header)
906
+ Master flat, and its FITS header
907
+ orders : tuple(array, array)
908
+ Polynomial coefficients for each order, and the first and last(+1) column containing signal
909
+
910
+ Returns
911
+ -------
912
+ norm : array of shape (nrow, ncol)
913
+ normalized flat field
914
+ blaze : array of shape (nord, ncol)
915
+ Continuum level as determined from the flat field for each order
916
+ """
917
+ flat, fhead = flat
918
+ orders, column_range = orders
919
+ tilt, shear = curvature
920
+
921
+ # if the threshold is 1 or less, interpret it as a fraction and convert it to an absolute level via the corresponding percentile
922
+ if self.threshold <= 1:
923
+ threshold = np.percentile(flat, self.threshold * 100)
924
+ else:
925
+ threshold = self.threshold
926
+
927
+ norm, _, blaze, _ = extract(
928
+ flat,
929
+ orders,
930
+ gain=fhead["e_gain"],
931
+ readnoise=fhead["e_readn"],
932
+ dark=fhead["e_drk"],
933
+ order_range=self.order_range,
934
+ column_range=column_range,
935
+ scatter=scatter,
936
+ threshold=threshold,
937
+ threshold_lower=self.threshold_lower,
938
+ extraction_type=self.extraction_method,
939
+ tilt=tilt,
940
+ shear=shear,
941
+ plot=self.plot,
942
+ plot_title=self.plot_title,
943
+ **self.extraction_kwargs,
944
+ )
945
+
946
+ blaze = np.ma.filled(blaze, 0)
947
+ norm = np.ma.filled(norm, 1)
948
+ norm = np.nan_to_num(norm, nan=1)
949
+ self.save(norm, blaze)
950
+ return norm, blaze
951
+
952
+ def save(self, norm, blaze):
953
+ """Save normalized flat field results to disk
954
+
955
+ Parameters
956
+ ----------
957
+ norm : array of shape (nrow, ncol)
958
+ normalized flat field
959
+ blaze : array of shape (nord, ncol)
960
+ Continuum level as determined from the flat field for each order
961
+ """
962
+ np.savez(self.savefile, blaze=blaze, norm=norm)
963
+ logger.info("Created normalized flat file: %s", self.savefile)
964
+
965
+ def load(self):
966
+ """Load normalized flat field results from disk
967
+
968
+ Returns
969
+ -------
970
+ norm : array of shape (nrow, ncol)
971
+ normalized flat field
972
+ blaze : array of shape (nord, ncol)
973
+ Continuum level as determined from the flat field for each order
974
+ """
975
+ try:
976
+ data = np.load(self.savefile, allow_pickle=True)
977
+ logger.info("Normalized flat file: %s", self.savefile)
978
+ except FileNotFoundError:
979
+ logger.warning(
980
+ "No intermediate files found for the normalized flat field. Using flat = 1 instead."
981
+ )
982
+ data = {"blaze": None, "norm": None}
983
+ blaze = data["blaze"]
984
+ norm = data["norm"]
985
+ return norm, blaze
986
+
987
+
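Editor's sketch of the threshold convention used in NormalizeFlatField.run: values of 1 or less are read as fractions and converted to an absolute flux level via the corresponding percentile of the master flat.

    import numpy as np

    flat = np.random.default_rng(0).uniform(0.0, 1000.0, (100, 100))
    threshold = 0.5                                        # configured fraction
    if threshold <= 1:
        threshold = np.percentile(flat, threshold * 100)   # roughly the median here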
988
+ class WavelengthCalibrationMaster(CalibrationStep, ExtractionStep):
989
+ """Create wavelength calibration master image"""
990
+
991
+ def __init__(self, *args, **config):
992
+ super().__init__(*args, **config)
993
+ self._dependsOn += ["norm_flat", "curvature", "bias"]
994
+
995
+ @property
996
+ def savefile(self):
997
+ """str: Name of the wavelength echelle file"""
998
+ return join(self.output_dir, self.prefix + ".thar_master.fits")
999
+
1000
+ def run(self, files, orders, mask, curvature, bias, norm_flat):
1001
+ """Perform wavelength calibration
1002
+
1003
+ This consists of extracting the wavelength image
1004
+ and fitting a polynomial the the known spectral lines
1005
+
1006
+ Parameters
1007
+ ----------
1008
+ files : list(str)
1009
+ wavelength calibration files
1010
+ orders : tuple(array, array)
1011
+ Polynomial coefficients of each order, and columns with signal of each order
1012
+ mask : array of shape (nrow, ncol)
1013
+ Bad pixel mask
1014
+
1015
+ Returns
1016
+ -------
1017
+ thar : array of shape (nord, ncol)
1018
+ extracted wavelength calibration spectrum
1019
+ thead : FITS header
1020
+ header of the combined calibration image
1025
+ """
1026
+ if len(files) == 0:
1027
+ raise FileNotFoundError("No files found for wavelength calibration")
1028
+ logger.info("Wavelength calibration files: %s", files)
1029
+ # Load wavecal image
1030
+ orig, thead = self.calibrate(files, mask, bias, norm_flat)
1031
+ # Extract wavecal spectrum
1032
+ thar, _, _, _ = self.extract(orig, thead, orders, curvature)
1033
+ self.save(thar, thead)
1034
+ return thar, thead
1035
+
1036
+ def save(self, thar, thead):
1037
+ """Save the master wavelength calibration to a FITS file
1038
+
1039
+ Parameters
1040
+ ----------
1041
+ thar : array of shape (nord, ncol)
1042
+ extracted wavelength calibration spectrum
1043
+ thead : FITS header
1044
+ master wavecal header
1045
+ """
1046
+ thar = np.asarray(thar, dtype=np.float64)
1047
+ fits.writeto(
1048
+ self.savefile,
1049
+ data=thar,
1050
+ header=thead,
1051
+ overwrite=True,
1052
+ output_verify="silentfix+ignore",
1053
+ )
1054
+ logger.info("Created wavelength calibration spectrum file: %s", self.savefile)
1055
+
1056
+ def load(self):
1057
+ """Load master wavelength calibration from disk
1058
+
1059
+ Returns
1060
+ -------
1061
+ thar : masked array of shape (nrow, ncol)
1062
+ Master wavecal with bad pixel map applied
1063
+ thead : FITS header
1064
+ Master wavecal FITS header
1065
+ """
1066
+ with fits.open(self.savefile, memmap=False) as hdu:
1067
+ thar, thead = hdu[0].data, hdu[0].header
1068
+ logger.info("Wavelength calibration spectrum file: %s", self.savefile)
1069
+ return thar, thead
1070
+
1071
+
1072
+ class WavelengthCalibrationInitialize(Step):
1073
+ """Create the initial wavelength solution file"""
1074
+
1075
+ def __init__(self, *args, **config):
1076
+ super().__init__(*args, **config)
1077
+ self._dependsOn += ["wavecal_master"]
1078
+ self._loadDependsOn += ["config", "wavecal_master"]
1079
+
1080
+ #:tuple(int, int): Polynomial degree of the wavelength calibration in order, column direction
1081
+ self.degree = config["degree"]
1082
+ #:float: wavelength range around the initial guess to explore
1083
+ self.wave_delta = config["wave_delta"]
1084
+ #:int: number of walkers in the MCMC
1085
+ self.nwalkers = config["nwalkers"]
1086
+ #:int: number of steps in the MCMC
1087
+ self.steps = config["steps"]
1088
+ #:float: residual range to accept as a match between peaks and atlas, in m/s
1089
+ self.resid_delta = config["resid_delta"]
1090
+ #:str: element for the atlas to use
1091
+ self.element = config["element"]
1092
+ #:str: the medium of the instrument, air or vac
1093
+ self.medium = config["medium"]
1094
+ #:float: Gaussian smoothing parameter applied to the observed spectrum in pixel scale, set to 0 to disable smoothing
1095
+ self.smoothing = config["smoothing"]
1096
+ #:float: Minimum height of spectral lines in the normalized spectrum, values of 1 and above are interpreted as percentiles of the spectrum, set to 0 to disable the cutoff
1097
+ self.cutoff = config["cutoff"]
1098
+
1099
+ @property
1100
+ def savefile(self):
1101
+ """str: Name of the wavelength echelle file"""
1102
+ return join(self.output_dir, self.prefix + ".linelist.npz")
1103
+
1104
+ def run(self, wavecal_master):
1105
+ thar, thead = wavecal_master
1106
+
1107
+ # Get the initial wavelength guess from the instrument
1108
+ wave_range = self.instrument.get_wavelength_range(thead, self.mode)
1109
+ if wave_range is None:
1110
+ raise ValueError(
1111
+ "This instrument is missing an initial wavelength guess for wavecal_init"
1112
+ )
1113
+
1114
+ module = WavelengthCalibrationInitializeModule(
1115
+ plot=self.plot,
1116
+ plot_title=self.plot_title,
1117
+ degree=self.degree,
1118
+ wave_delta=self.wave_delta,
1119
+ nwalkers=self.nwalkers,
1120
+ steps=self.steps,
1121
+ resid_delta=self.resid_delta,
1122
+ element=self.element,
1123
+ medium=self.medium,
1124
+ smoothing=self.smoothing,
1125
+ cutoff=self.cutoff,
1126
+ )
1127
+ linelist = module.execute(thar, wave_range)
1128
+ self.save(linelist)
1129
+ return linelist
1130
+
1131
+ def save(self, linelist):
1132
+ linelist.save(self.savefile)
1133
+ logger.info("Created wavelength calibration linelist file: %s", self.savefile)
1134
+
1135
+ def load(self, config, wavecal_master):
1136
+ thar, thead = wavecal_master
1137
+ try:
1138
+ # Try loading the custom reference file
1139
+ reference = self.savefile
1140
+ linelist = LineList.load(reference)
1141
+ except FileNotFoundError:
1142
+ # If that fails, load the file provided by PyReduce
1143
+ # It usually fails because we want to use this one
1144
+ reference = self.instrument.get_wavecal_filename(
1145
+ thead, self.mode, **config["instrument"]
1146
+ )
1147
+
1148
+ # This should fail if there is no provided file by PyReduce
1149
+ linelist = LineList.load(reference)
1150
+ logger.info("Wavelength calibration linelist file: %s", reference)
1151
+ return linelist
1152
+
1153
+
1154
+ class WavelengthCalibrationFinalize(Step):
1155
+ """Perform wavelength calibration"""
1156
+
1157
+ def __init__(self, *args, **config):
1158
+ super().__init__(*args, **config)
1159
+ self._dependsOn += ["wavecal_master", "wavecal_init"]
1160
+
1161
+ #:tuple(int, int): Polynomial degree of the wavelength calibration in order, column direction
1162
+ self.degree = config["degree"]
1163
+ #:bool: Whether to use manual alignment instead of cross correlation
1164
+ self.manual = config["manual"]
1165
+ #:float: residual threshold in m/s
1166
+ self.threshold = config["threshold"]
1167
+ #:int: Number of iterations in the remove lines, auto id cycle
1168
+ self.iterations = config["iterations"]
1169
+ #:{'1D', '2D'}: Whether to use 1d or 2d polynomials
1170
+ self.dimensionality = config["dimensionality"]
1171
+ #:int: Number of detector offset steps, due to detector design
1172
+ self.nstep = config["nstep"]
1173
+ #:int: How many columns to use in the 2D cross correlation alignment. 0 means all pixels (slow).
1174
+ self.correlate_cols = config["correlate_cols"]
1175
+ #:float: fraction of columns within which individual orders are allowed to shift
1176
+ self.shift_window = config["shift_window"]
1177
+ #:str: elements of the spectral lamp
1178
+ self.element = config["element"]
1179
+ #:str: medium of the detector, vac or air
1180
+ self.medium = config["medium"]
1181
+
1182
+ @property
1183
+ def savefile(self):
1184
+ """str: Name of the wavelength echelle file"""
1185
+ return join(self.output_dir, self.prefix + ".thar.npz")
1186
+
1187
+ def run(self, wavecal_master, wavecal_init):
1188
+ """Perform wavelength calibration
1189
+
1190
+ This consists of extracting the wavelength image
1191
+ and fitting a polynomial to the known spectral lines
1192
+
1193
+ Parameters
1194
+ ----------
1195
+ wavecal_master : tuple
1196
+ results of the wavecal_master step, containing the master wavecal image
1197
+ and its header
1198
+ wavecal_init : LineList
1199
+ the initial LineList guess with the positions and wavelengths of lines
1200
+
1201
+ Returns
1202
+ -------
1203
+ wave : array of shape (nord, ncol)
1204
+ wavelength for each point in the spectrum
1205
+ coef : array of shape (*ndegrees,)
1206
+ polynomial coefficients of the wavelength fit
1207
+ linelist : record array of shape (nlines,)
1208
+ Updated line information for all lines
1209
+ """
1210
+ thar, thead = wavecal_master
1211
+ linelist = wavecal_init
1212
+
1213
+ module = WavelengthCalibrationModule(
1214
+ plot=self.plot,
1215
+ plot_title=self.plot_title,
1216
+ manual=self.manual,
1217
+ degree=self.degree,
1218
+ threshold=self.threshold,
1219
+ iterations=self.iterations,
1220
+ dimensionality=self.dimensionality,
1221
+ nstep=self.nstep,
1222
+ correlate_cols=self.correlate_cols,
1223
+ shift_window=self.shift_window,
1224
+ element=self.element,
1225
+ medium=self.medium,
1226
+ )
1227
+ wave, coef, linelist = module.execute(thar, linelist)
1228
+ self.save(wave, coef, linelist)
1229
+ return wave, coef, linelist
1230
+
1231
+ def save(self, wave, coef, linelist):
1232
+ """Save the results of the wavelength calibration
1233
+
1234
+ Parameters
1235
+ ----------
1236
+ wave : array of shape (nord, ncol)
1237
+ wavelength for each point in the spectrum
1238
+ coef : array of shape (ndegrees,)
1239
+ polynomial coefficients of the wavelength fit
1240
+ linelist : record array of shape (nlines,)
1241
+ Updated line information for all lines
1242
+ """
1243
+ np.savez(self.savefile, wave=wave, coef=coef, linelist=linelist)
1244
+ logger.info("Created wavelength calibration file: %s", self.savefile)
1245
+
1246
+ def load(self):
1247
+ """Load the results of the wavelength calibration
1248
+
1249
+ Returns
1250
+ -------
1251
+ wave : array of shape (nord, ncol)
1252
+ wavelength for each point in the spectrum
1253
+ coef : array of shape (*ndegrees,)
1254
+ polynomial coefficients of the wavelength fit
1255
+ linelist : record array of shape (nlines,)
1256
+ Updated line information for all lines
1257
+ """
1258
+ data = np.load(self.savefile, allow_pickle=True)
1259
+ logger.info("Wavelength calibration file: %s", self.savefile)
1260
+ wave = data["wave"]
1261
+ coef = data["coef"]
1262
+ linelist = data["linelist"]
1263
+ return wave, coef, linelist
1264
+
1265
+
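Editor's sketch of what the "2D" dimensionality option amounts to: a single polynomial in (order, column) that yields a wavelength for every pixel of the extracted spectra (the coefficients are illustrative).

    import numpy as np

    nord, ncol = 3, 2048
    coef = np.array([[5000.0, 0.05], [100.0, 0.0]])  # degree (1, 1) in order/column
    o, x = np.meshgrid(np.arange(nord), np.arange(ncol), indexing="ij")
    wave = np.polynomial.polynomial.polyval2d(o, x, coef)  # shape (nord, ncol)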
1266
+ class LaserFrequencyCombMaster(CalibrationStep, ExtractionStep):
1267
+ """Create a laser frequency comb (or similar) master image"""
1268
+
1269
+ def __init__(self, *args, **config):
1270
+ super().__init__(*args, **config)
1271
+ self._dependsOn += ["norm_flat", "curvature"]
1272
+
1273
+ @property
1274
+ def savefile(self):
1275
+ """str: Name of the wavelength echelle file"""
1276
+ return join(self.output_dir, self.prefix + ".comb_master.fits")
1277
+
1278
+ def run(self, files, orders, mask, curvature, bias, norm_flat):
1279
+ """Improve the wavelength calibration with a laser frequency comb (or similar)
1280
+
1281
+ Parameters
1282
+ ----------
1283
+ files : list(str)
1284
+ observation files
1285
+ orders : tuple
1286
+ results from the order tracing step
1287
+ mask : array of shape (nrow, ncol)
1288
+ Bad pixel mask
1289
+ curvature : tuple
1290
+ results from the curvature step
1291
+ bias : tuple
1292
+ results from the bias step
1293
+
1294
+ Returns
1295
+ -------
1296
+ comb : array of shape (nord, ncol)
1297
+ extracted frequency comb image
1298
+ chead : Header
1299
+ FITS header of the combined image
1300
+ """
1301
+
1302
+ if len(files) == 0:
1303
+ raise FileNotFoundError("No files for Laser Frequency Comb found")
1304
+ logger.info("Frequency comb files: %s", files)
1305
+
1306
+ # Combine the input files and calibrate
1307
+ orig, chead = self.calibrate(files, mask, bias, norm_flat)
1308
+ # Extract the spectrum
1309
+ comb, _, _, _ = self.extract(orig, chead, orders, curvature)
1310
+ self.save(comb, chead)
1311
+ return comb, chead
1312
+
1313
+ def save(self, comb, chead):
1314
+ """Save the master comb to a FITS file
1315
+
1316
+ Parameters
1317
+ ----------
1318
+ comb : array of shape (nrow, ncol)
1319
+ master comb data
1320
+ chead : FITS header
1321
+ master comb header
1322
+ """
1323
+ comb = np.asarray(comb, dtype=np.float64)
1324
+ fits.writeto(
1325
+ self.savefile,
1326
+ data=comb,
1327
+ header=chead,
1328
+ overwrite=True,
1329
+ output_verify="silentfix+ignore",
1330
+ )
1331
+ logger.info("Created frequency comb master spectrum: %s", self.savefile)
1332
+
1333
+ def load(self):
1334
+ """Load master comb from disk
1335
+
1336
+ Returns
1337
+ -------
1338
+ comb : masked array of shape (nrow, ncol)
1339
+ Master comb with bad pixel map applied
1340
+ chead : FITS header
1341
+ Master comb FITS header
1342
+ """
1343
+ with fits.open(self.savefile, memmap=False) as hdu:
1344
+ comb, chead = hdu[0].data, hdu[0].header
1345
+ logger.info("Frequency comb master spectrum: %s", self.savefile)
1346
+ return comb, chead
1347
+
1348
+
1349
+ class LaserFrequencyCombFinalize(Step):
1350
+ """Improve the precision of the wavelength calibration with a laser frequency comb"""
1351
+
1352
+ def __init__(self, *args, **config):
1353
+ super().__init__(*args, **config)
1354
+ self._dependsOn += ["freq_comb_master", "wavecal"]
1355
+ self._loadDependsOn += ["wavecal"]
1356
+
1357
+ #:tuple(int, int): polynomial degree of the wavelength fit
1358
+ self.degree = config["degree"]
1359
+ #:float: residual threshold in m/s above which to remove lines
1360
+ self.threshold = config["threshold"]
1361
+ #:{'1D', '2D'}: Whether to use 1D or 2D polynomials
1362
+ self.dimensionality = config["dimensionality"]
1363
+ self.nstep = config["nstep"]
1364
+ #:int: Width of the peaks for finding them in the spectrum
1365
+ self.lfc_peak_width = config["lfc_peak_width"]
1366
+
1367
+ @property
1368
+ def savefile(self):
1369
+ """str: Name of the wavelength echelle file"""
1370
+ return join(self.output_dir, self.prefix + ".comb.npz")
1371
+
1372
+ def run(self, freq_comb_master, wavecal):
1373
+ """Improve the wavelength calibration with a laser frequency comb (or similar)
1374
+
1375
+ Parameters
1376
+ ----------
1377
+ freq_comb_master : tuple
1378
+ results from the freq_comb_master step: the extracted comb spectrum and its header
1379
+ wavecal : tuple
1380
+ results from the wavelength calibration step
1385
+
1386
+ Returns
1387
+ -------
1388
+ wave : array of shape (nord, ncol)
1389
+ improved wavelength solution
1392
+ """
1393
+ comb, chead = freq_comb_master
1394
+ wave, coef, linelist = wavecal
1395
+
1396
+ module = WavelengthCalibrationComb(
1397
+ plot=self.plot,
1398
+ plot_title=self.plot_title,
1399
+ degree=self.degree,
1400
+ threshold=self.threshold,
1401
+ dimensionality=self.dimensionality,
1402
+ nstep=self.nstep,
1403
+ lfc_peak_width=self.lfc_peak_width,
1404
+ )
1405
+ wave = module.execute(comb, wave, linelist)
1406
+
1407
+ self.save(wave)
1408
+ return wave
1409
+
1410
+ def save(self, wave):
1411
+ """Save the results of the frequency comb improvement
1412
+
1413
+ Parameters
1414
+ ----------
1415
+ wave : array of shape (nord, ncol)
1416
+ improved wavelength solution
1417
+ """
1418
+ np.savez(self.savefile, wave=wave)
1419
+ logger.info("Created frequency comb wavecal file: %s", self.savefile)
1420
+
1421
+ def load(self, wavecal):
1422
+ """Load the results of the frequency comb improvement if possible,
1423
+ otherwise just use the normal wavelength solution
1424
+
1425
+ Parameters
1426
+ ----------
1427
+ wavecal : tuple
1428
+ results from the wavelength calibration step
1429
+
1430
+ Returns
1431
+ -------
1432
+ wave : array of shape (nord, ncol)
1433
+ improved wavelength solution
1434
+ comb : array of shape (nord, ncol)
1437
+ try:
1438
+ data = np.load(self.savefile, allow_pickle=True)
1439
+ logger.info("Frequency comb wavecal file: %s", self.savefile)
1440
+ except FileNotFoundError:
1441
+ logger.warning(
1442
+ "No data for Laser Frequency Comb found, using regular wavelength calibration instead"
1443
+ )
1444
+ wave, coef, linelist = wavecal
1445
+ data = {"wave": wave}
1446
+ wave = data["wave"]
1447
+ return wave
1448
+
1449
+
1450
+ class SlitCurvatureDetermination(CalibrationStep, ExtractionStep):
+     """Determine the curvature of the slit"""
+
+     def __init__(self, *args, **config):
+         super().__init__(*args, **config)
+
+         #:float: how many sigma of bad lines to cut away
+         self.sigma_cutoff = config["curvature_cutoff"]
+         #:float: width of the orders in the extraction
+         self.extraction_width = config["extraction_width"]
+         #:int: Polynomial degree of the overall fit
+         self.fit_degree = config["degree"]
+         #:int: Orders of the curvature to fit, currently supports only 1 and 2
+         self.curv_degree = config["curv_degree"]
+         #:{'1D', '2D'}: Whether to use 1D or 2D polynomials
+         self.curvature_mode = config["dimensionality"]
+         #:float: peak finding noise threshold
+         self.peak_threshold = config["peak_threshold"]
+         #:int: peak width
+         self.peak_width = config["peak_width"]
+         #:float: window width to search for peak in each row
+         self.window_width = config["window_width"]
+         #:str: Function shape that is fit to individual peaks
+         self.peak_function = config["peak_function"]
+
+     @property
+     def savefile(self):
+         """str: Name of the tilt/shear save file"""
+         return join(self.output_dir, self.prefix + ".shear.npz")
+
+     def run(self, files, orders, mask, bias):
+         """Determine the curvature of the slit
+
+         Parameters
+         ----------
+         files : list(str)
+             files to use for the curvature determination
+         orders : tuple
+             results of the order tracing
+         mask : array of shape (nrow, ncol)
+             Bad pixel mask
+         bias : tuple
+             results of the master bias step
+
+         Returns
+         -------
+         tilt : array of shape (nord, ncol)
+             first order slit curvature at each point
+         shear : array of shape (nord, ncol)
+             second order slit curvature at each point
+         """
+
+         logger.info("Slit curvature files: %s", files)
+
+         orig, thead = self.calibrate(files, mask, bias, None)
+         extracted, _, _, _ = self.extract(orig, thead, orders, None)
+
+         orders, column_range = orders
+         module = CurvatureModule(
+             orders,
+             column_range=column_range,
+             extraction_width=self.extraction_width,
+             order_range=self.order_range,
+             fit_degree=self.fit_degree,
+             curv_degree=self.curv_degree,
+             sigma_cutoff=self.sigma_cutoff,
+             mode=self.curvature_mode,
+             peak_threshold=self.peak_threshold,
+             peak_width=self.peak_width,
+             window_width=self.window_width,
+             peak_function=self.peak_function,
+             plot=self.plot,
+             plot_title=self.plot_title,
+         )
+         tilt, shear = module.execute(extracted, orig)
+         self.save(tilt, shear)
+         return tilt, shear
+
+     def save(self, tilt, shear):
+         """Save the results of the curvature determination
+
+         Parameters
+         ----------
+         tilt : array of shape (nord, ncol)
+             first order slit curvature at each point
+         shear : array of shape (nord, ncol)
+             second order slit curvature at each point
+         """
+         np.savez(self.savefile, tilt=tilt, shear=shear)
+         logger.info("Created slit curvature file: %s", self.savefile)
+
+     def load(self):
+         """Load the curvature if possible; otherwise return None, None, i.e. use vertical extraction
+
+         Returns
+         -------
+         tilt : array of shape (nord, ncol)
+             first order slit curvature at each point
+         shear : array of shape (nord, ncol)
+             second order slit curvature at each point
+         """
+         try:
+             data = np.load(self.savefile, allow_pickle=True)
+             logger.info("Slit curvature file: %s", self.savefile)
+         except FileNotFoundError:
+             logger.warning("No data for slit curvature found, setting it to 0.")
+             data = {"tilt": None, "shear": None}
+
+         tilt = data["tilt"]
+         shear = data["shear"]
+         return tilt, shear
+
+
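# A minimal sketch (filename hypothetical) of inspecting the ".shear.npz"
# product written by SlitCurvatureDetermination.save() above: "tilt" holds the
# first order curvature and "shear" the second order, each of shape (nord, ncol).
import numpy as np

data = np.load("harps_red.shear.npz", allow_pickle=True)
tilt, shear = data["tilt"], data["shear"]
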
+ class RectifyImage(Step):
+     """Create a 2D image of the rectified orders"""
+
+     def __init__(self, *args, **config):
+         super().__init__(*args, **config)
+         self._dependsOn += ["files", "orders", "curvature", "mask", "freq_comb"]
+         # self._loadDependsOn += []
+
+         #: width of the orders in the extraction
+         self.extraction_width = config["extraction_width"]
+         #: key into the files dict selecting the images to rectify
+         self.input_files = config["input_files"]
+
+     def filename(self, name):
+         """str: Name of the rectified image file"""
+         return util.swap_extension(name, ".rectify.fits", path=self.output_dir)
+
+     def run(self, files, orders, curvature, mask, freq_comb):
+         """Rectify each input image and merge its orders into one 2D image"""
+         orders, column_range = orders
+         tilt, shear = curvature
+         wave = freq_comb
+
+         files = files[self.input_files]
+
+         rectified = {}
+         for fname in tqdm(files, desc="Files"):
+             img, head = self.instrument.load_fits(
+                 fname, self.mode, mask=mask, dtype="f8"
+             )
+
+             images, cr, xwd = rectify_image(
+                 img,
+                 orders,
+                 column_range,
+                 self.extraction_width,
+                 self.order_range,
+                 tilt,
+                 shear,
+             )
+             wavelength, image = merge_images(images, wave, cr, xwd)
+
+             self.save(fname, image, wavelength, header=head)
+             rectified[fname] = (wavelength, image)
+
+         return rectified
+
+     def save(self, fname, image, wavelength, header=None):
+         """Save one rectified image to disk"""
+         # Change filename
+         fname = self.filename(fname)
+         # Create HDU list: primary header, rectified image, and wavelength table
+         primary = fits.PrimaryHDU(header=header)
+         secondary = fits.ImageHDU(data=image)
+         column = fits.Column(name="wavelength", array=wavelength, format="D")
+         tertiary = fits.BinTableHDU.from_columns([column])
+         hdus = fits.HDUList([primary, secondary, tertiary])
+         # Save data to file
+         hdus.writeto(fname, overwrite=True, output_verify="silentfix")
+
+     def load(self, files):
+         """Load the rectified images from disk"""
+         files = files[self.input_files]
+
+         rectified = {}
+         for orig_fname in files:
+             fname = self.filename(orig_fname)
+             with fits.open(fname, memmap=False) as hdu:
+                 img = hdu[1].data
+                 wave = hdu[2].data["wavelength"]
+             rectified[orig_fname] = (wave, img)
+
+         return rectified
+
+
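# A minimal sketch (filename hypothetical) mirroring RectifyImage.load():
# HDU 0 carries the header, HDU 1 the merged rectified image, and HDU 2 a
# binary table with a single "wavelength" column, as written by save() above.
from astropy.io import fits

with fits.open("observation.rectify.fits", memmap=False) as hdu:
    image = hdu[1].data
    wavelength = hdu[2].data["wavelength"]
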
+ class ScienceExtraction(CalibrationStep, ExtractionStep):
+     """Extract the science spectra"""
+
+     def __init__(self, *args, **config):
+         super().__init__(*args, **config)
+         self._dependsOn += ["norm_flat", "curvature", "scatter"]
+         self._loadDependsOn += ["files"]
+
+     def science_file(self, name):
+         """Name of the science file on disk, based on the input file
+
+         Parameters
+         ----------
+         name : str
+             name of the observation file
+
+         Returns
+         -------
+         name : str
+             science file name
+         """
+         return util.swap_extension(name, ".science.ech", path=self.output_dir)
+
+     def run(self, files, bias, orders, norm_flat, curvature, scatter, mask):
+         """Extract the science spectra from the observations
+
+         Parameters
+         ----------
+         files : list(str)
+             list of observations
+         bias : tuple
+             results from the master bias step
+         orders : tuple
+             results from the order tracing step
+         norm_flat : tuple
+             results from the flat normalization
+         curvature : tuple
+             results from the slit curvature step
+         scatter : tuple
+             results from the background scatter step
+         mask : array of shape (nrow, ncol)
+             bad pixel map
+
+         Returns
+         -------
+         heads : list(FITS header)
+             FITS headers of each observation
+         specs : list(array of shape (nord, ncol))
+             extracted spectra
+         sigmas : list(array of shape (nord, ncol))
+             uncertainties of the extracted spectra
+         slitfus : list(array of shape (nord, (extr_height*oversample+1)+1))
+             slit illumination functions
+         columns : list(array of shape (nord, 2))
+             column ranges for each spectrum
+         """
+         heads, specs, sigmas, slitfus, columns = [], [], [], [], []
+         for fname in tqdm(files, desc="Files"):
+             logger.info("Science file: %s", fname)
+             # Calibrate the input image
+             im, head = self.calibrate([fname], mask, bias, norm_flat)
+             # Optimally extract science spectrum
+             spec, sigma, slitfu, cr = self.extract(
+                 im, head, orders, curvature, scatter=scatter
+             )
+
+             # make slitfus from swaths into one
+             # print(len(slitfu),[len(sf) for sf in slitfu])
+             # slitfu = np.median(np.array(slitfu),axis=0)
+             # save spectrum to disk
+             self.save(fname, head, spec, sigma, slitfu, cr)
+             heads.append(head)
+             specs.append(spec)
+             sigmas.append(sigma)
+             slitfus.append(slitfu)
+             columns.append(cr)
+
+         return heads, specs, sigmas, slitfus, columns
+
+     def save(self, fname, head, spec, sigma, slitfu, column_range):
+         """Save the results of one extraction
+
+         Parameters
+         ----------
+         fname : str
+             filename to save to
+         head : FITS header
+             FITS header
+         spec : array of shape (nord, ncol)
+             extracted spectrum
+         sigma : array of shape (nord, ncol)
+             uncertainties of the extracted spectrum
+         slitfu : array of shape (nord, (extr_height*oversample+1)+1)
+             slit illumination function
+         column_range : array of shape (nord, 2)
+             range of columns that have spectrum
+         """
+         nameout = self.science_file(fname)
+         echelle.save(
+             nameout, head, spec=spec, sig=sigma, slitfu=slitfu, columns=column_range
+         )
+         logger.info("Created science file: %s", nameout)
+
+     def load(self, files):
+         """Load all science spectra from disk
+
+         Returns
+         -------
+         heads : list(FITS header)
+             FITS headers of each observation
+         specs : list(array of shape (nord, ncol))
+             extracted spectra
+         sigmas : list(array of shape (nord, ncol))
+             uncertainties of the extracted spectra
+         slitfus : None
+             slit illumination functions are not read back from disk
+         columns : list(array of shape (nord, 2))
+             column ranges for each spectrum
+         """
+         files = files["science"]
+         files = [self.science_file(fname) for fname in files]
+
+         if len(files) == 0:
+             raise FileNotFoundError("Science files are required to load them")
+
+         logger.info("Science files: %s", files)
+
+         heads, specs, sigmas, columns = [], [], [], []
+         for fname in files:
+             # fname = join(self.output_dir, fname)
+             science = echelle.read(
+                 fname,
+                 continuum_normalization=False,
+                 barycentric_correction=False,
+                 radial_velociy_correction=False,
+             )
+             heads.append(science.header)
+             specs.append(science["spec"])
+             sigmas.append(science["sig"])
+             columns.append(science["columns"])
+
+         return heads, specs, sigmas, None, columns
+
+
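# A minimal sketch (filename hypothetical) of reading one ".science.ech"
# product back with the package's echelle module, using the same keyword
# arguments as ScienceExtraction.load() above (note that "radial_velociy"
# is the spelling of the keyword the code actually passes).
from pyreduce import echelle

science = echelle.read(
    "observation.science.ech",
    continuum_normalization=False,
    barycentric_correction=False,
    radial_velociy_correction=False,
)
spec, sigma = science["spec"], science["sig"]
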
+ class ContinuumNormalization(Step):
+     """Determine the continuum for each observation"""
+
+     def __init__(self, *args, **config):
+         super().__init__(*args, **config)
+         self._dependsOn += ["science", "freq_comb", "norm_flat"]
+         self._loadDependsOn += ["norm_flat", "science"]
+
+     @property
+     def savefile(self):
+         """str: savefile name"""
+         return join(self.output_dir, self.prefix + ".cont.npz")
+
+     def run(self, science, freq_comb, norm_flat):
+         """Determine the continuum for each observation.
+         Also splices the orders together.
+
+         Parameters
+         ----------
+         science : tuple
+             results from the science extraction step
+         freq_comb : tuple
+             results from the freq_comb step (or wavecal if those don't exist)
+         norm_flat : tuple
+             results from the normalized flatfield step
+
+         Returns
+         -------
+         heads : list(FITS header)
+             FITS headers of each observation
+         specs : list(array of shape (nord, ncol))
+             extracted spectra
+         sigmas : list(array of shape (nord, ncol))
+             uncertainties of the extracted spectra
+         conts : list(array of shape (nord, ncol))
+             continuum for each spectrum
+         columns : list(array of shape (nord, 2))
+             column ranges for each spectrum
+         """
+         wave = freq_comb
+         heads, specs, sigmas, _, columns = science
+         norm, blaze = norm_flat
+
+         logger.info("Continuum normalization")
+         conts = [None for _ in specs]
+         for j, (spec, sigma) in enumerate(zip(specs, sigmas, strict=False)):
+             logger.info("Splicing orders")
+             specs[j], wave, blaze, sigmas[j] = splice_orders(
+                 spec,
+                 wave,
+                 blaze,
+                 sigma,
+                 scaling=True,
+                 plot=self.plot,
+                 plot_title=self.plot_title,
+             )
+             logger.info("Normalizing continuum")
+             conts[j] = continuum_normalize(
+                 specs[j],
+                 wave,
+                 blaze,
+                 sigmas[j],
+                 plot=self.plot,
+                 plot_title=self.plot_title,
+             )
+
+         self.save(heads, specs, sigmas, conts, columns)
+         return heads, specs, sigmas, conts, columns
+
+     def save(self, heads, specs, sigmas, conts, columns):
+         """Save the results from the continuum normalization
+
+         Parameters
+         ----------
+         heads : list(FITS header)
+             FITS headers of each observation
+         specs : list(array of shape (nord, ncol))
+             extracted spectra
+         sigmas : list(array of shape (nord, ncol))
+             uncertainties of the extracted spectra
+         conts : list(array of shape (nord, ncol))
+             continuum for each spectrum
+         columns : list(array of shape (nord, 2))
+             column ranges for each spectrum
+         """
+         value = {
+             "heads": heads,
+             "specs": specs,
+             "sigmas": sigmas,
+             "conts": conts,
+             "columns": columns,
+         }
+         joblib.dump(value, self.savefile)
+         logger.info("Created continuum normalization file: %s", self.savefile)
+
+     def load(self, norm_flat, science):
+         """Load the results from the continuum normalization
+
+         Returns
+         -------
+         heads : list(FITS header)
+             FITS headers of each observation
+         specs : list(array of shape (nord, ncol))
+             extracted spectra
+         sigmas : list(array of shape (nord, ncol))
+             uncertainties of the extracted spectra
+         conts : list(array of shape (nord, ncol))
+             continuum for each spectrum
+         columns : list(array of shape (nord, 2))
+             column ranges for each spectrum
+         """
+         try:
+             data = joblib.load(self.savefile)
+             logger.info("Continuum normalization file: %s", self.savefile)
+         except FileNotFoundError:
+             # Use science files instead
+             logger.warning(
+                 "No continuum normalized data found. Using unnormalized results instead."
+             )
+             heads, specs, sigmas, _, columns = science
+             norm, blaze = norm_flat
+             conts = [blaze for _ in specs]
+             data = {
+                 "heads": heads,
+                 "specs": specs,
+                 "sigmas": sigmas,
+                 "conts": conts,
+                 "columns": columns,
+             }
+         heads = data["heads"]
+         specs = data["specs"]
+         sigmas = data["sigmas"]
+         conts = data["conts"]
+         columns = data["columns"]
+         return heads, specs, sigmas, conts, columns
+
+
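# A minimal sketch (filename hypothetical): despite its ".npz" extension, the
# continuum savefile is written with joblib.dump() in save() above, so it is
# read back with joblib rather than numpy.
import joblib

data = joblib.load("harps_red.cont.npz")
heads, specs = data["heads"], data["specs"]
conts, columns = data["conts"], data["columns"]
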
+ class Finalize(Step):
+     """Create the final output files"""
+
+     def __init__(self, *args, **config):
+         super().__init__(*args, **config)
+         self._dependsOn += ["continuum", "freq_comb", "config"]
+         self.filename = config["filename"]
+
+     def output_file(self, number, name):
+         """str: output file name"""
+         out = self.filename.format(
+             instrument=self.instrument.name,
+             night=self.night,
+             mode=self.mode,
+             number=number,
+             input=name,
+         )
+         return join(self.output_dir, out)
+
+     def save_config_to_header(self, head, config, prefix="PR"):
+         for key, value in config.items():
+             if isinstance(value, dict):
+                 head = self.save_config_to_header(
+                     head, value, prefix=f"{prefix} {key.upper()}"
+                 )
+             else:
+                 if key in ["plot", "$schema", "__skip_existing__"]:
+                     # Skip values that are not relevant to the file product
+                     continue
+                 if value is None:
+                     value = "null"
+                 elif not np.isscalar(value):
+                     value = str(value)
+                 head[f"HIERARCH {prefix} {key.upper()}"] = value
+         return head
+
+     def run(self, continuum, freq_comb, config):
+         """Create the final output files
+
+         This includes:
+           - heliocentric corrections
+           - creating one echelle file per observation
+
+         Parameters
+         ----------
+         continuum : tuple
+             results from the continuum normalization
+         freq_comb : tuple
+             results from the frequency comb step (or wavelength calibration)
+         config : dict
+             the reduction configuration, stored in the FITS headers
+
+         Returns
+         -------
+         fnames : list(str)
+             names of the final output files
+         """
+         heads, specs, sigmas, conts, columns = continuum
+         wave = freq_comb
+
+         fnames = []
+         # Combine science with wavecal and continuum
+         for i, (head, spec, sigma, blaze, column) in enumerate(
+             zip(heads, specs, sigmas, conts, columns, strict=False)
+         ):
+             head["e_erscle"] = ("absolute", "error scale")
+
+             # Add heliocentric correction
+             try:
+                 rv_corr, bjd = util.helcorr(
+                     head["e_obslon"],
+                     head["e_obslat"],
+                     head["e_obsalt"],
+                     head["e_ra"],
+                     head["e_dec"],
+                     head["e_jd"],
+                 )
+
+                 logger.debug("Heliocentric correction: %f km/s", rv_corr)
+                 logger.debug("Heliocentric Julian Date: %s", str(bjd))
+             except KeyError:
+                 logger.warning("Could not calculate heliocentric correction")
+                 # logger.warning("Telescope is in space?")
+                 rv_corr = 0
+                 bjd = head["e_jd"]
+
+             head["barycorr"] = rv_corr
+             head["e_jd"] = bjd
+             head["HIERARCH PR_version"] = __version__
+
+             head = self.save_config_to_header(head, config)
+
+             if self.plot:
+                 plt.plot(wave.T, (spec / blaze).T)
+                 if self.plot_title is not None:
+                     plt.title(self.plot_title)
+                 plt.show()
+
+             fname = self.save(i, head, spec, sigma, blaze, wave, column)
+             fnames.append(fname)
+         return fnames
+
+     def save(self, i, head, spec, sigma, cont, wave, columns):
+         """Save one output spectrum to disk
+
+         Parameters
+         ----------
+         i : int
+             running index of the file, used in the output filename
+         head : FITS header
+             FITS header
+         spec : array of shape (nord, ncol)
+             final spectrum
+         sigma : array of shape (nord, ncol)
+             final uncertainties
+         cont : array of shape (nord, ncol)
+             final continuum scales
+         wave : array of shape (nord, ncol)
+             wavelength solution
+         columns : array of shape (nord, 2)
+             columns that carry signal
+
+         Returns
+         -------
+         out_file : str
+             name of the output file
+         """
+         original_name = os.path.splitext(head["e_input"])[0]
+         out_file = self.output_file(i, original_name)
+         echelle.save(
+             out_file, head, spec=spec, sig=sigma, cont=cont, wave=wave, columns=columns
+         )
+         logger.info("Final science file: %s", out_file)
+         return out_file
+
+
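# A minimal sketch (key names and values hypothetical) of the header cards
# produced by Finalize.save_config_to_header() above: nested config keys are
# joined with spaces under the "PR" prefix, None becomes "null", and
# non-scalar values are stringified.
from astropy.io import fits

head = fits.Header()
head["HIERARCH PR ORDERS DEGREE"] = 4                            # config["orders"]["degree"]
head["HIERARCH PR ORDERS FILTER"] = "null"                       # a None value
head["HIERARCH PR SCIENCE EXTRACTION_WIDTH"] = str([0.5, 0.5])   # a non-scalar value
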
+ class Reducer:
+     step_order = {
+         "bias": 10,
+         "flat": 20,
+         "orders": 30,
+         "curvature": 40,
+         "scatter": 45,
+         "norm_flat": 50,
+         "wavecal_master": 60,
+         "wavecal_init": 64,
+         "wavecal": 67,
+         "freq_comb_master": 70,
+         "freq_comb": 72,
+         "rectify": 75,
+         "science": 80,
+         "continuum": 90,
+         "finalize": 100,
+     }
+
+     modules = {
+         "mask": Mask,
+         "bias": Bias,
+         "flat": Flat,
+         "orders": OrderTracing,
+         "scatter": BackgroundScatter,
+         "norm_flat": NormalizeFlatField,
+         "wavecal_master": WavelengthCalibrationMaster,
+         "wavecal_init": WavelengthCalibrationInitialize,
+         "wavecal": WavelengthCalibrationFinalize,
+         "freq_comb_master": LaserFrequencyCombMaster,
+         "freq_comb": LaserFrequencyCombFinalize,
+         "curvature": SlitCurvatureDetermination,
+         "science": ScienceExtraction,
+         "continuum": ContinuumNormalization,
+         "finalize": Finalize,
+         "rectify": RectifyImage,
+     }
+
+     def __init__(
+         self,
+         files,
+         output_dir,
+         target,
+         instrument,
+         mode,
+         night,
+         config,
+         order_range=None,
+         skip_existing=False,
+     ):
+         """Reduce all observations from a single night and instrument mode
+
+         Parameters
+         ----------
+         files : dict(str: list(str))
+             data files for each step
+         output_dir : str
+             directory to place output files in
+         target : str
+             observed target, as used in directory names/FITS headers
+         instrument : str
+             instrument used for the observations
+         mode : str
+             instrument mode used (e.g. "red" or "blue" for HARPS)
+         night : str
+             observation night, in the same format as used in the directory structure/file sorting
+         config : dict
+             numeric reduction-specific settings, like pixel thresholds, which may change between runs
+         order_range : tuple(int, int), optional
+             range of orders to reduce; None means all orders
+         skip_existing : bool
+             whether to skip reductions with existing output
+         """
+         #:dict(str:str): Filenames sorted by usecase
+         self.files = files
+         self.output_dir = output_dir.format(
+             instrument=str(instrument), target=target, night=night, mode=mode
+         )
+
+         if isinstance(instrument, str):
+             instrument = load_instrument(instrument)
+
+         self.data = {"files": files, "config": config}
+         self.inputs = (instrument, mode, target, night, output_dir, order_range)
+         self.config = config
+         self.skip_existing = skip_existing
+
+     def run_module(self, step, load=False):
+         # The module this step is based on (an instance of the Step class)
+         module = self.modules[step](*self.inputs, **self.config.get(step, {}))
+
+         # Load the dependencies necessary for loading/running this step
+         dependencies = module.dependsOn if not load else module.loadDependsOn
+         for dependency in dependencies:
+             if dependency not in self.data.keys():
+                 self.data[dependency] = self.run_module(dependency, load=True)
+         args = {d: self.data[d] for d in dependencies}
+
+         # Try to load the data if the step is not explicitly requested to run.
+         # If the intermediate data is not available, run the step normally
+         # instead, but emit a warning.
+         if load:
+             try:
+                 logger.info("Loading data from step '%s'", step)
+                 data = module.load(**args)
+             except FileNotFoundError:
+                 logger.warning(
+                     "Intermediate file(s) for loading step %s not found. Running it instead.",
+                     step,
+                 )
+                 data = self.run_module(step, load=False)
+         else:
+             logger.info("Running step '%s'", step)
+             if step in self.files.keys():
+                 args["files"] = self.files[step]
+             data = module.run(**args)
+
+         self.data[step] = data
+         return data
+
+     def prepare_output_dir(self):
+         # Create the output folder structure if necessary
+         output_dir = self.output_dir
+         if not os.path.exists(output_dir):
+             os.makedirs(output_dir)
+
+     def run_steps(self, steps="all"):
+         """
+         Execute the steps as required
+
+         Parameters
+         ----------
+         steps : {tuple(str), "all"}, optional
+             which steps of the reduction process to perform;
+             the possible steps are: "bias", "flat", "orders", "curvature",
+             "scatter", "norm_flat", "wavecal_master", "wavecal_init", "wavecal",
+             "freq_comb_master", "freq_comb", "rectify", "science", "continuum",
+             and "finalize".
+             Alternatively set steps to "all", which is equivalent to passing all steps.
+         """
+         self.prepare_output_dir()
+
+         if steps == "all":
+             steps = list(self.step_order.keys())
+         steps = list(steps)
+
+         if self.skip_existing and "finalize" in steps:
+             module = self.modules["finalize"](
+                 *self.inputs, **self.config.get("finalize", {})
+             )
+             exists = [False] * len(self.files["science"])
+             data = {"finalize": [None] * len(self.files["science"])}
+             for i, f in enumerate(self.files["science"]):
+                 fname_in = os.path.basename(f)
+                 fname_in = os.path.splitext(fname_in)[0]
+                 fname_out = module.output_file("?", fname_in)
+                 fname_out = glob.glob(fname_out)
+                 exists[i] = len(fname_out) != 0
+                 if exists[i]:
+                     data["finalize"][i] = fname_out[0]
+             if all(exists):
+                 logger.info("All science files already exist, skipping this set")
+                 logger.debug("--------------------------------")
+                 return data
+
+         steps.sort(key=lambda x: self.step_order[x])
+
+         for step in steps:
+             self.run_module(step)
+
+         logger.debug("--------------------------------")
+         return self.data
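
# A hedged usage sketch of the Reducer class above; the file lists, paths,
# target, and night are placeholders, and load_config is assumed to be the
# configuration loader provided by pyreduce.configuration.
from pyreduce.configuration import load_config

config = load_config(None, "HARPS", 0)
files = {"bias": [], "flat": [], "science": []}  # filenames sorted by usecase
reducer = Reducer(
    files,
    output_dir="./reduced/{instrument}/{night}",
    target="HD 132205",
    instrument="HARPS",
    mode="red",
    night="2015-04-01",
    config=config,
    skip_existing=False,
)
data = reducer.run_steps(steps="all")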