pyreduce-astro 0.7a4__cp314-cp314-win_amd64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (182)
  1. pyreduce/__init__.py +67 -0
  2. pyreduce/__main__.py +322 -0
  3. pyreduce/cli.py +342 -0
  4. pyreduce/clib/Release/_slitfunc_2d.cp311-win_amd64.exp +0 -0
  5. pyreduce/clib/Release/_slitfunc_2d.cp311-win_amd64.lib +0 -0
  6. pyreduce/clib/Release/_slitfunc_2d.cp312-win_amd64.exp +0 -0
  7. pyreduce/clib/Release/_slitfunc_2d.cp312-win_amd64.lib +0 -0
  8. pyreduce/clib/Release/_slitfunc_2d.cp313-win_amd64.exp +0 -0
  9. pyreduce/clib/Release/_slitfunc_2d.cp313-win_amd64.lib +0 -0
  10. pyreduce/clib/Release/_slitfunc_2d.cp314-win_amd64.exp +0 -0
  11. pyreduce/clib/Release/_slitfunc_2d.cp314-win_amd64.lib +0 -0
  12. pyreduce/clib/Release/_slitfunc_2d.obj +0 -0
  13. pyreduce/clib/Release/_slitfunc_bd.cp311-win_amd64.exp +0 -0
  14. pyreduce/clib/Release/_slitfunc_bd.cp311-win_amd64.lib +0 -0
  15. pyreduce/clib/Release/_slitfunc_bd.cp312-win_amd64.exp +0 -0
  16. pyreduce/clib/Release/_slitfunc_bd.cp312-win_amd64.lib +0 -0
  17. pyreduce/clib/Release/_slitfunc_bd.cp313-win_amd64.exp +0 -0
  18. pyreduce/clib/Release/_slitfunc_bd.cp313-win_amd64.lib +0 -0
  19. pyreduce/clib/Release/_slitfunc_bd.cp314-win_amd64.exp +0 -0
  20. pyreduce/clib/Release/_slitfunc_bd.cp314-win_amd64.lib +0 -0
  21. pyreduce/clib/Release/_slitfunc_bd.obj +0 -0
  22. pyreduce/clib/__init__.py +0 -0
  23. pyreduce/clib/_slitfunc_2d.cp311-win_amd64.pyd +0 -0
  24. pyreduce/clib/_slitfunc_2d.cp312-win_amd64.pyd +0 -0
  25. pyreduce/clib/_slitfunc_2d.cp313-win_amd64.pyd +0 -0
  26. pyreduce/clib/_slitfunc_2d.cp314-win_amd64.pyd +0 -0
  27. pyreduce/clib/_slitfunc_bd.cp311-win_amd64.pyd +0 -0
  28. pyreduce/clib/_slitfunc_bd.cp312-win_amd64.pyd +0 -0
  29. pyreduce/clib/_slitfunc_bd.cp313-win_amd64.pyd +0 -0
  30. pyreduce/clib/_slitfunc_bd.cp314-win_amd64.pyd +0 -0
  31. pyreduce/clib/build_extract.py +75 -0
  32. pyreduce/clib/slit_func_2d_xi_zeta_bd.c +1313 -0
  33. pyreduce/clib/slit_func_2d_xi_zeta_bd.h +55 -0
  34. pyreduce/clib/slit_func_bd.c +362 -0
  35. pyreduce/clib/slit_func_bd.h +17 -0
  36. pyreduce/clipnflip.py +147 -0
  37. pyreduce/combine_frames.py +861 -0
  38. pyreduce/configuration.py +191 -0
  39. pyreduce/continuum_normalization.py +329 -0
  40. pyreduce/cwrappers.py +404 -0
  41. pyreduce/datasets.py +238 -0
  42. pyreduce/echelle.py +413 -0
  43. pyreduce/estimate_background_scatter.py +130 -0
  44. pyreduce/extract.py +1362 -0
  45. pyreduce/extraction_width.py +77 -0
  46. pyreduce/instruments/__init__.py +0 -0
  47. pyreduce/instruments/aj.py +9 -0
  48. pyreduce/instruments/aj.yaml +51 -0
  49. pyreduce/instruments/andes.py +102 -0
  50. pyreduce/instruments/andes.yaml +72 -0
  51. pyreduce/instruments/common.py +711 -0
  52. pyreduce/instruments/common.yaml +57 -0
  53. pyreduce/instruments/crires_plus.py +103 -0
  54. pyreduce/instruments/crires_plus.yaml +101 -0
  55. pyreduce/instruments/filters.py +195 -0
  56. pyreduce/instruments/harpn.py +203 -0
  57. pyreduce/instruments/harpn.yaml +140 -0
  58. pyreduce/instruments/harps.py +312 -0
  59. pyreduce/instruments/harps.yaml +144 -0
  60. pyreduce/instruments/instrument_info.py +140 -0
  61. pyreduce/instruments/jwst_miri.py +29 -0
  62. pyreduce/instruments/jwst_miri.yaml +53 -0
  63. pyreduce/instruments/jwst_niriss.py +98 -0
  64. pyreduce/instruments/jwst_niriss.yaml +60 -0
  65. pyreduce/instruments/lick_apf.py +35 -0
  66. pyreduce/instruments/lick_apf.yaml +60 -0
  67. pyreduce/instruments/mcdonald.py +123 -0
  68. pyreduce/instruments/mcdonald.yaml +56 -0
  69. pyreduce/instruments/metis_ifu.py +45 -0
  70. pyreduce/instruments/metis_ifu.yaml +62 -0
  71. pyreduce/instruments/metis_lss.py +45 -0
  72. pyreduce/instruments/metis_lss.yaml +62 -0
  73. pyreduce/instruments/micado.py +45 -0
  74. pyreduce/instruments/micado.yaml +62 -0
  75. pyreduce/instruments/models.py +257 -0
  76. pyreduce/instruments/neid.py +156 -0
  77. pyreduce/instruments/neid.yaml +61 -0
  78. pyreduce/instruments/nirspec.py +215 -0
  79. pyreduce/instruments/nirspec.yaml +63 -0
  80. pyreduce/instruments/nte.py +42 -0
  81. pyreduce/instruments/nte.yaml +55 -0
  82. pyreduce/instruments/uves.py +46 -0
  83. pyreduce/instruments/uves.yaml +65 -0
  84. pyreduce/instruments/xshooter.py +39 -0
  85. pyreduce/instruments/xshooter.yaml +63 -0
  86. pyreduce/make_shear.py +607 -0
  87. pyreduce/masks/mask_crires_plus_det1.fits.gz +0 -0
  88. pyreduce/masks/mask_crires_plus_det2.fits.gz +0 -0
  89. pyreduce/masks/mask_crires_plus_det3.fits.gz +0 -0
  90. pyreduce/masks/mask_ctio_chiron.fits.gz +0 -0
  91. pyreduce/masks/mask_elodie.fits.gz +0 -0
  92. pyreduce/masks/mask_feros3.fits.gz +0 -0
  93. pyreduce/masks/mask_flames_giraffe.fits.gz +0 -0
  94. pyreduce/masks/mask_harps_blue.fits.gz +0 -0
  95. pyreduce/masks/mask_harps_red.fits.gz +0 -0
  96. pyreduce/masks/mask_hds_blue.fits.gz +0 -0
  97. pyreduce/masks/mask_hds_red.fits.gz +0 -0
  98. pyreduce/masks/mask_het_hrs_2x5.fits.gz +0 -0
  99. pyreduce/masks/mask_jwst_miri_lrs_slitless.fits.gz +0 -0
  100. pyreduce/masks/mask_jwst_niriss_gr700xd.fits.gz +0 -0
  101. pyreduce/masks/mask_lick_apf_.fits.gz +0 -0
  102. pyreduce/masks/mask_mcdonald.fits.gz +0 -0
  103. pyreduce/masks/mask_nes.fits.gz +0 -0
  104. pyreduce/masks/mask_nirspec_nirspec.fits.gz +0 -0
  105. pyreduce/masks/mask_sarg.fits.gz +0 -0
  106. pyreduce/masks/mask_sarg_2x2a.fits.gz +0 -0
  107. pyreduce/masks/mask_sarg_2x2b.fits.gz +0 -0
  108. pyreduce/masks/mask_subaru_hds_red.fits.gz +0 -0
  109. pyreduce/masks/mask_uves_blue.fits.gz +0 -0
  110. pyreduce/masks/mask_uves_blue_binned_2_2.fits.gz +0 -0
  111. pyreduce/masks/mask_uves_middle.fits.gz +0 -0
  112. pyreduce/masks/mask_uves_middle_2x2_split.fits.gz +0 -0
  113. pyreduce/masks/mask_uves_middle_binned_2_2.fits.gz +0 -0
  114. pyreduce/masks/mask_uves_red.fits.gz +0 -0
  115. pyreduce/masks/mask_uves_red_2x2.fits.gz +0 -0
  116. pyreduce/masks/mask_uves_red_2x2_split.fits.gz +0 -0
  117. pyreduce/masks/mask_uves_red_binned_2_2.fits.gz +0 -0
  118. pyreduce/masks/mask_xshooter_nir.fits.gz +0 -0
  119. pyreduce/pipeline.py +619 -0
  120. pyreduce/rectify.py +138 -0
  121. pyreduce/reduce.py +2065 -0
  122. pyreduce/settings/settings_AJ.json +19 -0
  123. pyreduce/settings/settings_ANDES.json +89 -0
  124. pyreduce/settings/settings_CRIRES_PLUS.json +89 -0
  125. pyreduce/settings/settings_HARPN.json +73 -0
  126. pyreduce/settings/settings_HARPS.json +69 -0
  127. pyreduce/settings/settings_JWST_MIRI.json +55 -0
  128. pyreduce/settings/settings_JWST_NIRISS.json +55 -0
  129. pyreduce/settings/settings_LICK_APF.json +62 -0
  130. pyreduce/settings/settings_MCDONALD.json +58 -0
  131. pyreduce/settings/settings_METIS_IFU.json +77 -0
  132. pyreduce/settings/settings_METIS_LSS.json +77 -0
  133. pyreduce/settings/settings_MICADO.json +78 -0
  134. pyreduce/settings/settings_NEID.json +73 -0
  135. pyreduce/settings/settings_NIRSPEC.json +58 -0
  136. pyreduce/settings/settings_NTE.json +60 -0
  137. pyreduce/settings/settings_UVES.json +54 -0
  138. pyreduce/settings/settings_XSHOOTER.json +78 -0
  139. pyreduce/settings/settings_pyreduce.json +184 -0
  140. pyreduce/settings/settings_schema.json +850 -0
  141. pyreduce/tools/__init__.py +0 -0
  142. pyreduce/tools/combine.py +117 -0
  143. pyreduce/trace.py +979 -0
  144. pyreduce/util.py +1366 -0
  145. pyreduce/wavecal/MICADO_HK_3arcsec_chip5.npz +0 -0
  146. pyreduce/wavecal/atlas/thar.fits +4946 -13
  147. pyreduce/wavecal/atlas/thar_list.txt +4172 -0
  148. pyreduce/wavecal/atlas/une.fits +0 -0
  149. pyreduce/wavecal/convert.py +38 -0
  150. pyreduce/wavecal/crires_plus_J1228_Open_det1.npz +0 -0
  151. pyreduce/wavecal/crires_plus_J1228_Open_det2.npz +0 -0
  152. pyreduce/wavecal/crires_plus_J1228_Open_det3.npz +0 -0
  153. pyreduce/wavecal/harpn_harpn_2D.npz +0 -0
  154. pyreduce/wavecal/harps_blue_2D.npz +0 -0
  155. pyreduce/wavecal/harps_blue_pol_2D.npz +0 -0
  156. pyreduce/wavecal/harps_red_2D.npz +0 -0
  157. pyreduce/wavecal/harps_red_pol_2D.npz +0 -0
  158. pyreduce/wavecal/mcdonald.npz +0 -0
  159. pyreduce/wavecal/metis_lss_l_2D.npz +0 -0
  160. pyreduce/wavecal/metis_lss_m_2D.npz +0 -0
  161. pyreduce/wavecal/nirspec_K2.npz +0 -0
  162. pyreduce/wavecal/uves_blue_360nm_2D.npz +0 -0
  163. pyreduce/wavecal/uves_blue_390nm_2D.npz +0 -0
  164. pyreduce/wavecal/uves_blue_437nm_2D.npz +0 -0
  165. pyreduce/wavecal/uves_middle_2x2_2D.npz +0 -0
  166. pyreduce/wavecal/uves_middle_565nm_2D.npz +0 -0
  167. pyreduce/wavecal/uves_middle_580nm_2D.npz +0 -0
  168. pyreduce/wavecal/uves_middle_600nm_2D.npz +0 -0
  169. pyreduce/wavecal/uves_middle_665nm_2D.npz +0 -0
  170. pyreduce/wavecal/uves_middle_860nm_2D.npz +0 -0
  171. pyreduce/wavecal/uves_red_580nm_2D.npz +0 -0
  172. pyreduce/wavecal/uves_red_600nm_2D.npz +0 -0
  173. pyreduce/wavecal/uves_red_665nm_2D.npz +0 -0
  174. pyreduce/wavecal/uves_red_760nm_2D.npz +0 -0
  175. pyreduce/wavecal/uves_red_860nm_2D.npz +0 -0
  176. pyreduce/wavecal/xshooter_nir.npz +0 -0
  177. pyreduce/wavelength_calibration.py +1871 -0
  178. pyreduce_astro-0.7a4.dist-info/METADATA +106 -0
  179. pyreduce_astro-0.7a4.dist-info/RECORD +182 -0
  180. pyreduce_astro-0.7a4.dist-info/WHEEL +4 -0
  181. pyreduce_astro-0.7a4.dist-info/entry_points.txt +2 -0
  182. pyreduce_astro-0.7a4.dist-info/licenses/LICENSE +674 -0
@@ -0,0 +1,1871 @@
1
+ """
2
+ Wavelength Calibration
3
+ by comparison to a reference spectrum
4
+ Loosely based on the IDL wavecal function
5
+ """
6
+
7
+ import logging
8
+ from os.path import dirname, join
9
+
10
+ import corner
11
+ import emcee
12
+ import matplotlib.pyplot as plt
13
+ import numpy as np
14
+ from astropy.io import fits
15
+ from numpy.polynomial.polynomial import Polynomial, polyval2d
16
+ from scipy import signal
17
+ from scipy.constants import speed_of_light
18
+ from scipy.interpolate import interp1d
19
+ from scipy.ndimage import gaussian_filter1d
20
+ from scipy.ndimage import grey_closing
21
+ from scipy.optimize import curve_fit
22
+ from tqdm import tqdm
23
+
24
+ from . import util
25
+
26
+ logger = logging.getLogger(__name__)
27
+
28
+
29
+ def polyfit(x, y, deg):
30
+ res = Polynomial.fit(x, y, deg, domain=[])
31
+ coef = res.coef[::-1]
32
+ return coef
33
+
34
+
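A brief note on the helper above (an editorial sketch, not part of the package diff): reversing res.coef puts the highest power first, which is the ordering np.polyval expects, so the result can be used like this:

    coef = polyfit([0.0, 1.0, 2.0], [1.0, 3.0, 5.0], deg=1)   # fits y = 2*x + 1 -> [2.0, 1.0]
    y_at_2 = np.polyval(coef, 2.0)                             # 5.0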
35
+ class AlignmentPlot:
36
+ """
37
+ Makes a plot which can be clicked to align the two spectra, reference and observed
38
+ """
39
+
40
+ def __init__(self, ax, obs, lines, offset=(0, 0), plot_title=None):
41
+ self.im = ax
42
+ self.first = True
43
+ self.nord, self.ncol = obs.shape
44
+ self.RED, self.GREEN, self.BLUE = 0, 1, 2
45
+
46
+ self.obs = obs
47
+ self.lines = lines
48
+ self.plot_title = plot_title
49
+
50
+ self.order_first = 0
51
+ self.spec_first = ""
52
+ self.x_first = 0
53
+ self.offset = list(offset)
54
+
55
+ self.make_ref_image()
56
+
57
+ def make_ref_image(self):
58
+ """create and show the reference plot, with the two spectra"""
59
+ ref_image = np.zeros((self.nord * 2, self.ncol, 3))
60
+ for iord in range(self.nord):
61
+ ref_image[iord * 2, :, self.RED] = 10 * np.ma.filled(self.obs[iord], 0)
62
+ if 0 <= iord + self.offset[0] < self.nord:
63
+ for line in self.lines[self.lines["order"] == iord]:
64
+ first = int(np.clip(line["xfirst"] + self.offset[1], 0, self.ncol))
65
+ last = int(np.clip(line["xlast"] + self.offset[1], 0, self.ncol))
66
+ order = (iord + self.offset[0]) * 2 + 1
67
+ ref_image[order, first:last, self.GREEN] = (
68
+ 10
69
+ * line["height"]
70
+ * signal.windows.gaussian(last - first, line["width"])
71
+ )
72
+ ref_image = np.clip(ref_image, 0, 1)
73
+ ref_image[ref_image < 0.1] = 0
74
+
75
+ self.im.imshow(
76
+ ref_image,
77
+ aspect="auto",
78
+ origin="lower",
79
+ extent=(-0.5, self.ncol - 0.5, -0.5, self.nord - 0.5),
80
+ )
81
+ title = "Alignment, Observed: RED, Reference: GREEN\nGreen should be above red!"
82
+ if self.plot_title is not None:
83
+ title = f"{self.plot_title}\n{title}"
84
+ self.im.figure.suptitle(title)
85
+ self.im.axes.set_xlabel("x [pixel]")
86
+ self.im.axes.set_ylabel("Order")
87
+
88
+ self.im.figure.canvas.draw()
89
+
90
+ def connect(self):
91
+ """connect the click event with the appropiate function"""
92
+ self.cidclick = self.im.figure.canvas.mpl_connect(
93
+ "button_press_event", self.on_click
94
+ )
95
+
96
+ def on_click(self, event):
97
+ """On click offset the reference by the distance between click positions"""
98
+ if event.ydata is None:
99
+ return
100
+ order = int(np.floor(event.ydata))
101
+ spec = "ref" if (event.ydata - order) > 0.5 else "obs" # if True then reference
102
+ x = event.xdata
103
+ print("Order: %i, Spectrum: %s, x: %g" % (order, "ref" if spec else "obs", x))
104
+
105
+ # remember the first click, apply the offset on the second
106
+ if self.first:
107
+ self.first = False
108
+ self.order_first = order
109
+ self.spec_first = spec
110
+ self.x_first = x
111
+ else:
112
+ # Clicked different spectra
113
+ if spec != self.spec_first:
114
+ self.first = True
115
+ direction = -1 if spec == "ref" else 1
116
+ offset_orders = int(order - self.order_first) * direction
117
+ offset_x = int(x - self.x_first) * direction
118
+ self.offset[0] -= offset_orders - 1
119
+ self.offset[1] -= offset_x
120
+ self.make_ref_image()
121
+
122
+
123
+ class LineAtlas:
124
+ def __init__(self, element, medium="vac"):
125
+ self.element = element
126
+ self.medium = medium
127
+
128
+ fname = element.lower() + ".fits"
129
+ folder = dirname(__file__)
130
+ self.fname = join(folder, "wavecal/atlas", fname)
131
+ self.wave, self.flux = self.load_fits(self.fname)
132
+
133
+ try:
134
+ # If a specific linelist file is provided
135
+ fname_list = element.lower() + "_list.txt"
136
+ self.fname_list = join(folder, "wavecal/atlas", fname_list)
137
+ linelist = np.genfromtxt(self.fname_list, dtype="f8,U8")
138
+ wpos, element = linelist["f0"], linelist["f1"]
139
+ indices = self.wave.searchsorted(wpos)
140
+ heights = self.flux[indices]
141
+ self.linelist = np.rec.fromarrays(
142
+ [wpos, heights, element], names=["wave", "heights", "element"]
143
+ )
144
+ except (OSError, FileNotFoundError):
145
+ # Otherwise fit the line positions from the spectrum
146
+ logger.warning(
147
+ "No dedicated linelist found for %s, determining peaks based on the reference spectrum instead.",
148
+ element,
149
+ )
150
+ module = WavelengthCalibration(plot=False)
151
+ n, peaks = module._find_peaks(self.flux)
152
+ wpos = np.interp(peaks, np.arange(len(self.wave)), self.wave)
153
+ element = np.full(len(wpos), element)
154
+ indices = self.wave.searchsorted(wpos)
155
+ heights = self.flux[indices]
156
+ self.linelist = np.rec.fromarrays(
157
+ [wpos, heights, element], names=["wave", "heights", "element"]
158
+ )
159
+
160
+ # The data files are in vacuum; if the instrument is in air, we need to convert
161
+ if medium == "air":
162
+ self.wave = util.vac2air(self.wave)
163
+ self.linelist["wave"] = util.vac2air(self.linelist["wave"])
164
+
165
+ def load_fits(self, fname):
166
+ with fits.open(fname, memmap=False) as hdu:
167
+ if len(hdu) == 1:
168
+ # It's just the spectrum
169
+ # with the wavelength defined via the header keywords
170
+ header = hdu[0].header
171
+ spec = hdu[0].data.ravel()
172
+ wmin = header["CRVAL1"]
173
+ wdel = header["CDELT1"]
174
+ wave = np.arange(spec.size) * wdel + wmin
175
+ else:
176
+ # It's a binary table, with two columns for the wavelength and the
177
+ # spectrum
178
+ data = hdu[1].data
179
+ wave = data["wave"]
180
+ spec = data["spec"]
181
+
182
+ spec /= np.nanmax(spec)
183
+ spec = np.clip(spec, 0, None)
184
+ return wave, spec
185
+
186
+
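A short usage sketch for the atlas class above (editorial illustration; the "thar" atlas and line list files appear in the file table at the top of this diff):

    from pyreduce.wavelength_calibration import LineAtlas

    atlas = LineAtlas("thar", medium="vac")
    print(atlas.wave.size, atlas.flux.size)   # normalized reference spectrum
    print(atlas.linelist["wave"][:5])         # wavelengths of the catalogued lines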
187
+ class LineList:
188
+ dtype = np.dtype(
189
+ (
190
+ np.record,
191
+ [
192
+ (("wlc", "WLC"), ">f8"), # Wavelength (before fit)
193
+ (("wll", "WLL"), ">f8"), # Wavelength (after fit)
194
+ (("posc", "POSC"), ">f8"), # Pixel Position (before fit)
195
+ (("posm", "POSM"), ">f8"), # Pixel Position (after fit)
196
+ (("xfirst", "XFIRST"), ">i2"), # first pixel of the line
197
+ (("xlast", "XLAST"), ">i2"), # last pixel of the line
198
+ (
199
+ ("approx", "APPROX"),
200
+ "O",
201
+ ), # Not used. Describes the shape used to approximate the line. "G" for Gaussian
202
+ (("width", "WIDTH"), ">f8"), # width of the line in pixels
203
+ (("height", "HEIGHT"), ">f8"), # relative strength of the line
204
+ (("order", "ORDER"), ">i2"), # echelle order the line is found in
205
+ ("flag", "?"), # flag that tells us if we should use the line or not
206
+ ],
207
+ )
208
+ )
209
+
210
+ def __init__(self, lines=None):
211
+ if lines is None:
212
+ lines = np.array([], dtype=self.dtype)
213
+ self.data = lines
214
+ self.dtype = self.data.dtype
215
+
216
+ def __getitem__(self, key):
217
+ return self.data[key]
218
+
219
+ def __setitem__(self, key, value):
220
+ self.data[key] = value
221
+
222
+ def __len__(self):
223
+ return len(self.data)
224
+
225
+ @classmethod
226
+ def load(cls, filename):
227
+ data = np.load(filename, allow_pickle=True)
228
+ linelist = cls(data["cs_lines"])
229
+ return linelist
230
+
231
+ def save(self, filename):
232
+ np.savez(filename, cs_lines=self.data)
233
+
234
+ def append(self, linelist):
235
+ if isinstance(linelist, LineList):
236
+ linelist = linelist.data
237
+ self.data = np.append(self.data, linelist)
238
+
239
+ def add_line(self, wave, order, pos, width, height, flag):
240
+ lines = self.from_list([wave], [order], [pos], [width], [height], [flag])
241
+ self.data = np.append(self.data, lines)
242
+
243
+ @classmethod
244
+ def from_list(cls, wave, order, pos, width, height, flag):
245
+ lines = [
246
+ (w, w, p, p, p - wi / 2, p + wi / 2, b"G", wi, h, o, f)
247
+ for w, o, p, wi, h, f in zip(
248
+ wave, order, pos, width, height, flag, strict=False
249
+ )
250
+ ]
251
+ lines = np.array(lines, dtype=cls.dtype)
252
+ return cls(lines)
253
+
254
+
255
+ class WavelengthCalibration:
256
+ """
257
+ Wavelength Calibration Module
258
+
259
+ Takes an observed wavelength image and the reference linelist
260
+ and returns the wavelength at each pixel
261
+ """
262
+
263
+ def __init__(
264
+ self,
265
+ threshold=100,
266
+ degree=(6, 6),
267
+ iterations=3,
268
+ dimensionality="2D",
269
+ nstep=0,
270
+ correlate_cols=0,
271
+ shift_window=0.01,
272
+ manual=False,
273
+ polarim=False,
274
+ lfc_peak_width=3,
275
+ closing=5,
276
+ element=None,
277
+ medium="vac",
278
+ plot=True,
279
+ plot_title=None,
280
+ ):
281
+ #:float: Residual threshold in m/s above which to remove lines
282
+ self.threshold = threshold
283
+ #:tuple(int, int): polynomial degree of the wavelength fit in (pixel, order) direction
284
+ self.degree = degree
285
+ if dimensionality == "1D":
286
+ self.degree = int(degree)
287
+ elif dimensionality == "2D":
288
+ self.degree = (int(degree[0]), int(degree[1]))
289
+ #:int: Number of iterations of the auto-id / residual-rejection loop
290
+ self.iterations = iterations
291
+ #:{"1D", "2D"}: Whether to use 1d or 2d fit
292
+ self.dimensionality = dimensionality
293
+ #:int: Number of pixel steps (offsets) in the detector to fit; 0 disables step fitting
294
+ self.nstep = nstep
295
+ #:int: How many columns to use in the 2D cross correlation alignment. 0 means all pixels (slow).
296
+ self.correlate_cols = correlate_cols
297
+ #:float: Fraction of the number of columns to use in the alignment of individual orders. Set to 0 to disable
298
+ self.shift_window = shift_window
299
+ #:bool: Whether to manually align the reference instead of using cross correlation
300
+ self.manual = manual
301
+ #:bool: Whether to use polarimetric orders instead of the usual ones, i.e. each pair of two orders represents the same data. Not supported yet
302
+ self.polarim = polarim
303
+ #:int: Whether to plot the results. Set to 2 to plot during all steps.
304
+ self.plot = plot
305
+ self.plot_title = plot_title
306
+ #:str: Elements used in the wavelength calibration. Used in AutoId to find more lines from the Atlas
307
+ self.element = element
308
+ #:str: Medium of the detector, vac or air
309
+ self.medium = medium
310
+ #:int: Laser Frequency Peak width (for scipy.signal.find_peaks)
311
+ self.lfc_peak_width = lfc_peak_width
312
+ #:int: grey closing range for the input image
313
+ self.closing = closing
314
+ #:int: Number of orders in the observation
315
+ self.nord = None
316
+ #:int: Number of columns in the observation
317
+ self.ncol = None
318
+
319
+ @property
320
+ def step_mode(self):
321
+ return self.nstep > 0
322
+
323
+ @property
324
+ def dimensionality(self):
325
+ """{"1D", "2D"}: Whether to use 1D or 2D polynomials for the wavelength solution"""
326
+ return self._dimensionality
327
+
328
+ @dimensionality.setter
329
+ def dimensionality(self, value):
330
+ accepted_values = ["1D", "2D"]
331
+ if value in accepted_values:
332
+ self._dimensionality = value
333
+ else:
334
+ raise ValueError(
335
+ f"Value for 'dimensionality' not understood. Expected one of {accepted_values} but got {value} instead"
336
+ )
337
+
338
+ def normalize(self, obs, lines):
339
+ """
340
+ Normalize the observation and reference list in each order individually
341
+ Copies the data of the image, but not of the linelist
342
+
343
+ Parameters
344
+ ----------
345
+ obs : array of shape (nord, ncol)
346
+ observed image
347
+ lines : recarray of shape (nlines,)
348
+ reference linelist
349
+
350
+ Returns
351
+ -------
352
+ obs : array of shape (nord, ncol)
353
+ normalized image
354
+ lines : recarray of shape (nlines,)
355
+ normalized reference linelist
356
+ """
357
+ # normalize order by order
358
+ obs = np.ma.copy(obs)
359
+ for i in range(len(obs)):
360
+ if self.closing > 0:
361
+ obs[i] = grey_closing(obs[i], self.closing)
362
+ try:
363
+ obs[i] -= np.ma.median(obs[i][obs[i] > 0])
364
+ except ValueError:
365
+ logger.warning(
366
+ "Could not determine the minimum value in order %i. No positive values found",
367
+ i,
368
+ )
369
+ obs[i] /= np.ma.max(obs[i])
370
+
371
+ # Remove negative outliers
372
+ std = np.std(obs, axis=1)[:, None]
373
+ obs[obs <= -2 * std] = np.ma.masked
374
+ # obs[obs <= 0] = np.ma.masked
375
+
376
+ # Normalize lines in each order
377
+ for order in np.unique(lines["order"]):
378
+ select = lines["order"] == order
379
+ topheight = np.max(lines[select]["height"])
380
+ lines["height"][select] /= topheight
381
+
382
+ return obs, lines
383
+
384
+ def create_image_from_lines(self, lines):
385
+ """
386
+ Create a reference image based on a line list
387
+ Each line will be approximated by a Gaussian
388
+ Space in between lines is 0
389
+ The image covers orders from the lowest to the highest order in the list
390
+
391
+ Parameters
392
+ ----------
393
+ lines : recarray of shape (nlines,)
394
+ line data
395
+
396
+ Returns
397
+ -------
398
+ img : array of shape (nord, ncol)
399
+ New reference image
400
+ """
401
+ min_order = int(np.min(lines["order"]))
402
+ max_order = int(np.max(lines["order"]))
403
+ img = np.zeros((max_order - min_order + 1, self.ncol))
404
+ for line in lines:
405
+ if line["order"] < 0:
406
+ continue
407
+ if line["xlast"] < 0 or line["xfirst"] > self.ncol:
408
+ continue
409
+ first = int(max(line["xfirst"], 0))
410
+ last = int(min(line["xlast"], self.ncol))
411
+ img[int(line["order"]) - min_order, first:last] = line[
412
+ "height"
413
+ ] * signal.windows.gaussian(last - first, line["width"])
414
+ return img
415
+
416
+ def align_manual(self, obs, lines):
417
+ """
418
+ Open an AlignmentPlot window for manual selection of the alignment
419
+
420
+ Parameters
421
+ ----------
422
+ obs : array of shape (nord, ncol)
423
+ observed image
424
+ lines : recarray of shape (nlines,)
425
+ reference linelist
426
+
427
+ Returns
428
+ -------
429
+ offset : tuple(int, int)
430
+ offset in order and column to be applied to each line in the linelist
431
+ """
432
+ _, ax = plt.subplots()
433
+ ap = AlignmentPlot(ax, obs, lines, plot_title=self.plot_title)
434
+ ap.connect()
435
+ util.show_or_save("wavecal_alignment")
436
+ offset = ap.offset
437
+ return offset
438
+
439
+ def apply_alignment_offset(self, lines, offset, select=None):
440
+ """
441
+ Apply an offset to the linelist
442
+
443
+ Parameters
444
+ ----------
445
+ lines : recarray of shape (nlines,)
446
+ reference linelist
447
+ offset : tuple(int, int)
448
+ offset in (order, column)
449
+ select : array of shape(nlines,), optional
450
+ Mask that defines which lines the offset applies to
451
+
452
+ Returns
453
+ -------
454
+ lines : recarray of shape (nlines,)
455
+ linelist with offset applied
456
+ """
457
+ if select is None:
458
+ select = slice(None)
459
+ lines["xfirst"][select] += offset[1]
460
+ lines["xlast"][select] += offset[1]
461
+ lines["posm"][select] += offset[1]
462
+ lines["order"][select] += offset[0]
463
+ return lines
464
+
465
+ def align(self, obs, lines):
466
+ """
467
+ Align the observation with the reference spectrum
468
+ Either automatically using cross correlation or manually (visually)
469
+
470
+ Parameters
471
+ ----------
472
+ obs : array[nrow, ncol]
473
+ observed wavelength calibration spectrum (e.g. obs=ThoriumArgon)
474
+ lines : struct_array
475
+ reference line data
476
+ manual : bool, optional
477
+ whether to manually align the spectra (default: False)
478
+ plot : bool, optional
479
+ whether to plot the alignment (default: False)
480
+
481
+ Returns
482
+ -------
483
+ offset: tuple(int, int)
484
+ offset in order and column
485
+ """
486
+ obs = np.ma.filled(obs, 0)
487
+
488
+ if not self.manual:
489
+ # make image from lines
490
+ img = self.create_image_from_lines(lines)
491
+
492
+ # Crop the image to speed up cross correlation
493
+ if self.correlate_cols != 0:
494
+ _slice = slice(
495
+ (self.ncol - self.correlate_cols) // 2,
496
+ (self.ncol + self.correlate_cols) // 2 + 1,
497
+ )
498
+ ccimg = img[:, _slice]
499
+ ccobs = obs[:, _slice]
500
+ else:
501
+ ccimg, ccobs = img, obs
502
+
503
+ # Cross correlate with obs image
504
+ # And determine overall offset
505
+ correlation = signal.correlate2d(ccobs, ccimg, mode="same")
506
+ offset_order, offset_x = np.unravel_index(
507
+ np.argmax(correlation), correlation.shape
508
+ )
509
+
510
+ if self.plot >= 2:
511
+ plt.imshow(correlation, aspect="auto")
512
+ plt.vlines(offset_x, -0.5, correlation.shape[0] - 0.5, color="red")
513
+ plt.hlines(offset_order, -0.5, correlation.shape[1] - 0.5, color="red")
514
+ if self.plot_title is not None:
515
+ plt.title(self.plot_title)
516
+ util.show_or_save("wavecal_correlation")
517
+
518
+ offset_order = offset_order - ccimg.shape[0] / 2 + 1
519
+ offset_x = offset_x - ccimg.shape[1] / 2 + 1
520
+ offset = [int(offset_order), int(offset_x)]
521
+
522
+ # apply offset
523
+ lines = self.apply_alignment_offset(lines, offset)
524
+
525
+ if self.shift_window != 0:
526
+ # Shift individual orders to fit reference
527
+ # Only allow a small shift here (1%) ?
528
+ img = self.create_image_from_lines(lines)
529
+ for i in range(max(offset[0], 0), min(len(obs), len(img))):
530
+ correlation = signal.correlate(obs[i], img[i], mode="same")
531
+ width = int(self.ncol * self.shift_window) // 2
532
+ low, high = self.ncol // 2 - width, self.ncol // 2 + width
533
+ offset_x = np.argmax(correlation[low:high]) + low
534
+ offset_x = int(offset_x - self.ncol / 2 + 1)
535
+
536
+ select = lines["order"] == i
537
+ lines = self.apply_alignment_offset(lines, (0, offset_x), select)
538
+
539
+ if self.plot or self.manual:
540
+ offset = self.align_manual(obs, lines)
541
+ lines = self.apply_alignment_offset(lines, offset)
542
+
543
+ logger.debug(f"Offset order: {offset[0]}, Offset pixel: {offset[1]}")
544
+
545
+ return lines
546
+
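A compact worked example of the peak-to-offset conversion above (made-up numbers): if the reference image built from the linelist has 20 rows and 2048 columns and the 'same'-mode correlation peaks at row 12, column 1030, then

    offset_order = int(12 - 20 / 2 + 1) = 3
    offset_x     = int(1030 - 2048 / 2 + 1) = 7

so every line is shifted by 3 orders and 7 pixels before the per-order refinement.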
547
+ def _fit_single_line(self, obs, center, width, plot=False):
548
+ low = int(center - width * 5)
549
+ low = max(low, 0)
550
+ high = int(center + width * 5)
551
+ high = min(high, len(obs))
552
+
553
+ section = obs[low:high]
554
+ x = np.arange(low, high, 1)
555
+ x = np.ma.masked_array(x, mask=np.ma.getmaskarray(section))
556
+ coef = util.gaussfit2(x, section)
557
+
558
+ if self.plot >= 2 and plot:
559
+ x2 = np.linspace(x.min(), x.max(), len(x) * 100)
560
+ plt.plot(x, section, label="Observation")
561
+ plt.plot(x2, util.gaussval2(x2, *coef), label="Fit")
562
+ title = "Gaussian Fit to spectral line"
563
+ if self.plot_title is not None:
564
+ title = f"{self.plot_title}\n{title}"
565
+ plt.title(title)
566
+ plt.xlabel("x [pixel]")
567
+ plt.ylabel("Intensity [a.u.]")
568
+ plt.legend()
569
+ util.show_or_save("wavecal_line_fit")
570
+ return coef
571
+
572
+ def fit_lines(self, obs, lines):
573
+ """
574
+ Determine exact position of each line on the detector based on initial guess
575
+
576
+ This fits a Gaussian to each line, and uses the peak position as a new solution
577
+
578
+ Parameters
579
+ ----------
580
+ obs : array of shape (nord, ncol)
581
+ observed wavelength calibration image
582
+ lines : recarray of shape (nlines,)
583
+ reference line data
584
+
585
+ Returns
586
+ -------
587
+ lines : recarray of shape (nlines,)
588
+ Updated line information (posm is changed)
589
+ """
590
+ # For each line fit a gaussian to the observation
591
+ for i, line in tqdm(
592
+ enumerate(lines), total=len(lines), leave=False, desc="Lines"
593
+ ):
594
+ if line["posm"] < 0 or line["posm"] >= obs.shape[1]:
595
+ # Line outside pixel range
596
+ continue
597
+ if line["order"] < 0 or line["order"] >= len(obs):
598
+ # Line outside order range
599
+ continue
600
+
601
+ try:
602
+ coef = self._fit_single_line(
603
+ obs[int(line["order"])],
604
+ line["posm"],
605
+ line["width"],
606
+ plot=line["flag"],
607
+ )
608
+ lines[i]["posm"] = coef[1]
609
+ except Exception:
610
+ # Gaussian fit failed, don't use the line
611
+ lines[i]["flag"] = False
612
+
613
+ return lines
614
+
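The refinement step above can be pictured with a plain scipy fit; this is an editorial sketch of the idea only, the package itself uses util.gaussfit2 as shown in _fit_single_line:

    import numpy as np
    from scipy.optimize import curve_fit

    def gauss(x, height, center, sigma, offset):
        return height * np.exp(-((x - center) ** 2) / (2 * sigma ** 2)) + offset

    x = np.arange(100.0)
    line = gauss(x, 1.0, 52.3, 3.0, 0.05)                  # synthetic line centred near pixel 52.3
    p0 = [line.max(), float(np.argmax(line)), 3.0, 0.0]    # initial guess
    coef, _ = curve_fit(gauss, x, line, p0=p0)
    posm = coef[1]                                         # refined sub-pixel position, ~52.3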
615
+ def build_2d_solution(self, lines, plot=False):
616
+ """
617
+ Create a 2D polynomial fit to flagged lines.
618
+
619
+ Parameters
620
+ ----------
621
+ lines : struct_array
622
+ line data
623
+ plot : bool, optional
624
+ whether to plot the solution (default: False)
625
+
626
+ Returns
627
+ -------
628
+ coef : array[degree_x, degree_y]
629
+ 2d polynomial coefficients
630
+ """
631
+
632
+ if self.step_mode:
633
+ return self.build_step_solution(lines, plot=plot)
634
+
635
+ # Only use flagged data
636
+ mask = lines["flag"] # True: use line, False: dont use line
637
+ m_wave = lines["wll"][mask]
638
+ m_pix = lines["posm"][mask]
639
+ m_ord = lines["order"][mask]
640
+
641
+ if self.dimensionality == "1D":
642
+ nord = self.nord
643
+ coef = np.zeros((nord, self.degree + 1))
644
+ for i in range(nord):
645
+ select = m_ord == i
646
+ if np.count_nonzero(select) < 2:
647
+ # Not enough lines for wavelength solution
648
+ logger.warning(
649
+ "Not enough valid lines found wavelength calibration in order % i",
650
+ i,
651
+ )
652
+ coef[i] = np.nan
653
+ continue
654
+
655
+ deg = max(min(self.degree, np.count_nonzero(select) - 2), 0)
656
+ coef[i, -(deg + 1) :] = np.polyfit(
657
+ m_pix[select], m_wave[select], deg=deg
658
+ )
659
+ elif self.dimensionality == "2D":
660
+ # 2d polynomial fit with: x = column, y = order, z = wavelength
661
+ coef = util.polyfit2d(m_pix, m_ord, m_wave, degree=self.degree, plot=False)
662
+ else:
663
+ raise ValueError(
664
+ f"Parameter 'mode' not understood. Expected '1D' or '2D' but got {self.dimensionality}"
665
+ )
666
+
667
+ if plot or self.plot >= 2: # pragma: no cover
668
+ self.plot_residuals(lines, coef, title="Residuals")
669
+
670
+ return coef
671
+
672
+ def g(self, x, step_coef_pos, step_coef_diff):
673
+ try:
674
+ bins = step_coef_pos
675
+ digits = np.digitize(x, bins) - 1
676
+ except ValueError:
677
+ return np.inf
678
+
679
+ cumsum = np.cumsum(step_coef_diff)
680
+ x = x + cumsum[digits]
681
+ return x
682
+
683
+ def f(self, x, poly_coef, step_coef_pos, step_coef_diff):
684
+ xdash = self.g(x, step_coef_pos, step_coef_diff)
685
+ if np.all(np.isinf(xdash)):
686
+ return np.inf
687
+ y = np.polyval(poly_coef, xdash)
688
+ return y
689
+
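An illustrative example of the step model implemented by g() and f() above (values are invented): with

    step_pos  = np.array([683.0, 1366.0])   # assumed stitch boundaries in pixels
    step_diff = np.array([0.4, -0.2])       # assumed shift introduced at each boundary

a pixel between the two boundaries is shifted by 0.4, and a pixel beyond the second boundary by 0.4 + (-0.2) = 0.2, because np.cumsum accumulates the per-step offsets; the polynomial in f() is then evaluated on the shifted positions.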
690
+ def build_step_solution(self, lines, plot=False):
691
+ """
692
+ Fit the least squares fit to the wavelength points,
693
+ with additional free parameters for detector gaps, e.g. due to stitching.
694
+
695
+ The exact method of the fit depends on the dimensionality.
696
+ Either way we are using the usual polynomial fit for the wavelength, but
697
+ the x points are modified beforehand by shifting them some amount, at specific
698
+ indices. We assume that the stitching effects are distributed evenly and we know how
699
+ many steps we expect (this is set as "nstep").
700
+
701
+ Parameters
702
+ ----------
703
+ lines : np.recarray
704
+ linedata
705
+ plot : bool, optional
706
+ whether to plot results or not, by default False
707
+
708
+ Returns
709
+ -------
710
+ coef
711
+ coefficients of the best fit
712
+ """
713
+ mask = lines["flag"] # True: use line, False: dont use line
714
+ m_wave = lines["wll"][mask]
715
+ m_pix = lines["posm"][mask]
716
+ m_ord = lines["order"][mask]
717
+
718
+ nstep = self.nstep
719
+ ncol = self.ncol
720
+
721
+ if self.dimensionality == "1D":
722
+ coef = {}
723
+ for order in np.unique(m_ord):
724
+ select = m_ord == order
725
+ x = xl = m_pix[select]
726
+ y = m_wave[select]
727
+ step_coef = np.zeros((nstep, 2))
728
+ step_coef[:, 0] = np.linspace(ncol / (nstep + 1), ncol, nstep + 1)[:-1]
729
+
730
+ def func(x, *param):
731
+ return self.f(x, poly_coef, step_coef[:, 0], param) # noqa: B023
732
+
733
+ for _ in range(5):
734
+ poly_coef = np.polyfit(xl, y, self.degree)
735
+ res, _ = curve_fit(func, x, y, p0=step_coef[:, 1], bounds=[-1, 1])
736
+ step_coef[:, 1] = res
737
+ xl = self.g(x, step_coef[:, 0], step_coef[:, 1])
738
+
739
+ coef[order] = [poly_coef, step_coef]
740
+ elif self.dimensionality == "2D":
741
+ unique = np.unique(m_ord)
742
+ nord = len(unique)
743
+ shape = (self.degree[0] + 1, self.degree[1] + 1)
744
+ np.prod(shape)
745
+
746
+ step_coef = np.zeros((nord, nstep, 2))
747
+ step_coef[:, :, 0] = np.linspace(ncol / (nstep + 1), ncol, nstep + 1)[:-1]
748
+
749
+ def func(x, *param):
750
+ x, y = x[: len(x) // 2], x[len(x) // 2 :]
751
+ theta = np.asarray(param).reshape((nord, nstep))
752
+ xl = np.copy(x)
753
+ for j, i in enumerate(unique):
754
+ xl[y == i] = self.g(x[y == i], step_coef[j, :, 0], theta[j])
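A minimal sketch of building a line list with the classmethod above (editorial example; wavelengths and positions are invented):

    from pyreduce.wavelength_calibration import LineList

    wave   = [5500.1, 5512.7]   # reference wavelengths in Angstrom
    order  = [3, 3]             # echelle order of each line
    pos    = [120.5, 980.2]     # approximate pixel position
    width  = [4.0, 3.5]         # line width in pixels
    height = [1.0, 0.6]         # relative strength
    flag   = [True, True]       # use both lines in the fit

    lines = LineList.from_list(wave, order, pos, width, height, flag)
    lines.save("my_lines.npz")  # stored under the 'cs_lines' key, matching LineList.load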
755
+ z = polyval2d(xl, y, poly_coef)
756
+ return z
757
+
758
+ # TODO: this could use some optimization
759
+ x = np.copy(m_pix)
760
+ x0 = np.concatenate((m_pix, m_ord))
761
+ resid_old = np.inf
762
+ for k in tqdm(range(5)):
763
+ poly_coef = util.polyfit2d(
764
+ x, m_ord, m_wave, degree=self.degree, plot=False
765
+ )
766
+
767
+ res, _ = curve_fit(func, x0, m_wave, p0=step_coef[:, :, 1])
768
+ step_coef[:, :, 1] = res.reshape((nord, nstep))
769
+ for j, i in enumerate(unique):
770
+ x[m_ord == i] = self.g(
771
+ m_pix[m_ord == i], step_coef[j][:, 0], step_coef[j][:, 1]
772
+ )
773
+
774
+ resid = polyval2d(x, m_ord, poly_coef) - m_wave
775
+ resid = np.sum(resid**2)
776
+ improvement = resid_old - resid
777
+ resid_old = resid
778
+ logger.info(
779
+ "Iteration: %i, Residuals: %.5g, Improvement: %.5g",
780
+ k,
781
+ resid,
782
+ improvement,
783
+ )
784
+
785
+ poly_coef = util.polyfit2d(x, m_ord, m_wave, degree=self.degree, plot=False)
786
+ step_coef = {i: step_coef[j] for j, i in enumerate(unique)}
787
+ coef = (poly_coef, step_coef)
788
+ else:
789
+ raise ValueError(
790
+ f"Parameter 'dimensionality' not understood. Expected '1D' or '2D' but got {self.dimensionality}"
791
+ )
792
+
793
+ return coef
794
+
795
+ def evaluate_step_solution(self, pos, order, solution):
796
+ if not np.array_equal(np.shape(pos), np.shape(order)):
797
+ raise ValueError("pos and order must have the same shape")
798
+ if self.dimensionality == "1D":
799
+ result = np.zeros(pos.shape)
800
+ for i in np.unique(order):
801
+ select = order == i
802
+ result[select] = self.f(
803
+ pos[select],
804
+ solution[i][0],
805
+ solution[i][1][:, 0],
806
+ solution[i][1][:, 1],
807
+ )
808
+ elif self.dimensionality == "2D":
809
+ poly_coef, step_coef = solution
810
+ pos = np.copy(pos)
811
+ for i in np.unique(order):
812
+ pos[order == i] = self.g(
813
+ pos[order == i], step_coef[i][:, 0], step_coef[i][:, 1]
814
+ )
815
+ result = polyval2d(pos, order, poly_coef)
816
+ else:
817
+ raise ValueError(
818
+ f"Parameter 'mode' not understood, expected '1D' or '2D' but got {self.dimensionality}"
819
+ )
820
+ return result
821
+
822
+ def evaluate_solution(self, pos, order, solution):
823
+ """
824
+ Evaluate the 1d or 2d wavelength solution at the given pixel positions and orders
825
+
826
+ Parameters
827
+ ----------
828
+ pos : array
829
+ pixel position on the detector (i.e. x axis)
830
+ order : array
831
+ order of each point
832
+ solution : array of shape (nord, ndegree) or (degree_x, degree_y)
833
+ polynomial coefficients. For mode=1D, one set of coefficients per order.
834
+ For mode=2D, the first dimension is for the positions and the second for the orders
835
+ mode : str, optional
836
+ Whether to interpret the solution as 1D or 2D polynomials, by default "1D"
837
+
838
+ Returns
839
+ -------
840
+ result: array
841
+ Evaluated polynomial
842
+
843
+ Raises
844
+ ------
845
+ ValueError
846
+ If pos and order have different shapes, or mode is of the wrong value
847
+ """
848
+ if not np.array_equal(np.shape(pos), np.shape(order)):
849
+ raise ValueError("pos and order must have the same shape")
850
+
851
+ if self.step_mode:
852
+ return self.evaluate_step_solution(pos, order, solution)
853
+
854
+ if self.dimensionality == "1D":
855
+ result = np.zeros(pos.shape)
856
+ for i in np.unique(order):
857
+ select = order == i
858
+ result[select] = np.polyval(solution[int(i)], pos[select])
859
+ elif self.dimensionality == "2D":
860
+ result = np.polynomial.polynomial.polyval2d(pos, order, solution)
861
+ else:
862
+ raise ValueError(
863
+ f"Parameter 'mode' not understood, expected '1D' or '2D' but got {self.dimensionality}"
864
+ )
865
+ return result
866
+
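The 2D branch above is a direct call to numpy's bivariate polynomial evaluation; a self-contained sketch of what that means (coefficients are invented):

    import numpy as np
    from numpy.polynomial.polynomial import polyval2d

    coef = np.zeros((7, 7))     # shape (degree_x + 1, degree_y + 1) for degree=(6, 6)
    coef[0, 0] = 5000.0         # constant term, ~5000 Angstrom
    coef[1, 0] = 0.05           # linear term in pixel
    coef[0, 1] = -100.0         # linear term in order

    x     = np.array([0.0, 1024.0, 2047.0])   # pixel positions
    order = np.array([3.0, 3.0, 3.0])         # echelle order of each position
    wave  = polyval2d(x, order, coef)         # wavelength at each (pixel, order)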
867
+ def make_wave(self, wave_solution, plot=False):
868
+ """Expand polynomial wavelength solution into full image
869
+
870
+ Parameters
871
+ ----------
872
+ wave_solution : array of shape(degree,)
873
+ polynomial coefficients of wavelength solution
874
+ plot : bool, optional
875
+ wether to plot the solution (default: False)
876
+
877
+ Returns
878
+ -------
879
+ wave_img : array of shape (nord, ncol)
880
+ wavelength solution for each point in the spectrum
881
+ """
882
+
883
+ y, x = np.indices((self.nord, self.ncol))
884
+ wave_img = self.evaluate_solution(x, y, wave_solution)
885
+
886
+ return wave_img
887
+
888
+ def auto_id(self, obs, wave_img, lines):
889
+ """Automatically identify peaks that are close to known lines
890
+
891
+ Parameters
892
+ ----------
893
+ obs : array of shape (nord, ncol)
894
+ observed spectrum
895
+ wave_img : array of shape (nord, ncol)
896
+ wavelength solution image
897
+ lines : struct_array
898
+ line data
899
+ threshold : int, optional
900
+ difference threshold between line positions in m/s, until which a line is considered identified (default: 1)
901
+ plot : bool, optional
902
+ whether to plot the new lines
903
+
904
+ Returns
905
+ -------
906
+ lines : struct_array
907
+ line data with new flags
908
+ """
909
+
910
+ new_lines = []
911
+ if self.atlas is not None:
912
+ # For each order, find the corresponding section in the Atlas
913
+ # Look for strong lines in the atlas and the spectrum that match in position
914
+ # Add new lines to the linelist
915
+ width_of_atlas_peaks = 3
916
+ for order in range(obs.shape[0]):
917
+ mask = ~np.ma.getmask(obs[order])
918
+ index_mask = np.arange(len(mask))[mask]
919
+ data_obs = obs[order, mask]
920
+ wave_obs = wave_img[order, mask]
921
+
922
+ threshold_of_peak_closeness = (
923
+ np.diff(wave_obs) / wave_obs[:-1] * speed_of_light
924
+ )
925
+ threshold_of_peak_closeness = np.max(threshold_of_peak_closeness)
926
+
927
+ wmin, wmax = wave_obs[0], wave_obs[-1]
928
+ imin, imax = np.searchsorted(self.atlas.wave, (wmin, wmax))
929
+ wave_atlas = self.atlas.wave[imin:imax]
930
+ data_atlas = self.atlas.flux[imin:imax]
931
+ if len(data_atlas) == 0:
932
+ continue
933
+ data_atlas = data_atlas / data_atlas.max()
934
+
935
+ line = lines[
936
+ (lines["order"] == order)
937
+ & (lines["wll"] > wmin)
938
+ & (lines["wll"] < wmax)
939
+ ]
940
+
941
+ peaks_atlas, peak_info_atlas = signal.find_peaks(
942
+ data_atlas, height=0.01, width=width_of_atlas_peaks
943
+ )
944
+ peaks_obs, peak_info_obs = signal.find_peaks(
945
+ data_obs, height=0.01, width=0
946
+ )
947
+
948
+ for _, p in enumerate(peaks_atlas):
949
+ # Look for an existing line in the vicinity
950
+ wpeak = wave_atlas[p]
951
+ diff = np.abs(line["wll"] - wpeak) / wpeak * speed_of_light
952
+ if np.any(diff < threshold_of_peak_closeness):
953
+ # Line already in the linelist, ignore
954
+ continue
955
+ else:
956
+ # Look for matching peak in observation
957
+ diff = (
958
+ np.abs(wpeak - wave_obs[peaks_obs]) / wpeak * speed_of_light
959
+ )
960
+ imin = np.argmin(diff)
961
+
962
+ if diff[imin] < threshold_of_peak_closeness:
963
+ # Add line to linelist
964
+ # Location on the detector
965
+ # Include the masked areas!!!
966
+ ipeak = peaks_obs[imin]
967
+ ipeak = index_mask[ipeak]
968
+
969
+ # relative height of the peak
970
+ hpeak = data_obs[peaks_obs[imin]]
971
+ wipeak = peak_info_obs["widths"][imin]
972
+ # wave, order, pos, width, height, flag
973
+ new_lines.append([wpeak, order, ipeak, wipeak, hpeak, True])
974
+
975
+ # Add new lines to the linelist
976
+ if len(new_lines) != 0:
977
+ new_lines = np.array(new_lines).T
978
+ new_lines = LineList.from_list(*new_lines)
979
+ new_lines = self.fit_lines(obs, new_lines)
980
+ lines.append(new_lines)
981
+
982
+ # Option 1:
983
+ # Step 1: Loop over unused lines in lines
984
+ # Step 2: find peaks in neighbourhood
985
+ # Step 3: Toggle flag on if close
986
+ counter = 0
987
+ for i, line in enumerate(lines):
988
+ if line["flag"]:
989
+ # Line is already in use
990
+ continue
991
+ if line["order"] < 0 or line["order"] >= self.nord:
992
+ # Line outside order range
993
+ continue
994
+ iord = int(line["order"])
995
+ if line["wll"] < wave_img[iord][0] or line["wll"] >= wave_img[iord][-1]:
996
+ # Line outside pixel range
997
+ continue
998
+
999
+ wl = line["wll"]
1000
+ width = line["width"] * 5
1001
+ wave = wave_img[iord]
1002
+ order_obs = obs[iord]
1003
+ # Find where the line should be
1004
+ try:
1005
+ idx = np.digitize(wl, wave)
1006
+ except ValueError:
1007
+ # Wavelength solution is not monotonic
1008
+ idx = np.where(wave >= wl)[0][0]
1009
+
1010
+ low = int(idx - width)
1011
+ low = max(low, 0)
1012
+ high = int(idx + width)
1013
+ high = min(high, len(order_obs))
1014
+
1015
+ vec = order_obs[low:high]
1016
+ if np.all(np.ma.getmaskarray(vec)):
1017
+ continue
1018
+ # Find the best fitting peak
1019
+ # TODO use gaussian fit?
1020
+ peak_idx, _ = signal.find_peaks(vec, height=np.ma.median(vec), width=3)
1021
+ if len(peak_idx) > 0:
1022
+ peak_pos = np.copy(peak_idx).astype(float)
1023
+ for j in range(len(peak_idx)):
1024
+ try:
1025
+ coef = self._fit_single_line(vec, peak_idx[j], line["width"])
1026
+ peak_pos[j] = coef[1]
1027
+ except Exception:
1028
+ peak_pos[j] = np.nan
1029
+ pass
1030
+
1031
+ pos_wave = np.interp(peak_pos, np.arange(high - low), wave[low:high])
1032
+ residual = np.abs(wl - pos_wave) / wl * speed_of_light
1033
+ idx = np.argmin(residual)
1034
+ if residual[idx] < self.threshold:
1035
+ counter += 1
1036
+ lines["flag"][i] = True
1037
+ lines["posm"][i] = low + peak_pos[idx]
1038
+
1039
+ logger.info("AutoID identified %i new lines", counter + len(new_lines))
1040
+
1041
+ return lines
1042
+
1043
+ def calculate_residual(self, wave_solution, lines):
1044
+ """
1045
+ Calculate all residuals of all given lines
1046
+
1047
+ Residual = (Wavelength Solution - Expected Wavelength) / Expected Wavelength * speed of light
1048
+
1049
+ Parameters
1050
+ ----------
1051
+ wave_solution : array of shape (degree_x, degree_y)
1052
+ polynomial coefficients of the wavelength solution (in numpy format)
1053
+ lines : recarray of shape (nlines,)
1054
+ contains the position of the line on the detector (posm), the order (order), and the expected wavelength (wll)
1055
+
1056
+ Returns
1057
+ -------
1058
+ residual : array of shape (nlines,)
1059
+ Residual of each line in m/s
1060
+ """
1061
+ x = lines["posm"]
1062
+ y = lines["order"]
1063
+ mask = ~lines["flag"]
1064
+
1065
+ solution = self.evaluate_solution(x, y, wave_solution)
1066
+
1067
+ residual = (solution - lines["wll"]) / lines["wll"] * speed_of_light
1068
+ residual = np.ma.masked_array(residual, mask=mask)
1069
+ return residual
1070
+
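A worked example of the residual definition above (illustrative numbers): for a line whose catalogue wavelength is 5000.000 Angstrom and whose fitted solution gives 5000.001 Angstrom,

    residual = (5000.001 - 5000.000) / 5000.000 * speed_of_light
             ≈ 2e-7 * 3e8 m/s ≈ 60 m/s

which is below the default threshold of 100 m/s, so the line would be kept.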
1071
+ def reject_outlier(self, residual, lines):
1072
+ """
1073
+ Reject the strongest outlier
1074
+
1075
+ Parameters
1076
+ ----------
1077
+ residual : array of shape (nlines,)
1078
+ residuals of all lines
1079
+ lines : recarray of shape (nlines,)
1080
+ line data
1081
+
1082
+ Returns
1083
+ -------
1084
+ lines : struct_array
1085
+ line data with one more flagged line
1086
+ residual : array of shape (nlines,)
1087
+ residuals of each line, with outliers masked (including the new one)
1088
+ """
1089
+
1090
+ # Strongest outlier
1091
+ ibad = np.ma.argmax(np.abs(residual))
1092
+ lines["flag"][ibad] = False
1093
+
1094
+ return lines
1095
+
1096
+ def reject_lines(self, lines, plot=False):
1097
+ """
1098
+ Reject the largest outlier one by one until all residuals are lower than the threshold
1099
+
1100
+ Parameters
1101
+ ----------
1102
+ lines : recarray of shape (nlines,)
1103
+ Line data with pixel position, and expected wavelength
1104
+ threshold : float, optional
1105
+ upper limit for the residual, by default 100
1106
+ degree : tuple, optional
1107
+ polynomial degree of the wavelength solution (pixel, column) (default: (6, 6))
1108
+ plot : bool, optional
1109
+ Whether to plot the results (default: False)
1110
+
1111
+ Returns
1112
+ -------
1113
+ lines : recarray of shape (nlines,)
1114
+ Line data with updated flags
1115
+ """
1116
+
1117
+ wave_solution = self.build_2d_solution(lines)
1118
+ residual = self.calculate_residual(wave_solution, lines)
1119
+ nbad = 0
1120
+ while np.ma.any(np.abs(residual) > self.threshold):
1121
+ lines = self.reject_outlier(residual, lines)
1122
+ wave_solution = self.build_2d_solution(lines)
1123
+ residual = self.calculate_residual(wave_solution, lines)
1124
+ nbad += 1
1125
+ logger.info("Discarding %i lines", nbad)
1126
+
1127
+ if plot or self.plot >= 2: # pragma: no cover
1128
+ mask = lines["flag"]
1129
+ _, axis = plt.subplots()
1130
+ axis.plot(lines["order"][mask], residual[mask], "X", label="Accepted Lines")
1131
+ axis.plot(
1132
+ lines["order"][~mask], residual[~mask], "D", label="Rejected Lines"
1133
+ )
1134
+ axis.set_xlabel("Order")
1135
+ axis.set_ylabel("Residual [m/s]")
1136
+ axis.set_title("Residuals versus order")
1137
+ axis.legend()
1138
+
1139
+ fig, ax = plt.subplots(
1140
+ nrows=self.nord // 2, ncols=2, sharex=True, squeeze=False
1141
+ )
1142
+ plt.subplots_adjust(hspace=0)
1143
+ fig.suptitle("Residuals of each order versus image columns")
1144
+
1145
+ for iord in range(self.nord):
1146
+ order_lines = lines[lines["order"] == iord]
1147
+ solution = self.evaluate_solution(
1148
+ order_lines["posm"], order_lines["order"], wave_solution
1149
+ )
1150
+ # Residual in m/s
1151
+ residual = (
1152
+ (solution - order_lines["wll"])
1153
+ / order_lines["wll"]
1154
+ * speed_of_light
1155
+ )
1156
+ mask = order_lines["flag"]
1157
+ ax[iord // 2, iord % 2].plot(
1158
+ order_lines["posm"][mask],
1159
+ residual[mask],
1160
+ "X",
1161
+ label="Accepted Lines",
1162
+ )
1163
+ ax[iord // 2, iord % 2].plot(
1164
+ order_lines["posm"][~mask],
1165
+ residual[~mask],
1166
+ "D",
1167
+ label="Rejected Lines",
1168
+ )
1169
+ # ax[iord // 2, iord % 2].tick_params(labelleft=False)
1170
+ ax[iord // 2, iord % 2].set_ylim(
1171
+ -self.threshold * 1.5, +self.threshold * 1.5
1172
+ )
1173
+
1174
+ ax[-1, 0].set_xlabel("x [pixel]")
1175
+ ax[-1, 1].set_xlabel("x [pixel]")
1176
+
1177
+ ax[0, 0].legend()
1178
+
1179
+ util.show_or_save("wavecal_reject_lines")
1180
+ return lines
1181
+
1182
+ def plot_results(self, wave_img, obs):
1183
+ plt.subplot(211)
1184
+ title = "Wavelength solution with Wavelength calibration spectrum\nOrders are in different colours"
1185
+ if self.plot_title is not None:
1186
+ title = f"{self.plot_title}\n{title}"
1187
+ plt.title(title)
1188
+ plt.xlabel("Wavelength")
1189
+ plt.ylabel("Observed spectrum")
1190
+ for i in range(self.nord):
1191
+ plt.plot(wave_img[i], obs[i], label="Order %i" % i)
1192
+
1193
+ plt.subplot(212)
1194
+ plt.title("2D Wavelength solution")
1195
+ plt.imshow(
1196
+ wave_img, aspect="auto", origin="lower", extent=(0, self.ncol, 0, self.nord)
1197
+ )
1198
+ cbar = plt.colorbar()
1199
+ plt.xlabel("Column")
1200
+ plt.ylabel("Order")
1201
+ cbar.set_label("Wavelength [Å]")
1202
+ util.show_or_save("wavecal_results")
1203
+
1204
+ def plot_residuals(self, lines, coef, title="Residuals"):
1205
+ orders = np.unique(lines["order"])
1206
+ norders = len(orders)
1207
+ if self.plot_title is not None:
1208
+ title = f"{self.plot_title}\n{title}"
1209
+ plt.suptitle(title)
1210
+ nplots = int(np.ceil(norders / 2))
1211
+ for i, order in enumerate(orders):
1212
+ plt.subplot(nplots, 2, i + 1)
1213
+ order_lines = lines[lines["order"] == order]
1214
+ if len(order_lines) > 0:
1215
+ residual = self.calculate_residual(coef, order_lines)
1216
+ plt.plot(order_lines["posm"], residual, "rX")
1217
+ plt.hlines([0], 0, self.ncol)
1218
+
1219
+ plt.xlim(0, self.ncol)
1220
+ plt.ylim(-self.threshold, self.threshold)
1221
+
1222
+ if (i + 1) not in [norders, norders - 1]:
1223
+ plt.xticks([])
1224
+ else:
1225
+ plt.xlabel("x [Pixel]")
1226
+
1227
+ if (i + 1) % 2 == 0:
1228
+ plt.yticks([])
1229
+ # else:
1230
+ # plt.yticks([-self.threshold, 0, self.threshold])
1231
+
1232
+ plt.subplots_adjust(hspace=0, wspace=0.1)
1233
+
1234
+ # order = 0
1235
+ # order_lines = lines[lines["order"] == order]
1236
+ # if len(order_lines) > 0:
1237
+ # residual = self.calculate_residual(coef, order_lines)
1238
+ # plt.plot(order_lines["posm"], residual, "rX")
1239
+ # plt.hlines([0], 0, self.ncol)
1240
+ # plt.xlim(0, self.ncol)
1241
+ # plt.ylim(-self.threshold, self.threshold)
1242
+ # plt.xlabel("x [Pixel]")
1243
+ # plt.ylabel("Residual [m/s]")
1244
+
1245
+ util.show_or_save("wavecal_residuals")
1246
+
1247
+ def _find_peaks(self, comb):
1248
+ # Find peaks in the comb spectrum
1249
+ # Run find_peak twice
1250
+ # once to find the average distance between peaks
1251
+ # once for real (disregarding close peaks)
1252
+ c = comb - np.ma.min(comb)
1253
+ width = self.lfc_peak_width
1254
+ height = np.ma.median(c)
1255
+ peaks, _ = signal.find_peaks(c, height=height, width=width)
1256
+ distance = np.median(np.diff(peaks)) // 4
1257
+ peaks, _ = signal.find_peaks(c, height=height, distance=distance, width=width)
1258
+
1259
+ # Fit peaks with gaussian to get accurate position
1260
+ new_peaks = peaks.astype(float)
1261
+ width = np.mean(np.diff(peaks)) // 2
1262
+ for j, p in enumerate(peaks):
1263
+ idx = p + np.arange(-width, width + 1, 1)
1264
+ idx = np.clip(idx, 0, len(c) - 1).astype(int)
1265
+ try:
1266
+ coef = util.gaussfit3(np.arange(len(idx)), c[idx])
1267
+ new_peaks[j] = coef[1] + p - width
1268
+ except RuntimeError:
1269
+ new_peaks[j] = p
1270
+
1271
+ n = np.arange(len(peaks))
1272
+
1273
+ # keep peaks within the range
1274
+ mask = (new_peaks > 0) & (new_peaks < len(c))
1275
+ n, new_peaks = n[mask], new_peaks[mask]
1276
+
1277
+ return n, new_peaks
1278
+
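A compact illustration of the two-pass peak search above (editorial, with invented numbers): if the first signal.find_peaks call returns peaks spaced roughly every 16 pixels, the second pass uses distance = 16 // 4 = 4 to suppress spurious neighbouring maxima, and each surviving peak is then re-centred with a Gaussian fit over a window of about half the peak spacing.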
1279
+ def calculate_AIC(self, lines, wave_solution):
1280
+ if self.step_mode:
1281
+ if self.dimensionality == "1D":
1282
+ k = 1
1283
+ for _, v in wave_solution.items():
1284
+ k += np.size(v[0])
1285
+ k += np.size(v[1])
1286
+ elif self.dimensionality == "2D":
1287
+ k = 1
1288
+ poly_coef, steps_coef = wave_solution
1289
+ for _, v in steps_coef.items():
1290
+ k += np.size(v)
1291
+ k += np.size(poly_coef)
1292
+ else:
1293
+ k = np.size(wave_solution) + 1
1294
+
1295
+ # We get the residuals in velocity space
1296
+ # but need to remove the speed of light component, to get dimensionless parameters
1297
+ x = lines["posm"]
1298
+ y = lines["order"]
1299
+ ~lines["flag"]
1300
+ solution = self.evaluate_solution(x, y, wave_solution)
1301
+ rss = (solution - lines["wll"]) / lines["wll"]
1302
+
1303
+ # rss = self.calculate_residual(wave_solution, lines)
1304
+ # rss /= speed_of_light
1305
+ n = rss.size
1306
+ rss = np.ma.sum(rss**2)
1307
+
1308
+ # As per Wikipedia https://en.wikipedia.org/wiki/Akaike_information_criterion
1309
+ logl = np.log(rss)
1310
+ aic = 2 * k + n * logl
1311
+ self.logl = logl
1312
+ self.aicc = aic + (2 * k**2 + 2 * k) / (n - k - 1)
1313
+ self.aic = aic
1314
+ return aic
1315
+
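For reference, the quantities computed above correspond to

    AIC  = 2*k + n*ln(RSS)
    AICc = AIC + (2*k**2 + 2*k) / (n - k - 1)

with k the number of free parameters of the wavelength solution and n the number of lines; the residual sum of squares RSS is taken in dimensionless (wavelength-relative) units rather than m/s.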
1316
+ def execute(self, obs, lines):
1317
+ """
1318
+ Perform the whole wavelength calibration procedure with the current settings
1319
+
1320
+ Parameters
1321
+ ----------
1322
+ obs : array of shape (nord, ncol)
1323
+ observed image
1324
+ lines : recarray of shape (nlines,)
1325
+ reference linelist
1326
+
1327
+ Returns
1328
+ -------
1329
+ wave_img : array of shape (nord, ncol)
1330
+ Wavelength solution for each pixel
1331
+
1332
+ Raises
1333
+ ------
1334
+ NotImplementedError
1335
+ If the polarimetry flag is set
1336
+ """
1337
+
1338
+ if self.polarim:
1339
+ raise NotImplementedError("polarized orders not implemented yet")
1340
+
1341
+ self.nord, self.ncol = obs.shape
1342
+ lines = LineList(lines)
1343
+ if self.element is not None:
1344
+ try:
1345
+ self.atlas = LineAtlas(self.element, self.medium)
1346
+ except FileNotFoundError:
1347
+ logger.warning("No Atlas file found for element %s", self.element)
1348
+ self.atlas = None
1349
+ except:
1350
+ self.atlas = None
1351
+ else:
1352
+ self.atlas = None
1353
+
1354
+ obs, lines = self.normalize(obs, lines)
1355
+ # Step 1: align obs and reference
1356
+ lines = self.align(obs, lines)
1357
+
1358
+ # Keep original positions for reference
1359
+ lines["posc"] = np.copy(lines["posm"])
1360
+
1361
+ # Step 2: Locate the lines on the detector, and update the pixel position
1362
+ # lines["flag"] = True
1363
+ lines = self.fit_lines(obs, lines)
1364
+
1365
+ for i in range(self.iterations):
1366
+ logger.info(f"Wavelength calibration iteration: {i}")
1367
+ # Step 3: Create a wavelength solution on known lines
1368
+ wave_solution = self.build_2d_solution(lines)
1369
+ wave_img = self.make_wave(wave_solution)
1370
+ # Step 4: Identify lines that fit into the solution
1371
+ lines = self.auto_id(obs, wave_img, lines)
1372
+ # Step 5: Reject outliers
1373
+ lines = self.reject_lines(lines)
1374
+ # lines = self.reject_lines(lines)
1375
+
1376
+ logger.info(
1377
+ "Number of lines used for wavelength calibration: %i",
1378
+ np.count_nonzero(lines["flag"]),
1379
+ )
1380
+
1381
+ # Step 6: build final 2d solution
1382
+ wave_solution = self.build_2d_solution(lines, plot=self.plot)
1383
+ wave_img = self.make_wave(wave_solution)
1384
+
1385
+ if self.plot:
1386
+ self.plot_results(wave_img, obs)
1387
+
1388
+ aic = self.calculate_AIC(lines, wave_solution)
1389
+ logger.info("AIC of wavelength fit: %f", aic)
1390
+
1391
+ # np.savez("cs_lines.npz", cs_lines=lines.data)
1392
+
1393
+ return wave_img, wave_solution, lines
1394
+
1395
+
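A minimal end-to-end sketch of using the class above (editorial illustration; the input arrays and file names are assumptions, not taken from the package):

    import numpy as np
    from pyreduce.wavelength_calibration import LineList, WavelengthCalibration

    obs = np.load("thar_extracted.npy")               # hypothetical (nord, ncol) calibration spectrum
    reference = LineList.load("reference_lines.npz")  # hypothetical linelist stored under 'cs_lines'

    module = WavelengthCalibration(
        threshold=100,        # reject lines with residuals above 100 m/s
        degree=(6, 6),        # 2D polynomial degree in (pixel, order)
        dimensionality="2D",
        plot=False,
    )
    wave_img, wave_solution, lines = module.execute(obs, reference.data)
    # wave_img has the same shape as obs and gives the wavelength of every pixel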
1396
+ class WavelengthCalibrationComb(WavelengthCalibration):
+     def execute(self, comb, wave, lines=None):
+         self.nord, self.ncol = comb.shape
+
+         # TODO: give everything better names
+         pixel, order, wavelengths = [], [], []
+         n_all, f_all = [], []
+         comb = np.ma.masked_array(comb, mask=comb <= 0)
+
+         for i in range(self.nord):
+             # Find peak positions in the current order
+             n, peaks = self._find_peaks(comb[i])
+
+             # Determine the n-offset of this order, relative to the anchor frequency
+             # Use the existing absolute wavelength calibration as reference
+             y_ord = np.full(len(peaks), i)
+             w_old = interp1d(np.arange(len(wave[i])), wave[i], kind="cubic")(peaks)
+             f_old = speed_of_light / w_old
+
+             # fr: repeating frequency
+             # fd: anchor frequency of this order, needs to be shifted to the absolute reference frame
+             fr = np.median(np.diff(f_old))
+             fd = np.median(f_old % fr)
+             n_raw = (f_old - fd) / fr
+             n = np.round(n_raw)
+
+             if np.any(np.abs(n_raw - n) > 0.3):
+                 logger.warning(
+                     "Bad peaks detected in the frequency comb in order %i", i
+                 )
+
+             fr, fd = polyfit(n, f_old, deg=1)
+
+             n_offset = 0
+             # The first order is used as the baseline for all other orders
+             # The choice is arbitrary and doesn't matter
+             if i == 0:
+                 f0 = fd
+                 n_offset = 0
+             else:
+                 # n0: shift in n, relative to the absolute reference
+                 # shift n to the absolute grid, so that all peaks are given by the same f0
+                 n_offset = (f0 - fd) / fr
+                 n_offset = int(round(n_offset))
+                 n -= n_offset
+                 fd += n_offset * fr
+
+             n = np.abs(n)
+
+             n_all += [n]
+             f_all += [f_old]
+             pixel += [peaks]
+             order += [y_ord]
+
+             logger.debug(
+                 "LFC Order: %i, f0: %.3f, fr: %.5f, n0: %.2f", i, fd, fr, n_offset
+             )
+
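+         # Worked example of the mode-number assignment above (illustrative numbers,
+         # in the same 10**10 Hz units as the debug output): for peak frequencies
+         # f_old = [400.10, 400.35, 400.60] the spacing is fr = 0.25 and the common
+         # remainder is fd = median(f_old % fr) = 0.10, so
+         # n_raw = (f_old - fd) / fr = [1600.0, 1601.0, 1602.0] and n = round(n_raw).
+         # Peaks whose n_raw lies further than 0.3 from an integer trigger the warning above.
+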
1454
+         # Here we postulate that m * lambda = const,
+         # where m is the peak number.
+         # This follows from the grating equation;
+         # at least const is roughly constant for neighbouring peaks
+         correct = True
+         if correct:
+             w_all = [speed_of_light / f for f in f_all]
+             mw_all = [m * w for m, w in zip(n_all, w_all, strict=False)]
+             y = np.concatenate(mw_all)
+             gap = np.median(y)
+
+             corr = np.zeros(self.nord)
+             for i in range(self.nord):
+                 corri = gap / w_all[i] - n_all[i]
+                 corri = np.median(corri)
+                 corr[i] = np.round(corri)
+                 n_all[i] += corr[i]
+
+             logger.debug("LFC order offset correction: %s", corr)
+
+             for i in range(self.nord):
+                 coef = polyfit(n_all[i], n_all[i] * w_all[i], deg=5)
+                 mw = np.polyval(coef, n_all[i])
+                 w_all[i] = mw / n_all[i]
+                 f_all[i] = speed_of_light / w_all[i]
+
+         # Merge data
+         n_all = np.concatenate(n_all)
+         f_all = np.concatenate(f_all)
+         pixel = np.concatenate(pixel)
+         order = np.concatenate(order)
+
+         # Fit f0 and fr to all data
+         # (fr, f0), cov = np.polyfit(n_all, f_all, deg=1, cov=True)
+         fr, f0 = polyfit(n_all, f_all, deg=1)
+
+         logger.debug("Laser Frequency Comb Anchor Frequency: %.3f 10**10 Hz", f0)
+         logger.debug("Laser Frequency Comb Repeating Frequency: %.5f 10**10 Hz", fr)
+
+         # All peaks are then given by f0 + n * fr
+         wavelengths = speed_of_light / (f0 + n_all * fr)
+
+         flag = np.full(len(wavelengths), True)
+         laser_lines = np.rec.fromarrays(
+             (wavelengths, pixel, pixel, order, flag),
+             names=("wll", "posm", "posc", "order", "flag"),
+         )
+
+         # Now use the better resolution to find the new solution
+         # A single pass of discarding outliers should be enough
+         coef = self.build_2d_solution(laser_lines)
+         # resid = self.calculate_residual(coef, laser_lines)
+         # laser_lines["flag"] = np.abs(resid) < self.threshold
+         # coef = self.build_2d_solution(laser_lines)
+         new_wave = self.make_wave(coef)
+
+         self.calculate_AIC(laser_lines, coef)
+
+         self.n_lines_good = np.count_nonzero(laser_lines["flag"])
+         logger.info(
+             f"Laser Frequency Comb solution based on {self.n_lines_good} lines."
+         )
+         if self.plot:
+             residual = wave - new_wave
+             residual = residual.ravel()
+
+             area = np.percentile(residual, (32, 50, 68))
+             area = area[0] - 5 * (area[1] - area[0]), area[0] + 5 * (area[2] - area[1])
+             plt.hist(residual, bins=100, range=area)
+             title = "ThAr - LFC"
+             if self.plot_title is not None:
+                 title = f"{self.plot_title}\n{title}"
+             plt.title(title)
+             plt.xlabel(r"$\Delta\lambda$ [Å]")
+             plt.ylabel("N")
+             util.show_or_save("wavecal_lfc_hist")
+
+         if self.plot:
+             if lines is not None:
+                 self.plot_residuals(
+                     lines,
+                     coef,
+                     title="GasLamp Line Residuals in the Laser Frequency Comb Solution",
+                 )
+             self.plot_residuals(
+                 laser_lines,
+                 coef,
+                 title="Laser Frequency Comb Peak Residuals in the LFC Solution",
+             )
+
+         if self.plot:
+             wave_img = wave
+             title = "Difference between GasLamp Solution and Laser Frequency Comb solution\nEach plot shows one order"
+             if self.plot_title is not None:
+                 title = f"{self.plot_title}\n{title}"
+             plt.suptitle(title)
+             for i in range(len(new_wave)):
+                 plt.subplot(len(new_wave) // 4 + 1, 4, i + 1)
+                 plt.plot(wave_img[i] - new_wave[i])
+             util.show_or_save("wavecal_lfc_diff")
+
+         if self.plot:
+             self.plot_results(new_wave, comb)
+
+         return new_wave
+
+
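+ # Illustrative usage sketch (assumptions, not part of the released code): refining
+ # an existing gas-lamp solution `wave` with an extracted frequency-comb spectrum
+ # `comb` of the same (nord, ncol) shape; `linelist` is optional and only used for
+ # the residual plots.
+ #
+ #     wcal_comb = WavelengthCalibrationComb(degree=(6, 6), plot=False)
+ #     new_wave = wcal_comb.execute(comb, wave, lines=linelist)
+
+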
1561
+ class WavelengthCalibrationInitialize(WavelengthCalibration):
+     def __init__(
+         self,
+         degree=2,
+         plot=False,
+         plot_title="Wavecal Initial",
+         wave_delta=20,
+         nwalkers=100,
+         steps=50_000,
+         resid_delta=1000,
+         cutoff=5,
+         smoothing=0,
+         element="thar",
+         medium="vac",
+     ):
+         super().__init__(
+             degree=degree,
+             element=element,
+             medium=medium,
+             plot=plot,
+             plot_title=plot_title,
+             dimensionality="1D",
+         )
+         #:float: wavelength uncertainty on the initial guess in Angstrom
+         self.wave_delta = wave_delta
+         #:int: number of walkers in the MCMC
+         self.nwalkers = nwalkers
+         #:int: number of steps in the MCMC
+         self.steps = steps
+         #:float: residual uncertainty allowed when matching observation with known lines (in m/s)
+         self.resid_delta = resid_delta
+         #:float: Gaussian smoothing applied to the wavecal spectrum before the MCMC, in pixels; set to 0 to disable
+         self.smoothing = smoothing
+         #:float: minimum value in the spectrum for a point to count as a spectral line; values of 1 or more are interpreted as a percentile of the spectrum
+         self.cutoff = cutoff
+
+     def get_cutoff(self, spectrum):
+         if self.cutoff == 0:
+             cutoff = None
+         elif self.cutoff < 1:
+             cutoff = self.cutoff
+         else:
+             cutoff = np.nanpercentile(spectrum[spectrum != 0], self.cutoff)
+         return cutoff
+
+     def normalize(self, spectrum):
+         smoothing = self.smoothing
+         spectrum = np.copy(spectrum)
+         spectrum -= np.nanmedian(spectrum)
+         if smoothing != 0:
+             spectrum = gaussian_filter1d(spectrum, smoothing)
+         spectrum[spectrum < 0] = 0
+         spectrum /= np.max(spectrum)
+         return spectrum
+
1616
+     def determine_wavelength_coefficients(
+         self,
+         spectrum,
+         atlas,
+         wave_range,
+     ) -> np.ndarray:
+         """
+         Determine the wavelength polynomial coefficients of a spectrum,
+         based on a line atlas with known spectral lines
+         and an initial guess for the wavelength range.
+         The calculation uses an MCMC approach to sample the probability space
+         and find the best cross-correlation value between observation and atlas.
+
+         The polynomial degree, the wavelength uncertainty (wave_delta), the number
+         of walkers and steps, and the plotting behaviour are taken from the instance
+         attributes set in __init__. Lower polynomial degrees and more walkers and
+         steps generally give better results, at the cost of runtime.
+
+         Parameters
+         ----------
+         spectrum : array
+             observed spectrum at each pixel
+         atlas : LineAtlas
+             atlas containing a known spectrum with wavelength and flux
+         wave_range : 2-tuple
+             initial wavelength guess (begin, end)
+
+         Returns
+         -------
+         coef : array
+             polynomial coefficients in numpy order
+         """
+         spectrum = np.asarray(spectrum)
+
+         assert self.degree >= 2, "The polynomial degree must be at least 2"
+         assert spectrum.ndim == 1, "The spectrum should only have 1 dimension"
+         assert self.wave_delta > 0, "The wavelength uncertainty needs to be positive"
+
+         n_features = spectrum.shape[0]
+         n_output = ndim = self.degree + 1
+
+         # Normalize the spectrum, and copy it just in case
+         spectrum = self.normalize(spectrum)
+         cutoff = self.get_cutoff(spectrum)
+
+         # The pixel scale used for everything else
+         x = np.arange(n_features)
+         # Initial guess for the wavelength solution
+         coef = np.zeros(n_output)
+         coef[-1] = wave_range[0]
+         coef[-2] = (wave_range[-1] - wave_range[0]) / n_features
+
+         # We scale every coefficient to roughly order 1;
+         # each scaled coefficient is then in units of the maximum wavelength
+         # offset (in Angstrom) caused by changing it
+         w_scale = 1 / np.power(n_features, range(n_output))
+         factors = w_scale[::-1]
+         coef /= factors
+
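+         # Concrete example of the scaling above (illustrative numbers): for degree 2
+         # and n_features = 4096, w_scale = [1, 1/4096, 1/4096**2] and
+         # factors = [1/4096**2, 1/4096, 1], so each scaled coefficient measures the
+         # wavelength shift (in Angstrom) it produces at the last pixel. The MCMC can
+         # therefore explore all coefficients on a comparable scale of +- wave_delta.
+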
1683
+         # Here we define the functions we need for the MCMC
+         def polyval_vectorize(p, x, where=None):
+             n_poly, n_coef = p.shape
+             n_points = x.shape[0]
+             y = np.zeros((n_poly, n_points))
+             if where is not None:
+                 for i in range(n_coef):
+                     y[where] *= x
+                     y[where] += p[where, i, None]
+             else:
+                 for i in range(n_coef):
+                     y *= x
+                     y += p[:, i, None]
+             return y
+
+         def log_prior(p):
+             prior = np.zeros(p.shape[0])
+             prior[np.any(~np.isfinite(p), axis=1)] = -np.inf
+             prior[np.any(np.abs(p - coef) > self.wave_delta, axis=1)] = -np.inf
+             return prior
+
+         def log_prior_2(w):
+             # Check that w is increasing
+             prior = np.zeros(w.shape[0])
+             prior[np.any(w[:, 1:] < w[:, :-1], axis=1)] = -np.inf
+             prior[w[:, 0] < wave_range[0] - self.wave_delta] = -np.inf
+             prior[w[:, -1] > wave_range[1] + self.wave_delta] = -np.inf
+             return prior
+
+         def log_prob(p):
+             # Check that p is within bounds
+             prior = log_prior(p)
+             where = np.isfinite(prior)
+             # Calculate the wavelength scale
+             w = polyval_vectorize(p * factors, x, where=where)
+             # Check that it is monotonically increasing
+             prior += log_prior_2(w)
+             where = np.isfinite(prior)
+
+             y = np.zeros((p.shape[0], x.shape[0]))
+             y[where, :] = np.interp(w[where, :], atlas.wave, atlas.flux)
+             y[where, :] /= np.max(y[where, :], axis=1)[:, None]
+             # This is the cross correlation value squared
+             cross = np.sum(y * spectrum, axis=1) ** 2
+             # chi2 = - np.sum((y - spectrum)**2, axis=1)
+             # chi2 = - np.sum((np.where(y > 0.01, 1, 0) - np.where(spectrum > 0.01, 1, 0))**2, axis=1)
+             # Equivalent to the masked comparison above, but much faster thanks to bitwise xor
+             if cutoff is not None:
+                 chi2 = (y > cutoff) ^ (spectrum > cutoff)
+                 chi2 = -np.count_nonzero(chi2, axis=1) / 20
+             else:
+                 chi2 = -np.sum((y - spectrum) ** 2, axis=1) / 20
+             return prior + cross + chi2
+
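+         # Sketch of the xor cost above (illustrative masks): with
+         #     y > cutoff        -> [1, 0, 1, 0]
+         #     spectrum > cutoff -> [1, 1, 0, 0]
+         # the xor is [0, 1, 1, 0], i.e. 2 pixels where exactly one of model and
+         # observation shows a line, giving chi2 = -2 / 20 = -0.1 for that walker.
+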
1737
+         p0 = np.zeros((self.nwalkers, ndim))
+         p0 += coef[None, :]
+         p0 += np.random.uniform(
+             low=-self.wave_delta, high=self.wave_delta, size=(self.nwalkers, ndim)
+         )
+         sampler = emcee.EnsembleSampler(
+             self.nwalkers,
+             ndim,
+             log_prob,
+             vectorize=True,
+             moves=[(emcee.moves.DEMove(), 0.8), (emcee.moves.DESnookerMove(), 0.2)],
+         )
+         sampler.run_mcmc(p0, self.steps, progress=True)
+
+         tau = sampler.get_autocorr_time(quiet=True)
+         burnin = int(2 * np.max(tau))
+         thin = int(0.5 * np.min(tau))
+         samples = sampler.get_chain(discard=burnin, thin=thin, flat=True)
+
+         low, mid, high = np.percentile(samples, [32, 50, 68], axis=0)
+         coef = mid * factors
+
+         if self.plot:
+             corner.corner(samples, truths=mid)
+             util.show_or_save("wavecal_init_corner")
+
+             wave = np.polyval(coef, x)
+             y = np.interp(wave, atlas.wave, atlas.flux)
+             y /= np.max(y)
+             plt.plot(wave, spectrum)
+             plt.plot(wave, y)
+             util.show_or_save("wavecal_init_spectrum")
+
+         return coef
+
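+     # The matching in the method below uses a velocity residual,
+     # abs(peak_wave - atlas_wave) / peak_wave * speed_of_light, so the resid_delta
+     # attribute is in m/s (assuming speed_of_light is given in m/s, as the docstring
+     # states). Illustrative example: a peak at 5000 Angstrom that lies 0.01 Angstrom
+     # from the closest atlas line has a residual of 0.01 / 5000 * 3e8 = 600 m/s and
+     # is accepted with the default resid_delta = 1000.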
1772
+     def create_new_linelist_from_solution(
+         self,
+         spectrum,
+         wavelength,
+         atlas,
+         order,
+     ) -> LineList:
+         """
+         Create a new linelist based on an existing wavelength solution for a spectrum
+         and a line atlas with known lines. The linelist is the one used by the rest of
+         the PyReduce wavelength calibration.
+
+         Observed lines are matched with the lines in the atlas to improve the
+         wavelength solution. The maximum residual (in m/s) allowed between a peak
+         and the closest atlas line is set by the resid_delta attribute.
+
+         Parameters
+         ----------
+         spectrum : array
+             Observed spectrum at each pixel
+         wavelength : array
+             Wavelength of the spectrum at each pixel
+         atlas : LineAtlas
+             Atlas with the wavelengths of known lines
+         order : int
+             Order of the spectrum within the detector
+
+         Returns
+         -------
+         linelist : LineList
+             new linelist with the lines from this order
+         """
+         # The new linelist
+         linelist = LineList()
+         spectrum = np.asarray(spectrum)
+         wavelength = np.asarray(wavelength)
+
+         assert self.resid_delta > 0, "Residuals Delta must be positive"
+         assert spectrum.ndim == 1, "Spectrum must have only 1 dimension"
+         assert wavelength.ndim == 1, "Wavelength must have only 1 dimension"
+         assert spectrum.size == wavelength.size, (
+             "Spectrum and Wavelength must have the same size"
+         )
+
+         n_features = spectrum.shape[0]
+         x = np.arange(n_features)
+
+         # Normalize just in case
+         spectrum = self.normalize(spectrum)
+         cutoff = self.get_cutoff(spectrum)
+
+         # TODO: make this use another function, and pass the height as a parameter
+         scopy = np.copy(spectrum)
+         if cutoff is not None:
+             scopy[scopy < cutoff] = 0
+         _, peaks = self._find_peaks(scopy)
+
+         peak_wave = np.interp(peaks, x, wavelength)
+         peak_height = np.interp(peaks, x, spectrum)
+
+         # Here we only look at the lines within range
+         atlas_linelist = atlas.linelist[
+             (atlas.linelist["wave"] > wavelength[0])
+             & (atlas.linelist["wave"] < wavelength[-1])
+         ]
+
+         residuals = np.zeros_like(peak_wave)
+         for i, pw in enumerate(peak_wave):
+             resid = np.abs(pw - atlas_linelist["wave"])
+             j = np.argmin(resid)
+             residuals[i] = resid[j] / pw * speed_of_light
+             if residuals[i] < self.resid_delta:
+                 linelist.add_line(
+                     atlas_linelist["wave"][j],
+                     order,
+                     peaks[i],
+                     3,
+                     peak_height[i],
+                     True,
+                 )
+
+         return linelist
+
1857
+     def execute(self, spectrum, wave_range) -> LineList:
+         atlas = LineAtlas(self.element, self.medium)
+         linelist = LineList()
+         orders = range(spectrum.shape[0])
+         x = np.arange(spectrum.shape[1])
+         for order in orders:
+             spec = spectrum[order]
+             wrange = wave_range[order]
+             coef = self.determine_wavelength_coefficients(spec, atlas, wrange)
+             wave = np.polyval(coef, x)
+             linelist_loc = self.create_new_linelist_from_solution(
+                 spec, wave, atlas, order
+             )
+             linelist.append(linelist_loc)
+         return linelist
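+
+
+ # Illustrative usage sketch (assumptions, not part of the released code):
+ # bootstrapping a linelist when no previous wavelength solution exists. `spectrum`
+ # is the extracted (nord, ncol) wavecal spectrum and `wave_range` holds a rough
+ # (begin, end) guess in Angstrom for every order; the MCMC settings come from the
+ # constructor.
+ #
+ #     init = WavelengthCalibrationInitialize(degree=2, wave_delta=20, steps=50_000)
+ #     wave_range = [(5300, 5500), (5500, 5700)]  # one tuple per order, assumed values
+ #     linelist = init.execute(spectrum, wave_range)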