pyreduce-astro 0.6.0b1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (151)
  1. pyreduce/__init__.py +58 -0
  2. pyreduce/__main__.py +106 -0
  3. pyreduce/clib/__init__.py +0 -0
  4. pyreduce/clib/_slitfunc_2d.cpython-313-darwin.so +0 -0
  5. pyreduce/clib/_slitfunc_bd.cpython-313-darwin.so +0 -0
  6. pyreduce/clib/build_extract.py +75 -0
  7. pyreduce/clib/slit_func_2d_xi_zeta_bd.c +1313 -0
  8. pyreduce/clib/slit_func_2d_xi_zeta_bd.h +55 -0
  9. pyreduce/clib/slit_func_bd.c +362 -0
  10. pyreduce/clib/slit_func_bd.h +17 -0
  11. pyreduce/clipnflip.py +147 -0
  12. pyreduce/combine_frames.py +855 -0
  13. pyreduce/configuration.py +186 -0
  14. pyreduce/continuum_normalization.py +329 -0
  15. pyreduce/cwrappers.py +404 -0
  16. pyreduce/datasets.py +231 -0
  17. pyreduce/echelle.py +413 -0
  18. pyreduce/estimate_background_scatter.py +129 -0
  19. pyreduce/extract.py +1359 -0
  20. pyreduce/extraction_width.py +77 -0
  21. pyreduce/instruments/__init__.py +0 -0
  22. pyreduce/instruments/andes.json +59 -0
  23. pyreduce/instruments/andes.py +100 -0
  24. pyreduce/instruments/common.json +46 -0
  25. pyreduce/instruments/common.py +675 -0
  26. pyreduce/instruments/crires_plus.json +63 -0
  27. pyreduce/instruments/crires_plus.py +103 -0
  28. pyreduce/instruments/filters.py +195 -0
  29. pyreduce/instruments/harpn.json +136 -0
  30. pyreduce/instruments/harpn.py +201 -0
  31. pyreduce/instruments/harps.json +155 -0
  32. pyreduce/instruments/harps.py +310 -0
  33. pyreduce/instruments/instrument_info.py +140 -0
  34. pyreduce/instruments/instrument_schema.json +221 -0
  35. pyreduce/instruments/jwst_miri.json +53 -0
  36. pyreduce/instruments/jwst_miri.py +29 -0
  37. pyreduce/instruments/jwst_niriss.json +52 -0
  38. pyreduce/instruments/jwst_niriss.py +98 -0
  39. pyreduce/instruments/lick_apf.json +53 -0
  40. pyreduce/instruments/lick_apf.py +35 -0
  41. pyreduce/instruments/mcdonald.json +59 -0
  42. pyreduce/instruments/mcdonald.py +123 -0
  43. pyreduce/instruments/metis_ifu.json +63 -0
  44. pyreduce/instruments/metis_ifu.py +45 -0
  45. pyreduce/instruments/metis_lss.json +65 -0
  46. pyreduce/instruments/metis_lss.py +45 -0
  47. pyreduce/instruments/micado.json +53 -0
  48. pyreduce/instruments/micado.py +45 -0
  49. pyreduce/instruments/neid.json +51 -0
  50. pyreduce/instruments/neid.py +154 -0
  51. pyreduce/instruments/nirspec.json +56 -0
  52. pyreduce/instruments/nirspec.py +215 -0
  53. pyreduce/instruments/nte.json +47 -0
  54. pyreduce/instruments/nte.py +42 -0
  55. pyreduce/instruments/uves.json +59 -0
  56. pyreduce/instruments/uves.py +46 -0
  57. pyreduce/instruments/xshooter.json +66 -0
  58. pyreduce/instruments/xshooter.py +39 -0
  59. pyreduce/make_shear.py +606 -0
  60. pyreduce/masks/mask_crires_plus_det1.fits.gz +0 -0
  61. pyreduce/masks/mask_crires_plus_det2.fits.gz +0 -0
  62. pyreduce/masks/mask_crires_plus_det3.fits.gz +0 -0
  63. pyreduce/masks/mask_ctio_chiron.fits.gz +0 -0
  64. pyreduce/masks/mask_elodie.fits.gz +0 -0
  65. pyreduce/masks/mask_feros3.fits.gz +0 -0
  66. pyreduce/masks/mask_flames_giraffe.fits.gz +0 -0
  67. pyreduce/masks/mask_harps_blue.fits.gz +0 -0
  68. pyreduce/masks/mask_harps_red.fits.gz +0 -0
  69. pyreduce/masks/mask_hds_blue.fits.gz +0 -0
  70. pyreduce/masks/mask_hds_red.fits.gz +0 -0
  71. pyreduce/masks/mask_het_hrs_2x5.fits.gz +0 -0
  72. pyreduce/masks/mask_jwst_miri_lrs_slitless.fits.gz +0 -0
  73. pyreduce/masks/mask_jwst_niriss_gr700xd.fits.gz +0 -0
  74. pyreduce/masks/mask_lick_apf_.fits.gz +0 -0
  75. pyreduce/masks/mask_mcdonald.fits.gz +0 -0
  76. pyreduce/masks/mask_nes.fits.gz +0 -0
  77. pyreduce/masks/mask_nirspec_nirspec.fits.gz +0 -0
  78. pyreduce/masks/mask_sarg.fits.gz +0 -0
  79. pyreduce/masks/mask_sarg_2x2a.fits.gz +0 -0
  80. pyreduce/masks/mask_sarg_2x2b.fits.gz +0 -0
  81. pyreduce/masks/mask_subaru_hds_red.fits.gz +0 -0
  82. pyreduce/masks/mask_uves_blue.fits.gz +0 -0
  83. pyreduce/masks/mask_uves_blue_binned_2_2.fits.gz +0 -0
  84. pyreduce/masks/mask_uves_middle.fits.gz +0 -0
  85. pyreduce/masks/mask_uves_middle_2x2_split.fits.gz +0 -0
  86. pyreduce/masks/mask_uves_middle_binned_2_2.fits.gz +0 -0
  87. pyreduce/masks/mask_uves_red.fits.gz +0 -0
  88. pyreduce/masks/mask_uves_red_2x2.fits.gz +0 -0
  89. pyreduce/masks/mask_uves_red_2x2_split.fits.gz +0 -0
  90. pyreduce/masks/mask_uves_red_binned_2_2.fits.gz +0 -0
  91. pyreduce/masks/mask_xshooter_nir.fits.gz +0 -0
  92. pyreduce/rectify.py +138 -0
  93. pyreduce/reduce.py +2205 -0
  94. pyreduce/settings/settings_CRIRES_PLUS.json +89 -0
  95. pyreduce/settings/settings_HARPN.json +73 -0
  96. pyreduce/settings/settings_HARPS.json +69 -0
  97. pyreduce/settings/settings_JWST_MIRI.json +55 -0
  98. pyreduce/settings/settings_JWST_NIRISS.json +55 -0
  99. pyreduce/settings/settings_LICK_APF.json +62 -0
  100. pyreduce/settings/settings_MCDONALD.json +58 -0
  101. pyreduce/settings/settings_METIS_IFU.json +77 -0
  102. pyreduce/settings/settings_METIS_LSS.json +77 -0
  103. pyreduce/settings/settings_MICADO.json +78 -0
  104. pyreduce/settings/settings_NEID.json +73 -0
  105. pyreduce/settings/settings_NIRSPEC.json +58 -0
  106. pyreduce/settings/settings_NTE.json +60 -0
  107. pyreduce/settings/settings_UVES.json +54 -0
  108. pyreduce/settings/settings_XSHOOTER.json +78 -0
  109. pyreduce/settings/settings_pyreduce.json +178 -0
  110. pyreduce/settings/settings_schema.json +827 -0
  111. pyreduce/tools/__init__.py +0 -0
  112. pyreduce/tools/combine.py +117 -0
  113. pyreduce/trace_orders.py +645 -0
  114. pyreduce/util.py +1288 -0
  115. pyreduce/wavecal/MICADO_HK_3arcsec_chip5.npz +0 -0
  116. pyreduce/wavecal/atlas/thar.fits +4946 -13
  117. pyreduce/wavecal/atlas/thar_list.txt +4172 -0
  118. pyreduce/wavecal/atlas/une.fits +0 -0
  119. pyreduce/wavecal/convert.py +38 -0
  120. pyreduce/wavecal/crires_plus_J1228_Open_det1.npz +0 -0
  121. pyreduce/wavecal/crires_plus_J1228_Open_det2.npz +0 -0
  122. pyreduce/wavecal/crires_plus_J1228_Open_det3.npz +0 -0
  123. pyreduce/wavecal/harpn_harpn_2D.npz +0 -0
  124. pyreduce/wavecal/harps_blue_2D.npz +0 -0
  125. pyreduce/wavecal/harps_blue_pol_2D.npz +0 -0
  126. pyreduce/wavecal/harps_red_2D.npz +0 -0
  127. pyreduce/wavecal/harps_red_pol_2D.npz +0 -0
  128. pyreduce/wavecal/mcdonald.npz +0 -0
  129. pyreduce/wavecal/metis_lss_l_2D.npz +0 -0
  130. pyreduce/wavecal/metis_lss_m_2D.npz +0 -0
  131. pyreduce/wavecal/nirspec_K2.npz +0 -0
  132. pyreduce/wavecal/uves_blue_360nm_2D.npz +0 -0
  133. pyreduce/wavecal/uves_blue_390nm_2D.npz +0 -0
  134. pyreduce/wavecal/uves_blue_437nm_2D.npz +0 -0
  135. pyreduce/wavecal/uves_middle_2x2_2D.npz +0 -0
  136. pyreduce/wavecal/uves_middle_565nm_2D.npz +0 -0
  137. pyreduce/wavecal/uves_middle_580nm_2D.npz +0 -0
  138. pyreduce/wavecal/uves_middle_600nm_2D.npz +0 -0
  139. pyreduce/wavecal/uves_middle_665nm_2D.npz +0 -0
  140. pyreduce/wavecal/uves_middle_860nm_2D.npz +0 -0
  141. pyreduce/wavecal/uves_red_580nm_2D.npz +0 -0
  142. pyreduce/wavecal/uves_red_600nm_2D.npz +0 -0
  143. pyreduce/wavecal/uves_red_665nm_2D.npz +0 -0
  144. pyreduce/wavecal/uves_red_760nm_2D.npz +0 -0
  145. pyreduce/wavecal/uves_red_860nm_2D.npz +0 -0
  146. pyreduce/wavecal/xshooter_nir.npz +0 -0
  147. pyreduce/wavelength_calibration.py +1873 -0
  148. pyreduce_astro-0.6.0b1.dist-info/METADATA +112 -0
  149. pyreduce_astro-0.6.0b1.dist-info/RECORD +151 -0
  150. pyreduce_astro-0.6.0b1.dist-info/WHEEL +4 -0
  151. pyreduce_astro-0.6.0b1.dist-info/licenses/LICENSE +674 -0
pyreduce/extract.py ADDED
@@ -0,0 +1,1359 @@
1
+ """Module for extracting data from observations
2
+
3
+ Authors
4
+ -------
5
+
6
+ Version
7
+ -------
8
+
9
+ License
10
+ -------
11
+ """
12
+
13
+ import logging
14
+
15
+ import matplotlib.pyplot as plt
16
+ import numpy as np
17
+ from scipy.interpolate import interp1d
18
+ from tqdm import tqdm
19
+
20
+ from .cwrappers import slitfunc_curved
21
+ from .util import make_index
22
+
23
+ logger = logging.getLogger(__name__)
24
+
25
+
26
+ class ProgressPlot: # pragma: no cover
27
+ def __init__(self, nrow, ncol, nslitf, nbad=1000, title=None):
28
+ self.nrow = nrow
29
+ self.ncol = ncol
30
+ self.nslitf = nslitf
31
+
32
+ self.nbad = nbad
33
+
34
+ plt.ion()
35
+ self.fig = plt.figure(figsize=(12, 4))
36
+
37
+ # self.ax1 = self.fig.add_subplot(231, projection="3d")
38
+ self.ax1 = self.fig.add_subplot(231)
39
+ self.ax1.set_title("Swath")
40
+ self.ax1.set_ylabel("y [pixel]")
41
+ self.ax2 = self.fig.add_subplot(132)
42
+ self.ax2.set_title("Spectrum")
43
+ self.ax2.set_xlabel("x [pixel]")
44
+ self.ax2.set_ylabel("flux [arb. unit]")
45
+ self.ax2.set_xlim((0, ncol))
46
+ self.ax3 = self.fig.add_subplot(133)
47
+ self.ax3.set_title("Slit")
48
+ self.ax3.set_xlabel("y [pixel]")
49
+ self.ax3.set_ylabel("contribution [1]")
50
+ self.ax3.set_xlim((0, nrow))
51
+ # self.ax4 = self.fig.add_subplot(234, projection="3d")
52
+ self.ax4 = self.fig.add_subplot(234)
53
+ self.ax4.set_title("Model")
54
+ self.ax4.set_xlabel("x [pixel]")
55
+ self.ax4.set_ylabel("y [pixel]")
56
+
57
+ self.title = title
58
+ if title is not None:
59
+ self.fig.suptitle(title)
60
+
61
+ self.fig.tight_layout()
62
+
63
+ # Just plot empty pictures, to create the plots
64
+ # Update the data later
65
+ img = np.ones((nrow, ncol))
66
+ # y, x = np.indices((nrow, ncol))
67
+ # self.im_obs = self.ax1.plot_surface(x, y, img)
68
+ # self.im_model = self.ax4.plot_surface(x, y, img)
69
+ self.im_obs = self.ax1.imshow(img)
70
+ self.im_model = self.ax4.imshow(img)
71
+
72
+ (self.dots_spec,) = self.ax2.plot(
73
+ np.zeros(nrow * ncol), np.zeros(nrow * ncol), ".r", ms=2, alpha=0.6
74
+ )
75
+ (self.line_spec,) = self.ax2.plot(np.zeros(ncol), "-k")
76
+ (self.mask_spec,) = self.ax2.plot(np.zeros(self.nbad), "Pg")
77
+ (self.dots_slit,) = self.ax3.plot(
78
+ np.zeros(nrow * ncol), np.zeros(nrow * ncol), ".r", ms=2, alpha=0.6
79
+ )
80
+ (self.line_slit,) = self.ax3.plot(np.zeros(nrow), "-k", lw=2)
81
+ (self.mask_slit,) = self.ax3.plot(np.zeros(self.nbad), "Pg")
82
+
83
+ # self.ax1.set_zscale("log")
84
+ # self.ax4.set_zscale("log")
85
+
86
+ self.fig.canvas.draw()
87
+ self.fig.canvas.flush_events()
88
+
89
+ def fix_linear(self, data, limit, fill=0):
90
+ """Assures the size of the 1D array data is equal to limit"""
91
+
92
+ if len(data) > limit:
93
+ data = data[:limit]
94
+ elif len(data) < limit:
95
+ padding = np.full(limit - len(data), fill, dtype=data.dtype)
96
+ data = np.concatenate((data, padding))
97
+ return data
98
+
99
+ def plot(self, img, spec, slitf, model, ycen, mask, ord_num, left, right):
100
+ img = np.copy(img)
101
+ spec = np.copy(spec)
102
+ slitf = np.copy(slitf)
103
+ ycen = np.copy(ycen)
104
+
105
+ ny = img.shape[0]
106
+ nspec = img.shape[1]
107
+ x_spec, y_spec = self.get_spec(img, spec, slitf, ycen)
108
+ x_slit, y_slit = self.get_slitf(img, spec, slitf, ycen)
109
+ ycen = ycen + ny / 2
110
+
111
+ old = np.linspace(-1, ny, len(slitf))
112
+
113
+ # Fix Sizes
114
+ mask_spec_x = self.fix_linear(x_spec[mask.ravel()], self.nbad, fill=np.nan)
115
+ mask_spec = self.fix_linear(y_spec[mask.ravel()], self.nbad, fill=np.nan)
116
+ mask_slit_x = self.fix_linear(x_slit[mask.ravel()], self.nbad, fill=np.nan)
117
+ mask_slit = self.fix_linear(y_slit[mask.ravel()], self.nbad, fill=np.nan)
118
+
119
+ ycen = self.fix_linear(ycen, self.ncol)
120
+ x_spec = self.fix_linear(x_spec, self.ncol * self.nrow)
121
+ y_spec = self.fix_linear(y_spec, self.ncol * self.nrow)
122
+ spec = self.fix_linear(spec, self.ncol)
123
+ x_slit = self.fix_linear(x_slit, self.ncol * self.nrow)
124
+ y_slit = self.fix_linear(y_slit, self.ncol * self.nrow)
125
+ old = self.fix_linear(old, self.nslitf)
126
+ sf = self.fix_linear(slitf, self.nslitf)
127
+
128
+ # Update Data
129
+ model = np.clip(model, 0, np.max(model[5:-5, 5:-5]) * 1.1)
130
+ self.im_obs.remove()
131
+ img = np.clip(img, 0, np.max(model) * 1.1)
132
+ # y, x = np.indices(img.shape)
133
+ # self.im_obs = self.ax1.plot_surface(x, y, img)
134
+ self.im_obs = self.ax1.imshow(img, aspect="auto", origin="lower")
135
+ vmin, vmax = self.im_obs.norm.vmin, self.im_obs.norm.vmax
136
+ self.im_model.remove()
137
+ # y, x = np.indices(model.shape)
138
+ # self.im_model = self.ax4.plot_surface(x, y, model)
139
+ self.im_model = self.ax4.imshow(
140
+ model, aspect="auto", origin="lower", vmin=vmin, vmax=vmax
141
+ )
142
+
143
+ # self.line_ycen.set_ydata(ycen)
144
+ self.dots_spec.set_xdata(x_spec)
145
+ self.dots_spec.set_ydata(y_spec)
146
+ self.line_spec.set_ydata(spec)
147
+
148
+ self.mask_spec.set_xdata(mask_spec_x)
149
+ self.mask_spec.set_ydata(mask_spec)
150
+
151
+ self.dots_slit.set_xdata(x_slit)
152
+ self.dots_slit.set_ydata(y_slit)
153
+ self.line_slit.set_xdata(old)
154
+ self.line_slit.set_ydata(sf)
155
+
156
+ self.mask_slit.set_xdata(mask_slit_x)
157
+ self.mask_slit.set_ydata(mask_slit)
158
+
159
+ self.ax2.set_xlim((0, nspec - 1))
160
+ limit = np.nanmax(spec[5:-5]) * 1.1
161
+ if not np.isnan(limit):
162
+ self.ax2.set_ylim((0, limit))
163
+
164
+ self.ax3.set_xlim((0, ny - 1))
165
+ limit = np.nanmax(sf) * 1.1
166
+ if not np.isnan(limit):
167
+ self.ax3.set_ylim((0, limit))
168
+
169
+ title = f"Order {ord_num}, Columns {left} - {right}"
170
+ if self.title is not None:
171
+ title = f"{self.title}\n{title}"
172
+ self.fig.suptitle(title)
173
+ self.fig.canvas.draw()
174
+ self.fig.canvas.flush_events()
175
+
176
+ def close(self):
177
+ plt.ioff()
178
+ plt.close()
179
+
180
+ def get_spec(self, img, spec, slitf, ycen):
181
+ """get the spectrum corrected by the slit function"""
182
+ nrow, ncol = img.shape
183
+ x, y = np.indices(img.shape)
184
+ ycen = ycen - ycen.astype(int)
185
+
186
+ x = x - ycen + 0.5
187
+ old = np.linspace(-1, nrow - 1 + 1, len(slitf))
188
+ sf = np.interp(x, old, slitf)
189
+
190
+ x = img / sf
191
+
192
+ x = x.ravel()
193
+ y = y.ravel()
194
+ return y, x
195
+
196
+ def get_slitf(self, img, spec, slitf, ycen):
197
+ """get the slit function"""
198
+ x = np.indices(img.shape)[0]
199
+ ycen = ycen - ycen.astype(int)
200
+
201
+ if np.any(spec == 0):
202
+ i = np.arange(len(spec))
203
+ try:
204
+ spec = interp1d(
205
+ i[spec != 0], spec[spec != 0], fill_value="extrapolate"
206
+ )(i)
207
+ except ValueError:
208
+ spec[spec == 0] = np.median(spec)
209
+ y = img / spec[None, :]
210
+ y = y.ravel()
211
+
212
+ x = x - ycen + 0.5
213
+ x = x.ravel()
214
+ return x, y
215
+
216
+
217
+ class Swath:
218
+ def __init__(self, nswath):
219
+ self.nswath = nswath
220
+ self.spec = [None] * nswath
221
+ self.slitf = [None] * nswath
222
+ self.model = [None] * nswath
223
+ self.unc = [None] * nswath
224
+ self.mask = [None] * nswath
225
+ self.info = [None] * nswath
226
+
227
+ def __len__(self):
228
+ return self.nswath
229
+
230
+ def __getitem__(self, key):
231
+ return (
232
+ self.spec[key],
233
+ self.slitf[key],
234
+ self.model[key],
235
+ self.unc[key],
236
+ self.mask[key],
237
+ self.info[key],
238
+ )
239
+
240
+ def __setitem__(self, key, value):
241
+ self.spec[key] = value[0]
242
+ self.slitf[key] = value[1]
243
+ self.model[key] = value[2]
244
+ self.unc[key] = value[3]
245
+ self.mask[key] = value[4]
246
+ self.info[key] = value[5]
247
+
248
+
249
+ def fix_parameters(xwd, cr, orders, nrow, ncol, nord, ignore_column_range=False):
250
+ """Fix extraction width and column range, so that all pixels used are within the image.
251
+ I.e. the column range is cut so that the everything is within the image
252
+
253
+ Parameters
254
+ ----------
255
+ xwd : float, array
256
+ Extraction width, either one value for all orders, or the whole array
257
+ cr : 2-tuple(int), array
258
+ Column range, either one value for all orders, or the whole array
259
+ orders : array
260
+ polynomial coefficients that describe each order
261
+ nrow : int
262
+ Number of rows in the image
263
+ ncol : int
264
+ Number of columns in the image
265
+ nord : int
266
+ Number of orders in the image
267
+ ignore_column_range : bool, optional
268
+ if true does not change the column range, however this may lead to problems with the extraction, by default False
269
+
270
+ Returns
271
+ -------
272
+ xwd : array
273
+ fixed extraction width
274
+ cr : array
275
+ fixed column range
276
+ orders : array
277
+ the same orders as before
278
+ """
279
+
280
+ if xwd is None:
281
+ xwd = 0.5
282
+ if np.isscalar(xwd):
283
+ xwd = np.tile([xwd, xwd], (nord, 1))
284
+ else:
285
+ xwd = np.asarray(xwd)
286
+ if xwd.ndim == 1:
287
+ xwd = np.tile(xwd, (nord, 1))
288
+
289
+ if cr is None:
290
+ cr = np.tile([0, ncol], (nord, 1))
291
+ else:
292
+ cr = np.asarray(cr)
293
+ if cr.ndim == 1:
294
+ cr = np.tile(cr, (nord, 1))
295
+
296
+ orders = np.asarray(orders)
297
+
298
+ xwd = np.array([xwd[0], *xwd, xwd[-1]])
299
+ cr = np.array([cr[0], *cr, cr[-1]])
300
+ orders = extend_orders(orders, nrow)
301
+
302
+ xwd = fix_extraction_width(xwd, orders, cr, ncol)
303
+ if not ignore_column_range:
304
+ cr, orders = fix_column_range(cr, orders, xwd, nrow, ncol)
305
+
306
+ orders = orders[1:-1]
307
+ xwd = xwd[1:-1]
308
+ cr = cr[1:-1]
309
+
310
+ return xwd, cr, orders
311
+
312
+
313
+ def extend_orders(orders, nrow):
314
+ """Extrapolate extra orders above and below the existing ones
315
+
316
+ Parameters
317
+ ----------
318
+ orders : array[nord, degree]
319
+ order tracing coefficients
320
+ nrow : int
321
+ number of rows in the image
322
+
323
+ Returns
324
+ -------
325
+ orders : array[nord + 2, degree]
326
+ extended orders
327
+ """
328
+
329
+ nord, ncoef = orders.shape
330
+
331
+ if nord > 1:
332
+ order_low = 2 * orders[0] - orders[1]
333
+ order_high = 2 * orders[-1] - orders[-2]
334
+ else:
335
+ order_low = [0 for _ in range(ncoef)]
336
+ order_high = [0 for _ in range(ncoef - 1)] + [nrow]
337
+
338
+ return np.array([order_low, *orders, order_high])
339
+
340
+
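For illustration, a minimal sketch of the extrapolation rule (two hypothetical linear traces assumed): the padded traces are linear extrapolations of the outermost orders, 2*first - second below and 2*last - second_to_last above.

    import numpy as np
    from pyreduce.extract import extend_orders

    orders = np.array([[0.01, 40.0], [0.01, 60.0]])  # made-up order polynomials
    extended = extend_orders(orders, nrow=100)
    # extended[0] == [0.01, 20.0] and extended[-1] == [0.01, 80.0]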
341
+ def fix_extraction_width(xwd, orders, cr, ncol):
342
+ """Convert fractional extraction width to pixel range
343
+
344
+ Parameters
345
+ ----------
346
+ extraction_width : array[nord, 2]
347
+ current extraction width, in pixels or fractions (for values below 1.5)
348
+ orders : array[nord, degree]
349
+ order tracing coefficients
350
+ column_range : array[nord, 2]
351
+ column range to use
352
+ ncol : int
353
+ number of columns in image
354
+
355
+ Returns
356
+ -------
357
+ extraction_width : array[nord, 2]
358
+ updated extraction width in pixels
359
+ """
360
+
361
+ if not np.all(xwd > 1.5):
362
+ # if extraction width is in relative scale transform to pixel scale
363
+ x = np.arange(ncol)
364
+ for i in range(1, len(xwd) - 1):
365
+ for j in [0, 1]:
366
+ if xwd[i, j] < 1.5:
367
+ k = i - 1 if j == 0 else i + 1
368
+ left = max(cr[[i, k], 0])
369
+ right = min(cr[[i, k], 1])
370
+
371
+ if right < left:
372
+ raise ValueError(
373
+ f"Check your column ranges. Orders {i} and {k} are weird"
374
+ )
375
+
376
+ current = np.polyval(orders[i], x[left:right])
377
+ below = np.polyval(orders[k], x[left:right])
378
+ xwd[i, j] *= np.min(np.abs(current - below))
379
+
380
+ xwd[0] = xwd[1]
381
+ xwd[-1] = xwd[-2]
382
+
383
+ xwd = np.ceil(xwd).astype(int)
384
+
385
+ return xwd
386
+
387
+
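For reference, a minimal sketch of the scaling rule applied above, with two made-up constant order traces 20 pixels apart: a fractional width of 0.5 becomes half the minimum inter-order distance, rounded up to whole pixels.

    import numpy as np

    # hypothetical traces: two flat orders at y = 40 and y = 60
    orders = np.array([[0.0, 40.0], [0.0, 60.0]])
    x = np.arange(100)

    distance = np.abs(np.polyval(orders[0], x) - np.polyval(orders[1], x)).min()  # 20 px
    pixel_width = int(np.ceil(0.5 * distance))  # fractional width 0.5 -> 10 px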
388
+ def fix_column_range(column_range, orders, extraction_width, nrow, ncol):
389
+ """Fix the column range, so that no pixels outside the image will be accessed (Thus avoiding errors)
390
+
391
+ Parameters
392
+ ----------
393
+ img : array[nrow, ncol]
394
+ image
395
+ orders : array[nord, degree]
396
+ order tracing coefficients
397
+ extraction_width : array[nord, 2]
398
+ extraction width in pixels, (below, above)
399
+ column_range : array[nord, 2]
400
+ current column range
401
+ no_clip : bool, optional
402
+ if False, new column range will be smaller or equal to current column range, otherwise it can also be larger (default: False)
403
+
404
+ Returns
405
+ -------
406
+ column_range : array[nord, 2]
407
+ updated column range
408
+ """
409
+
410
+ ix = np.arange(ncol)
411
+ to_remove = []
412
+ # Loop over the real orders, skipping the extrapolated ones at the edges
413
+ for i, order in zip(range(1, len(orders) - 1), orders[1:-1], strict=False):
414
+ # Shift order trace up/down by extraction_width
415
+ coeff_bot, coeff_top = np.copy(order), np.copy(order)
416
+ coeff_bot[-1] -= extraction_width[i, 0]
417
+ coeff_top[-1] += extraction_width[i, 1]
418
+
419
+ y_bot = np.polyval(coeff_bot, ix) # low edge of arc
420
+ y_top = np.polyval(coeff_top, ix) # high edge of arc
421
+
422
+ # find regions of pixels inside the image
423
+ # then use the region that most closely resembles the existing column range (from order tracing)
424
+ # but clip it to the existing column range (order tracing polynomials are not well defined outside the original range)
425
+ points_in_image = np.where((y_bot >= 0) & (y_top < nrow))[0]
426
+
427
+ if len(points_in_image) == 0:
428
+ # print(y_bot, y_top,nrow, ncol, points_in_image)
429
+ logger.warning(
430
+ f"No pixels are completely within the extraction width for order {i}, removing it."
431
+ )
432
+ to_remove += [i]
433
+ continue
434
+
435
+ regions = np.where(np.diff(points_in_image) != 1)[0]
436
+ regions = [(r, r + 1) for r in regions]
437
+ regions = [
438
+ points_in_image[0],
439
+ *points_in_image[(regions,)].ravel(),
440
+ points_in_image[-1],
441
+ ]
442
+ regions = [[regions[i], regions[i + 1] + 1] for i in range(0, len(regions), 2)]
443
+ overlap = [
444
+ min(reg[1], column_range[i, 1]) - max(reg[0], column_range[i, 0])
445
+ for reg in regions
446
+ ]
447
+ iregion = np.argmax(overlap)
448
+ column_range[i] = np.clip(
449
+ regions[iregion], column_range[i, 0], column_range[i, 1]
450
+ )
451
+
452
+ column_range[0] = column_range[1]
453
+ column_range[-1] = column_range[-2]
454
+
455
+ if len(to_remove) > 0:
456
+ column_range = np.delete(column_range, to_remove, axis=0)
457
+ orders = np.delete(orders, to_remove, axis=0)
458
+
459
+ return column_range, orders
460
+
461
+
462
+ def make_bins(swath_width, xlow, xhigh, ycen):
463
+ """Create bins for the swathes
464
+ Bins are roughly equal in size, have a length of roughly swath_width (if given),
465
+ and overlap roughly half-half with each other
466
+
467
+ Parameters
468
+ ----------
469
+ swath_width : {int, None}
470
+ initial value for the swath_width, bins will have roughly that size, but exact value may change
471
+ if swath_width is None, determine a good value, from the data
472
+ xlow : int
473
+ lower bound for x values
474
+ xhigh : int
475
+ upper bound for x values
476
+ ycen : array[ncol]
477
+ center of the order trace
478
+
479
+ Returns
480
+ -------
481
+ nbin : int
482
+ number of bins
483
+ bins_start : array[nbin]
484
+ left(beginning) side of the bins
485
+ bins_end : array[nbin]
486
+ right(ending) side of the bins
487
+ """
488
+
489
+ if swath_width is None:
490
+ ncol = len(ycen)
491
+ i = np.unique(ycen.astype(int)) # Points of row crossing
492
+ # ni = len(i) # This is how many times this order crosses to the next row
493
+ if len(i) > 1: # Curved order crosses rows
494
+ i = np.sum(i[1:] - i[:-1]) / (len(i) - 1)
495
+ nbin = np.clip(
496
+ int(np.round(ncol / i)) // 3, 3, 20
497
+ ) # number of swaths along the order
498
+ else: # Perfectly aligned orders
499
+ nbin = np.clip(ncol // 400, 3, None) # Still follow the changes in PSF
500
+ nbin = nbin * (xhigh - xlow) // ncol # Adjust for the true order length
501
+ else:
502
+ nbin = np.clip(int(np.round((xhigh - xlow) / swath_width)), 1, None)
503
+
504
+ bins = np.linspace(xlow, xhigh, 2 * nbin + 1) # boundaries of bins
505
+ bins_start = np.ceil(bins[:-2]).astype(int) # beginning of each bin
506
+ bins_end = np.floor(bins[2:]).astype(int) # end of each bin
507
+
508
+ return nbin, bins_start, bins_end
509
+
510
+
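A usage sketch with toy numbers (perfectly straight trace assumed, and the module importable from the wheel): an explicit swath_width of 300 on a 2048-pixel order gives 7 bins, i.e. 13 half-overlapping swaths of roughly 290 pixels each.

    import numpy as np
    from pyreduce.extract import make_bins

    ycen = np.full(2048, 100.0)  # straight order trace
    nbin, bins_start, bins_end = make_bins(swath_width=300, xlow=0, xhigh=2048, ycen=ycen)
    # nbin == 7, len(bins_start) == 2 * nbin - 1 == 13, each swath ~292 px wide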
511
+ def calc_telluric_correction(telluric, img): # pragma: no cover
512
+ """Calculate telluric correction
513
+
514
+ If telluric is set to a specific integer larger than 1, it is used as the
515
+ offset from the order center line. The sky is then estimated by computing
516
+ median signal between this offset and the upper/lower limit of the
517
+ extraction window.
518
+
519
+ Parameters
520
+ ----------
521
+ telluric : int
522
+ telluric correction parameter
523
+ img : array
524
+ image of the swath
525
+
526
+ Returns
527
+ -------
528
+ tell : array
529
+ telluric correction
530
+ """
531
+ width, height = img.shape
532
+
533
+ tel_lim = telluric if telluric > 5 and telluric < height / 2 else min(5, height / 3)
534
+ tel = np.sum(img, axis=0)
535
+ itel = np.arange(height)
536
+ itel = itel[np.abs(itel - height / 2) >= tel_lim]
537
+ tel = img[itel, :]
538
+ sc = np.zeros(width)
539
+
540
+ for itel in range(width):
541
+ sc[itel] = np.ma.median(tel[itel])
542
+
543
+ return sc
544
+
545
+
546
+ def calc_scatter_correction(scatter, index):
547
+ """Calculate scatter correction
548
+ by interpolating between values?
549
+
550
+ Parameters
551
+ ----------
552
+ scatter : array of shape (degree_x, degree_y)
553
+ 2D polynomial coefficients of the background scatter
554
+ index : tuple (array, array)
555
+ indices of the swath within the overall image
556
+
557
+ Returns
558
+ -------
559
+ scatter_correction : array of shape (swath_width, swath_height)
560
+ correction for scattered light
561
+ """
562
+
563
+ # The indices in the image are switched
564
+ y, x = index
565
+ scatter_correction = np.polynomial.polynomial.polyval2d(x, y, scatter)
566
+ return scatter_correction
567
+
568
+
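A toy sketch of the evaluation (made-up polynomial coefficients): the scatter coefficients form a 2D polynomial that is evaluated at the swath's pixel coordinates, with rows and columns swapped as noted in the code.

    import numpy as np
    from pyreduce.extract import calc_scatter_correction

    # hypothetical background: b(x, y) = 1 + 0.01 * x + 0.02 * y
    scatter = np.array([[1.0, 0.02], [0.01, 0.0]])
    index = np.indices((3, 5))  # (rows, cols) of a tiny 3 x 5 swath
    background = calc_scatter_correction(scatter, index)  # shape (3, 5)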
569
+ def extract_spectrum(
570
+ img,
571
+ ycen,
572
+ yrange,
573
+ xrange,
574
+ gain=1,
575
+ readnoise=0,
576
+ lambda_sf=0.1,
577
+ lambda_sp=0,
578
+ osample=1,
579
+ swath_width=None,
580
+ maxiter=20,
581
+ telluric=None,
582
+ scatter=None,
583
+ normalize=False,
584
+ threshold=0,
585
+ tilt=None,
586
+ shear=None,
587
+ plot=False,
588
+ plot_title=None,
589
+ im_norm=None,
590
+ im_ordr=None,
591
+ out_spec=None,
592
+ out_sunc=None,
593
+ out_slitf=None,
594
+ out_mask=None,
595
+ progress=None,
596
+ ord_num=0,
597
+ **kwargs,
598
+ ):
599
+ """
600
+ Extract the spectrum of a single order from an image
601
+ The order is split into several swathes of roughly swath_width length, which overlap half-half
602
+ For each swath a spectrum and slitfunction are extracted
603
+ Overlapping sections are combined using linear weights (strongest at the swath centre, falling off to the edges)
604
+ Here is the layout for the bins:
605
+
606
+ ::
607
+
608
+ 1st swath 3rd swath 5th swath ...
609
+ /============|============|============|============|============|
610
+
611
+ 2nd swath 4th swath 6th swath
612
+ |------------|------------|------------|------------|
613
+ |.....|
614
+ overlap
615
+
616
+ + ******* 1
617
+ + *
618
+ + *
619
+ * weights (+) previous swath, (*) current swath
620
+ * +
621
+ * +
622
+ * +++++++ 0
623
+
624
+ Parameters
625
+ ----------
626
+ img : array[nrow, ncol]
627
+ observation (or similar)
628
+ ycen : array[ncol]
629
+ order trace of the current order
630
+ yrange : tuple(int, int)
631
+ extraction width in pixels, below and above
632
+ xrange : tuple(int, int)
633
+ columns range to extract (low, high)
634
+ gain : float, optional
635
+ adu to electron, amplifier gain (default: 1)
636
+ readnoise : float, optional
637
+ read out noise factor (default: 0)
638
+ lambda_sf : float, optional
639
+ slit function smoothing parameter, usually very small (default: 0.1)
640
+ lambda_sp : int, optional
641
+ spectrum smoothing parameter, usually very small (default: 0)
642
+ osample : int, optional
643
+ oversampling factor, i.e. how many subpixels to create per pixel (default: 1, i.e. no oversampling)
644
+ swath_width : int, optional
645
+ swath width suggestion, actual width depends also on ncol, see make_bins (default: None, which will determine the width based on the order tracing)
646
+ telluric : {float, None}, optional
647
+ telluric correction factor (default: None, i.e. no telluric correction)
648
+ scatter : {array, None}, optional
649
+ background scatter as 2d polynomial coefficients (default: None, no correction)
650
+ normalize : bool, optional
651
+ whether to create a normalized image. If true, im_norm and im_ordr are used as output (default: False)
652
+ threshold : int, optional
653
+ threshold for normalization (default: 0)
654
+ tilt : array[ncol], optional
655
+ The tilt (1st order curvature) of the slit in this order for the curved extraction (default: None, i.e. tilt = 0)
656
+ shear : array[ncol], optional
657
+ The shear (2nd order curvature) of the slit in this order for the curved extraction (default: None, i.e. shear = 0)
658
+ plot : bool, optional
659
+ whether to plot the progress; plotting will slow down the procedure significantly (default: False)
660
+ ord_num : int, optional
661
+ current order number, just for plotting (default: 0)
662
+ im_norm : array[nrow, ncol], optional
663
+ normalized image, only output if normalize is True (default: None)
664
+ im_ordr : array[nrow, ncol], optional
665
+ image of the order blaze, only output if normalize is True (default: None)
666
+
667
+ Returns
668
+ -------
669
+ spec : array[ncol]
670
+ extracted spectrum
671
+ slitf : array[nslitf]
672
+ extracted slitfunction
673
+ mask : array[ncol]
674
+ mask of the column range to use in the spectrum
675
+ unc : array[ncol]
676
+ uncertainty on the spectrum
677
+ """
678
+
679
+ _, ncol = img.shape
680
+ ylow, yhigh = yrange
681
+ xlow, xhigh = xrange
682
+ nslitf = osample * (ylow + yhigh + 2) + 1
684
+
685
+ ycen_int = np.floor(ycen).astype(int)
686
+
687
+ spec = np.zeros(ncol) if out_spec is None else out_spec
688
+ sunc = np.zeros(ncol) if out_sunc is None else out_sunc
689
+ mask = np.full(ncol, False) if out_mask is None else out_mask
690
+ slitf = np.zeros(nslitf) if out_slitf is None else out_slitf
691
+
692
+ nbin, bins_start, bins_end = make_bins(swath_width, xlow, xhigh, ycen)
693
+ nswath = 2 * nbin - 1
694
+ swath = Swath(nswath)
695
+ margin = np.zeros((nswath, 2), int)
696
+
697
+ if normalize:
698
+ norm_img = [None] * nswath
699
+ norm_model = [None] * nswath
700
+
701
+ # Perform slit decomposition within each swath stepping through the order with
702
+ # half swath width. Spectra for each decomposition are combined with linear weights.
703
+ with tqdm(
704
+ enumerate(zip(bins_start, bins_end, strict=False)),
705
+ total=len(bins_start),
706
+ leave=False,
707
+ desc="Swath",
708
+ ) as t:
709
+ for ihalf, (ibeg, iend) in t:
710
+ logger.debug("Extracting Swath %i, Columns: %i - %i", ihalf, ibeg, iend)
711
+
712
+ # Cut out swath from image
713
+ index = make_index(ycen_int - ylow, ycen_int + yhigh, ibeg, iend)
714
+ swath_img = img[index]
715
+ swath_ycen = ycen[ibeg:iend]
716
+
717
+ # Corrections
718
+ # TODO: what is it even supposed to do?
719
+ if telluric is not None: # pragma: no cover
720
+ telluric_correction = calc_telluric_correction(telluric, swath_img)
721
+ else:
722
+ telluric_correction = 0
723
+
724
+ if scatter is not None:
725
+ scatter_correction = calc_scatter_correction(scatter, index)
726
+ else:
727
+ scatter_correction = 0
728
+
729
+ swath_img -= scatter_correction + telluric_correction
730
+
731
+ # Do Slitfunction extraction
732
+ swath_tilt = tilt[ibeg:iend] if tilt is not None else 0
733
+ swath_shear = shear[ibeg:iend] if shear is not None else 0
734
+ swath[ihalf] = slitfunc_curved(
735
+ swath_img,
736
+ swath_ycen,
737
+ swath_tilt,
738
+ swath_shear,
739
+ lambda_sp=lambda_sp,
740
+ lambda_sf=lambda_sf,
741
+ osample=osample,
742
+ yrange=yrange,
743
+ maxiter=maxiter,
744
+ gain=gain,
745
+ )
746
+ t.set_postfix(chi=f"{swath[ihalf][5][1]:1.2f}")
747
+
748
+ if normalize:
749
+ # Save image and model for later
750
+ # Use np.divide to avoid divisions by zero
751
+ where = swath.model[ihalf] > threshold / gain
752
+ norm_img[ihalf] = np.ones_like(swath.model[ihalf])
753
+ np.divide(
754
+ np.abs(swath_img),
755
+ swath.model[ihalf],
756
+ where=where,
757
+ out=norm_img[ihalf],
758
+ )
759
+ norm_model[ihalf] = swath.model[ihalf]
760
+
761
+ if plot >= 2 and not np.all(np.isnan(swath_img)): # pragma: no cover
762
+ if progress is None:
763
+ progress = ProgressPlot(
764
+ swath_img.shape[0], swath_img.shape[1], nslitf, title=plot_title
765
+ )
766
+ progress.plot(
767
+ swath_img,
768
+ swath.spec[ihalf],
769
+ swath.slitf[ihalf],
770
+ swath.model[ihalf],
771
+ swath_ycen,
772
+ swath.mask[ihalf],
773
+ ord_num,
774
+ ibeg,
775
+ iend,
776
+ )
777
+
778
+ # Remove points at the border of each swath if the order has tilt,
779
+ # as those pixels have bad information
780
+ for i in range(nswath):
781
+ margin[i, :] = int(swath.info[i][4]) + 1
782
+
783
+ # Weight for combining swaths
784
+ weight = [np.ones(bins_end[i] - bins_start[i]) for i in range(nswath)]
785
+ weight[0][: margin[0, 0]] = 0
786
+ weight[-1][len(weight[-1]) - margin[-1, 1] :] = 0
787
+ for i, j in zip(range(0, nswath - 1), range(1, nswath), strict=False):
788
+ width = bins_end[i] - bins_start[i]
789
+ overlap = bins_end[i] - bins_start[j]
790
+
791
+ # Start and end indices for the two swaths
792
+ start_i = width - overlap + margin[j, 0]
793
+ end_i = width - margin[i, 1]
794
+
795
+ start_j = margin[j, 0]
796
+ end_j = overlap - margin[i, 1]
797
+
798
+ # Weights for the overlap run from 0 to 1, excluding the endpoint values themselves
799
+ triangle = np.linspace(0, 1, overlap + 1, endpoint=False)[1:]
800
+ # Cut away the margins at the corners
801
+ triangle = triangle[margin[j, 0] : len(triangle) - margin[i, 1]]
802
+
803
+ # Set values
804
+ weight[i][start_i:end_i] = 1 - triangle
805
+ weight[j][start_j:end_j] = triangle
806
+
807
+ # Don't use the pixels at the edges (due to curvature)
808
+ weight[i][end_i:] = 0
809
+ weight[j][:start_j] = 0
810
+
811
+ # Update column range
812
+ xrange[0] += margin[0, 0]
813
+ xrange[1] -= margin[-1, 1]
814
+ mask[: xrange[0]] = True
815
+ mask[xrange[1] :] = True
816
+
817
+ # Apply weights
818
+ for i, (ibeg, iend) in enumerate(zip(bins_start, bins_end, strict=False)):
819
+ spec[ibeg:iend] += swath.spec[i] * weight[i]
820
+ sunc[ibeg:iend] += swath.unc[i] * weight[i]
821
+
822
+ if normalize:
823
+ for i, (ibeg, iend) in enumerate(zip(bins_start, bins_end, strict=False)):
824
+ index = make_index(ycen_int - ylow, ycen_int + yhigh, ibeg, iend)
825
+ im_norm[index] += norm_img[i] * weight[i]
826
+ im_ordr[index] += norm_model[i] * weight[i]
827
+
828
+ slitf[:] = np.mean(swath.slitf, axis=0)
829
+ sunc[:] = np.sqrt(sunc**2 + (readnoise / gain) ** 2)
830
+ return spec, slitf, mask, sunc
831
+
832
+
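The half-overlap combination described in the docstring above amounts to a linear cross-fade between neighbouring swaths; a standalone sketch with two fake swath spectra and no edge margins:

    import numpy as np

    overlap = 10
    spec_a = np.full(30, 1.0)  # previous swath, columns 0..29
    spec_b = np.full(30, 2.0)  # next swath, columns 20..49

    # linear ramp over the overlap, endpoints excluded (as in extract_spectrum)
    ramp = np.linspace(0, 1, overlap + 1, endpoint=False)[1:]

    combined = np.zeros(50)
    combined[:20] = spec_a[:20]
    combined[20:30] = spec_a[20:] * (1 - ramp) + spec_b[:10] * ramp
    combined[30:] = spec_b[10:]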
833
+ def model(spec, slitf):
834
+ return spec[None, :] * slitf[:, None]
835
+
836
+
837
+ def get_y_scale(ycen, xrange, extraction_width, nrow):
838
+ """Calculate the y limits of the order
839
+ This is especially important at the edges
840
+
841
+ Parameters
842
+ ----------
843
+ ycen : array[ncol]
844
+ order trace
845
+ xrange : tuple(int, int)
846
+ column range
847
+ extraction_width : tuple(int, int)
848
+ extraction width in pixels below and above the order
849
+ nrow : int
850
+ number of rows in the image, defines upper edge
851
+
852
+ Returns
853
+ -------
854
+ y_low, y_high : int, int
855
+ lower and upper y bound for extraction
856
+ """
857
+ ycen = ycen[xrange[0] : xrange[1]]
858
+
859
+ ymin = ycen - extraction_width[0]
860
+ ymin = np.floor(ymin)
861
+ if min(ymin) < 0:
862
+ ymin = ymin - min(ymin) # help for orders at edge
863
+ if max(ymin) >= nrow:
864
+ ymin = ymin - max(ymin) + nrow - 1 # helps at edge
865
+
866
+ ymax = ycen + extraction_width[1]
867
+ ymax = np.ceil(ymax)
868
+ if max(ymax) >= nrow:
869
+ ymax = ymax - max(ymax) + nrow - 1 # helps at edge
870
+
871
+ # Define a fixed height area containing one spectral order
872
+ y_lower_lim = int(np.min(ycen - ymin)) # Pixels below center line
873
+ y_upper_lim = int(np.min(ymax - ycen)) # Pixels above center line
874
+
875
+ return y_lower_lim, y_upper_lim
876
+
877
+
878
+ def optimal_extraction(
879
+ img,
880
+ orders,
881
+ extraction_width,
882
+ column_range,
883
+ tilt,
884
+ shear,
885
+ plot=False,
886
+ plot_title=None,
887
+ **kwargs,
888
+ ):
889
+ """Use optimal extraction to get spectra
890
+
891
+ This function just loops over the orders; the actual work is done in extract_spectrum
892
+
893
+ Parameters
894
+ ----------
895
+ img : array[nrow, ncol]
896
+ image to extract
897
+ orders : array[nord, degree]
898
+ order tracing coefficients
899
+ extraction_width : array[nord, 2]
900
+ extraction width in pixels
901
+ column_range : array[nord, 2]
902
+ column range to use
903
+ tilt, shear : array[nord, ncol] or None
904
+ 1st order (tilt) and 2nd order (shear) slit curvature for each order (or None)
905
+ **kwargs
906
+ other parameters for the extraction (see extract_spectrum)
907
+
908
+ Returns
909
+ -------
910
+ spectrum : array[nord, ncol]
911
+ extracted spectrum
912
+ slitfunction : array[nord, nslitf]
913
+ recovered slitfunction
914
+ uncertainties: array[nord, ncol]
915
+ uncertainties on the spectrum
916
+ """
917
+
918
+ logger.info("Using optimal extraction to produce spectrum")
919
+
920
+ nrow, ncol = img.shape
921
+ nord = len(orders)
922
+
923
+ spectrum = np.zeros((nord, ncol))
924
+ uncertainties = np.zeros((nord, ncol))
925
+ slitfunction = [None for _ in range(nord)]
926
+
927
+ if tilt is None:
928
+ tilt = [None for _ in range(nord)]
929
+ if shear is None:
930
+ shear = [None for _ in range(nord)]
931
+
932
+ # Add mask as defined by column ranges
933
+ mask = np.full((nord, ncol), True)
934
+ for i in range(nord):
935
+ mask[i, column_range[i, 0] : column_range[i, 1]] = False
936
+ spectrum = np.ma.array(spectrum, mask=mask)
937
+ uncertainties = np.ma.array(uncertainties, mask=mask)
938
+
939
+ ix = np.arange(ncol)
940
+ if plot >= 2: # pragma: no cover
941
+ ncol_swath = kwargs.get("swath_width", img.shape[1] // 400)
942
+ nrow_swath = np.sum(extraction_width, axis=1).max()
943
+ nslitf_swath = (nrow_swath + 2) * kwargs.get("osample", 1) + 1
944
+ progress = ProgressPlot(nrow_swath, ncol_swath, nslitf_swath, title=plot_title)
945
+ else:
946
+ progress = None
947
+
948
+ for i in tqdm(range(nord), desc="Order"):
949
+ logger.debug("Extracting relative order %i out of %i", i + 1, nord)
950
+
951
+ # Define a fixed height area containing one spectral order
952
+ ycen = np.polyval(orders[i], ix)
953
+ yrange = get_y_scale(ycen, column_range[i], extraction_width[i], nrow)
954
+
955
+ osample = kwargs.get("osample", 1)
956
+ slitfunction[i] = np.zeros(osample * (sum(yrange) + 2) + 1)
957
+
958
+ # Return values are set by reference, as the out parameters
959
+ # Also column_range is adjusted depending on the shear
960
+ # This is to avoid large chunks of memory of essentially duplicates
961
+ extract_spectrum(
962
+ img,
963
+ ycen,
964
+ yrange,
965
+ column_range[i],
966
+ tilt=tilt[i],
967
+ shear=shear[i],
968
+ out_spec=spectrum[i],
969
+ out_sunc=uncertainties[i],
970
+ out_slitf=slitfunction[i],
971
+ out_mask=mask[i],
972
+ progress=progress,
973
+ ord_num=i + 1,
974
+ plot=plot,
975
+ plot_title=plot_title,
976
+ **kwargs,
977
+ )
978
+
979
+ if plot >= 2: # pragma: no cover
980
+ progress.close()
981
+
982
+ if plot: # pragma: no cover
983
+ plot_comparison(
984
+ img,
985
+ orders,
986
+ spectrum,
987
+ slitfunction,
988
+ extraction_width,
989
+ column_range,
990
+ title=plot_title,
991
+ )
992
+
993
+ return spectrum, slitfunction, uncertainties
994
+
995
+
996
+ def correct_for_curvature(img_order, tilt, shear, xwd):
997
+ # img_order = np.ma.filled(img_order, np.nan)
998
+ mask = ~np.ma.getmaskarray(img_order)
999
+
1000
+ xt = np.arange(img_order.shape[1])
1001
+ for y, yt in zip(range(xwd[0] + xwd[1]), range(-xwd[0], xwd[1]), strict=False):
1002
+ xi = xt + yt * tilt + yt**2 * shear
1003
+ img_order[y] = np.interp(
1004
+ xi, xt[mask[y]], img_order[y][mask[y]], left=0, right=0
1005
+ )
1006
+
1007
+ xt = np.arange(img_order.shape[0])
1008
+ for x in range(img_order.shape[1]):
1009
+ img_order[:, x] = np.interp(
1010
+ xt, xt[mask[:, x]], img_order[:, x][mask[:, x]], left=0, right=0
1011
+ )
1012
+
1013
+ return img_order
1014
+
1015
+
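To illustrate the per-row resampling performed above (toy numbers, no masked pixels): a row sitting yt pixels from the order centre is read off at the shifted positions x + yt*tilt + yt**2*shear, which straightens the tilted slit image.

    import numpy as np

    tilt, shear = 0.2, 0.0           # made-up curvature values
    xt = np.arange(50, dtype=float)  # columns of the rectified swath
    yt = 3                           # three rows above the order centre
    row = np.sin(xt / 5)             # fake data in that row

    xi = xt + yt * tilt + yt**2 * shear
    row_straightened = np.interp(xi, xt, row, left=0, right=0)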
1016
+ def model_image(img, xwd, tilt, shear):
1017
+ # Correct image for curvature
1019
+ img = correct_for_curvature(img, tilt, shear, xwd)
1020
+ # Find slitfunction using the median to avoid outliers
1021
+ slitf = np.ma.median(img, axis=1)
1022
+ slitf /= np.ma.sum(slitf)
1023
+ # Use the slitfunction to find spectrum
1024
+ spec = np.ma.median(img / slitf[:, None], axis=0)
1025
+ # Create model from slitfunction and spectrum
1026
+ model = spec[None, :] * slitf[:, None]
1027
+ # Reapply curvature to the model
1028
+ model = correct_for_curvature(model, -tilt, -shear, xwd)
1029
+ return model, spec, slitf
1030
+
1031
+
1032
+ def get_mask(img, model):
1033
+ # 99.73 = 3 sigma, 2 * 3 = 6 sigma
1034
+ residual = np.ma.abs(img - model)
1035
+ median, vmax = np.percentile(np.ma.compressed(residual), (50, 99.73))
1036
+ vmax = median + 2 * (vmax - median)
1037
+ return residual > vmax
1038
+
1039
+
1040
+ def arc_extraction(
1041
+ img,
1042
+ orders,
1043
+ extraction_width,
1044
+ column_range,
1045
+ gain=1,
1046
+ readnoise=0,
1047
+ dark=0,
1048
+ plot=False,
1049
+ plot_title=None,
1050
+ tilt=None,
1051
+ shear=None,
1052
+ collapse_function="median",
1053
+ **kwargs,
1054
+ ):
1055
+ """Use "simple" arc extraction to get a spectrum
1056
+ Arc extraction simply sums the pixels orthogonal to the order over the extraction width
1057
+
1058
+ This extraction makes a few rough assumptions and does not provide the most accurate results,
1059
+ but rather a good approximation
1060
+
1061
+ Parameters
1062
+ ----------
1063
+ img : array[nrow, ncol]
1064
+ image to extract
1065
+ orders : array[nord, order]
1066
+ order tracing coefficients
1067
+ extraction_width : array[nord, 2]
1068
+ extraction width in pixels
1069
+ column_range : array[nord, 2]
1070
+ column range to use
1071
+ gain : float, optional
1072
+ adu to electron, amplifier gain (default: 1)
1073
+ readnoise : float, optional
1074
+ read out noise (default: 0)
1075
+ dark : float, optional
1076
+ dark current noise (default: 0)
1077
+ plot : bool, optional
1078
+ whether to plot the results (default: False)
1079
+
1080
+ Returns
1081
+ -------
1082
+ spectrum : array[nord, ncol]
1083
+ extracted spectrum
1084
+ uncertainties : array[nord, ncol]
1085
+ uncertainties on extracted spectrum
1086
+ """
1087
+
1088
+ logger.info("Using arc extraction to produce spectrum")
1089
+ _, ncol = img.shape
1090
+ nord, _ = orders.shape
1091
+
1092
+ spectrum = np.zeros((nord, ncol))
1093
+ uncertainties = np.zeros((nord, ncol))
1094
+
1095
+ # Add mask as defined by column ranges
1096
+ mask = np.full((nord, ncol), True)
1097
+ for i in range(nord):
1098
+ mask[i, column_range[i, 0] : column_range[i, 1]] = False
1099
+ spectrum = np.ma.array(spectrum, mask=mask)
1100
+ uncertainties = np.ma.array(uncertainties, mask=mask)
1101
+
1102
+ x = np.arange(ncol)
1103
+
1104
+ for i in tqdm(range(nord), desc="Order"):
1105
+ logger.debug("Calculating order %i out of %i", i + 1, nord)
1106
+
1107
+ x_left_lim = column_range[i, 0]
1108
+ x_right_lim = column_range[i, 1]
1109
+
1110
+ # Rectify the image, i.e. remove the shape of the order
1111
+ # Then the center of the order is within one pixel variations
1112
+ ycen = np.polyval(orders[i], x).astype(int)
1113
+ yb, yt = ycen - extraction_width[i, 0], ycen + extraction_width[i, 1]
1115
+ index = make_index(yb, yt, x_left_lim, x_right_lim)
1116
+ img_order = img[index]
1117
+
1118
+ # Correct for tilt and shear
1119
+ # For each row of the rectified order, interpolate onto the shifted row
1120
+ # Masked pixels are set to 0, similar to the summation
1121
+ if tilt is not None and shear is not None:
1122
+ img_order = correct_for_curvature(
1123
+ img_order,
1124
+ tilt[i, x_left_lim:x_right_lim],
1125
+ shear[i, x_left_lim:x_right_lim],
1126
+ extraction_width[i],
1127
+ )
1128
+
1129
+ # Sum over the prepared image
1130
+ if collapse_function == "sum":
1131
+ arc = np.ma.sum(img_order, axis=0)
1132
+ elif collapse_function == "mean":
1133
+ arc = np.ma.mean(img_order, axis=0) * img_order.shape[0]
1134
+ elif collapse_function == "median":
1135
+ arc = np.ma.median(img_order, axis=0) * img_order.shape[0]
1136
+ else:
1137
+ raise ValueError(
1138
+ f"Could not determine the arc method, expected one of ('sum', 'mean', 'median'), but got {collapse_function}"
1139
+ )
1140
+
1141
+ # Store results
1142
+ spectrum[i, x_left_lim:x_right_lim] = arc
1143
+ uncertainties[i, x_left_lim:x_right_lim] = (
1144
+ np.sqrt(np.abs(arc * gain + dark + readnoise**2)) / gain
1145
+ )
1146
+
1147
+ if plot: # pragma: no cover
1148
+ plot_comparison(
1149
+ img,
1150
+ orders,
1151
+ spectrum,
1152
+ None,
1153
+ extraction_width,
1154
+ column_range,
1155
+ title=plot_title,
1156
+ )
1157
+
1158
+ return spectrum, uncertainties
1159
+
1160
+
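A quick sketch of why the rescaled 'median' collapse is the default above: with a single cosmic-ray hit in a 5-row swath, 'sum' carries the hit into the spectrum while the median (times the number of rows) suppresses it. Toy data only:

    import numpy as np

    img_order = np.ma.masked_invalid(np.ones((5, 100)))  # 5-row swath of one order
    img_order[2, 40] = 1000.0                            # single cosmic-ray hit

    arc_sum = np.ma.sum(img_order, axis=0)                             # 1004 at column 40
    arc_median = np.ma.median(img_order, axis=0) * img_order.shape[0]  # 5 at column 40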
1161
+ def plot_comparison(
1162
+ original, orders, spectrum, slitf, extraction_width, column_range, title=None
1163
+ ): # pragma: no cover
1164
+ nrow, ncol = original.shape
1165
+ nord = len(orders)
1166
+ output = np.zeros((np.sum(extraction_width) + nord, ncol))
1167
+ pos = [0]
1168
+ x = np.arange(ncol)
1169
+ for i in range(nord):
1170
+ ycen = np.polyval(orders[i], x)
1171
+ yb = ycen - extraction_width[i, 0]
1172
+ yt = ycen + extraction_width[i, 1]
1173
+ xl, xr = column_range[i]
1174
+ index = make_index(yb, yt, xl, xr)
1175
+ yl = pos[i]
1176
+ yr = pos[i] + index[0].shape[0]
1177
+ output[yl:yr, xl:xr] = original[index]
1178
+
1179
+ vmin, vmax = np.percentile(output[yl:yr, xl:xr], (5, 95))
1180
+ output[yl:yr, xl:xr] = np.clip(output[yl:yr, xl:xr], vmin, vmax)
1181
+ output[yl:yr, xl:xr] -= vmin
1182
+ output[yl:yr, xl:xr] /= vmax - vmin
1183
+
1184
+ pos += [yr]
1185
+
1186
+ plt.imshow(output, origin="lower", aspect="auto")
1187
+
1188
+ for i in range(nord):
1189
+ try:
1190
+ tmp = spectrum[i, column_range[i, 0] : column_range[i, 1]]
1191
+ # if len(tmp)
1192
+ vmin = np.min(tmp[tmp != 0])
1193
+ tmp = np.copy(spectrum[i])
1194
+ tmp[tmp != 0] -= vmin
1195
+ np.log(tmp, out=tmp, where=tmp > 0)
1196
+ tmp = tmp / np.max(tmp) * 0.9 * (pos[i + 1] - pos[i])
1197
+ tmp += pos[i]
1198
+ tmp[tmp < pos[i]] = pos[i]
1199
+ plt.plot(x, tmp, "r")
1200
+ except Exception:
1201
+ pass
1202
+
1203
+ locs = np.sum(extraction_width, axis=1) + 1
1204
+ locs = np.array([0, *np.cumsum(locs)[:-1]])
1205
+ locs[:-1] += (np.diff(locs) * 0.5).astype(int)
1206
+ locs[-1] += ((output.shape[0] - locs[-1]) * 0.5).astype(int)
1207
+ plt.yticks(locs, range(len(locs)))
1208
+
1209
+ plot_title = "Extracted Spectrum vs. Rectified Image"
1210
+ if title is not None:
1211
+ plot_title = f"{title}\n{plot_title}"
1212
+ plt.title(plot_title)
1213
+ plt.xlabel("x [pixel]")
1214
+ plt.ylabel("order")
1215
+ plt.show()
1216
+
1217
+
1218
+ def extract(
1219
+ img,
1220
+ orders,
1221
+ column_range=None,
1222
+ order_range=None,
1223
+ extraction_width=0.5,
1224
+ extraction_type="optimal",
1225
+ tilt=None,
1226
+ shear=None,
1227
+ sigma_cutoff=0,
1228
+ **kwargs,
1229
+ ):
1230
+ """
1231
+ Extract the spectrum from an image
1232
+
1233
+ Parameters
1234
+ ----------
1235
+ img : array[nrow, ncol](float)
1236
+ observation to extract
1237
+ orders : array[nord, degree](float)
1238
+ polynomial coefficients of the order tracing
1239
+ column_range : array[nord, 2](int), optional
1240
+ range of pixels to use for each order (default: use all)
1241
+ order_range : array[2](int), optional
1242
+ range of orders to extract, orders have to be consecutive (default: use all)
1243
+ extraction_width : array[nord, 2]({float, int}), optional
1244
+ extraction width above and below each order, values below 1.5 are considered relative, while values above are absolute (default: 0.5)
1245
+ extraction_type : {"optimal", "arc", "normalize"}, optional
1246
+ which extraction algorithm to use, "optimal" uses optimal extraction, "arc" uses simple arc extraction, and "normalize" also uses optimal extraction, but returns the normalized image (default: "optimal")
1247
+ tilt : float or array[nord, ncol], optional
1248
+ The tilt (1st order curvature) of the slit for curved extraction. Will use vertical extraction if no tilt is set. (default: None, i.e. tilt = 0)
1249
+ shear : float or array[nord, ncol], optional
1250
+ The shear (2nd order curvature) of the slit for curved extraction (default: None, i.e. shear = 0)
1251
+ polarization : bool, optional
1252
+ if true, pairs of orders are considered to belong to the same order, but different polarization. Only affects the scatter (default: False)
1253
+ **kwargs, optional
1254
+ parameters for extraction functions
1255
+
1256
+ Returns
1257
+ -------
1258
+ spec : array[nord, ncol](float)
1259
+ extracted spectrum for each order
1260
+ uncertainties : array[nord, ncol](float)
1261
+ uncertainties on the spectrum
1262
+
1263
+ if extraction_type == "normalize" instead return
1264
+
1265
+ im_norm : array[nrow, ncol](float)
1266
+ normalized image
1267
+ im_ordr : array[nrow, ncol](float)
1268
+ image with just the orders
1269
+ blaze : array[nord, ncol](float)
1270
+ extracted spectrum (equals blaze if img was the flat field)
1271
+ """
1272
+
1273
+ nrow, ncol = img.shape
1274
+ nord, _ = orders.shape
1275
+ if order_range is None:
1276
+ order_range = (0, nord)
1277
+ if np.isscalar(tilt):
1278
+ n = order_range[1] - order_range[0]
1279
+ tilt = np.full((n, ncol), tilt)
1280
+ if np.isscalar(shear):
1281
+ n = order_range[1] - order_range[0]
1282
+ shear = np.full((n, ncol), shear)
1283
+
1284
+ # Fix the input parameters
1285
+ extraction_width, column_range, orders = fix_parameters(
1286
+ extraction_width, column_range, orders, nrow, ncol, nord
1287
+ )
1288
+ # Limit orders (and related properties) to orders in range
1289
+ nord = order_range[1] - order_range[0]
1290
+ orders = orders[order_range[0] : order_range[1]]
1291
+ column_range = column_range[order_range[0] : order_range[1]]
1292
+ extraction_width = extraction_width[order_range[0] : order_range[1]]
1293
+
1294
+ # if sigma_cutoff > 0:
1295
+ # # Blur the image and mask outliers
1296
+ # img = np.ma.masked_invalid(img, copy=False)
1297
+ # img.data[img.mask] = 0
1298
+ # # Use the median of the sorounding pixels (excluding the pixel itself)
1299
+ # footprint = np.array([[1, 1, 1], [1, 0, 1], [1, 1, 1]])
1300
+ # dilated = median_filter(img, footprint=footprint)
1301
+ # diff = np.ma.abs(img - dilated)
1302
+ # # median = 50%; 3 sigma = 99.73 %
1303
+ # median, std = np.percentile(diff.compressed(), (50, 99.73))
1304
+ # mask = diff > median + sigma_cutoff * std / 3
1305
+ # img[mask] = np.ma.masked
1306
+
1307
+ if extraction_type == "optimal":
1308
+ # the "normal" case, except for wavelength calibration files
1309
+ spectrum, slitfunction, uncertainties = optimal_extraction(
1310
+ img,
1311
+ orders,
1312
+ extraction_width,
1313
+ column_range,
1314
+ tilt=tilt,
1315
+ shear=shear,
1316
+ **kwargs,
1317
+ )
1318
+ elif extraction_type == "normalize":
1319
+ # TODO
1320
+ # Prepare normalized flat field image if necessary
1321
+ # These will be passed and "returned" by reference
1322
+ # I don't like it, but it works for now
1323
+ im_norm = np.zeros_like(img)
1324
+ im_ordr = np.zeros_like(img)
1325
+
1326
+ blaze, _, _ = optimal_extraction(
1327
+ img,
1328
+ orders,
1329
+ extraction_width,
1330
+ column_range,
1331
+ tilt=tilt,
1332
+ shear=shear,
1333
+ normalize=True,
1334
+ im_norm=im_norm,
1335
+ im_ordr=im_ordr,
1336
+ **kwargs,
1337
+ )
1338
+ threshold_lower = kwargs.get("threshold_lower", 0)
1339
+ im_norm[im_norm <= threshold_lower] = 1
1340
+ im_ordr[im_ordr <= threshold_lower] = 1
1341
+ return im_norm, im_ordr, blaze, column_range
1342
+ elif extraction_type == "arc":
1343
+ # Simpler extraction, just summing along the arc of the order
1344
+ spectrum, uncertainties = arc_extraction(
1345
+ img,
1346
+ orders,
1347
+ extraction_width,
1348
+ column_range,
1349
+ tilt=tilt,
1350
+ shear=shear,
1351
+ **kwargs,
1352
+ )
1353
+ slitfunction = None
1354
+ else:
1355
+ raise ValueError(
1356
+ f"Parameter 'extraction_type' not understood. Expected 'optimal', 'normalize', or 'arc' bug got {extraction_type}."
1357
+ )
1358
+
1359
+ return spectrum, uncertainties, slitfunction, column_range
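As a closing illustration, a hedged end-to-end sketch of driving this module directly: a synthetic frame, made-up order polynomials, and the simple 'arc' mode. A real reduction would take orders from trace_orders and curvature from make_shear, and the import assumes the wheel's compiled C extension loads on your platform.

    import numpy as np
    from pyreduce.extract import extract

    nrow, ncol = 100, 512
    orders = np.array([[0.0, 30.0], [0.0, 60.0]])  # two flat, made-up order traces

    # synthetic frame: Gaussian cross-section around each trace
    y = np.arange(nrow)[:, None]
    img = np.zeros((nrow, ncol))
    for coef in orders:
        ycen = np.polyval(coef, np.arange(ncol))[None, :]
        img += 100.0 * np.exp(-0.5 * ((y - ycen) / 2.0) ** 2)

    spec, unc, slitf, column_range = extract(
        img,
        orders,
        extraction_width=0.5,     # half the inter-order distance
        extraction_type="arc",    # plain summation, no curvature
        collapse_function="sum",
    )
    # spec.shape == (2, 512); slitf is None for arc extraction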