pyreduce-astro 0.6.0__cp312-cp312-macosx_11_0_arm64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (154)
  1. pyreduce/__init__.py +67 -0
  2. pyreduce/__main__.py +106 -0
  3. pyreduce/clib/__init__.py +0 -0
  4. pyreduce/clib/_slitfunc_2d.cpython-311-darwin.so +0 -0
  5. pyreduce/clib/_slitfunc_2d.cpython-312-darwin.so +0 -0
  6. pyreduce/clib/_slitfunc_bd.cpython-311-darwin.so +0 -0
  7. pyreduce/clib/_slitfunc_bd.cpython-312-darwin.so +0 -0
  8. pyreduce/clib/build_extract.py +75 -0
  9. pyreduce/clib/slit_func_2d_xi_zeta_bd.c +1313 -0
  10. pyreduce/clib/slit_func_2d_xi_zeta_bd.h +55 -0
  11. pyreduce/clib/slit_func_bd.c +362 -0
  12. pyreduce/clib/slit_func_bd.h +17 -0
  13. pyreduce/clipnflip.py +147 -0
  14. pyreduce/combine_frames.py +855 -0
  15. pyreduce/configuration.py +186 -0
  16. pyreduce/continuum_normalization.py +329 -0
  17. pyreduce/cwrappers.py +404 -0
  18. pyreduce/datasets.py +231 -0
  19. pyreduce/echelle.py +413 -0
  20. pyreduce/estimate_background_scatter.py +129 -0
  21. pyreduce/extract.py +1361 -0
  22. pyreduce/extraction_width.py +77 -0
  23. pyreduce/instruments/__init__.py +0 -0
  24. pyreduce/instruments/andes.json +61 -0
  25. pyreduce/instruments/andes.py +102 -0
  26. pyreduce/instruments/common.json +46 -0
  27. pyreduce/instruments/common.py +683 -0
  28. pyreduce/instruments/crires_plus.json +63 -0
  29. pyreduce/instruments/crires_plus.py +103 -0
  30. pyreduce/instruments/filters.py +195 -0
  31. pyreduce/instruments/harpn.json +136 -0
  32. pyreduce/instruments/harpn.py +201 -0
  33. pyreduce/instruments/harps.json +155 -0
  34. pyreduce/instruments/harps.py +310 -0
  35. pyreduce/instruments/instrument_info.py +140 -0
  36. pyreduce/instruments/instrument_schema.json +318 -0
  37. pyreduce/instruments/jwst_miri.json +53 -0
  38. pyreduce/instruments/jwst_miri.py +29 -0
  39. pyreduce/instruments/jwst_niriss.json +52 -0
  40. pyreduce/instruments/jwst_niriss.py +98 -0
  41. pyreduce/instruments/lick_apf.json +53 -0
  42. pyreduce/instruments/lick_apf.py +35 -0
  43. pyreduce/instruments/mcdonald.json +59 -0
  44. pyreduce/instruments/mcdonald.py +123 -0
  45. pyreduce/instruments/metis_ifu.json +63 -0
  46. pyreduce/instruments/metis_ifu.py +45 -0
  47. pyreduce/instruments/metis_lss.json +65 -0
  48. pyreduce/instruments/metis_lss.py +45 -0
  49. pyreduce/instruments/micado.json +53 -0
  50. pyreduce/instruments/micado.py +45 -0
  51. pyreduce/instruments/neid.json +51 -0
  52. pyreduce/instruments/neid.py +154 -0
  53. pyreduce/instruments/nirspec.json +56 -0
  54. pyreduce/instruments/nirspec.py +215 -0
  55. pyreduce/instruments/nte.json +47 -0
  56. pyreduce/instruments/nte.py +42 -0
  57. pyreduce/instruments/uves.json +59 -0
  58. pyreduce/instruments/uves.py +46 -0
  59. pyreduce/instruments/xshooter.json +66 -0
  60. pyreduce/instruments/xshooter.py +39 -0
  61. pyreduce/make_shear.py +606 -0
  62. pyreduce/masks/mask_crires_plus_det1.fits.gz +0 -0
  63. pyreduce/masks/mask_crires_plus_det2.fits.gz +0 -0
  64. pyreduce/masks/mask_crires_plus_det3.fits.gz +0 -0
  65. pyreduce/masks/mask_ctio_chiron.fits.gz +0 -0
  66. pyreduce/masks/mask_elodie.fits.gz +0 -0
  67. pyreduce/masks/mask_feros3.fits.gz +0 -0
  68. pyreduce/masks/mask_flames_giraffe.fits.gz +0 -0
  69. pyreduce/masks/mask_harps_blue.fits.gz +0 -0
  70. pyreduce/masks/mask_harps_red.fits.gz +0 -0
  71. pyreduce/masks/mask_hds_blue.fits.gz +0 -0
  72. pyreduce/masks/mask_hds_red.fits.gz +0 -0
  73. pyreduce/masks/mask_het_hrs_2x5.fits.gz +0 -0
  74. pyreduce/masks/mask_jwst_miri_lrs_slitless.fits.gz +0 -0
  75. pyreduce/masks/mask_jwst_niriss_gr700xd.fits.gz +0 -0
  76. pyreduce/masks/mask_lick_apf_.fits.gz +0 -0
  77. pyreduce/masks/mask_mcdonald.fits.gz +0 -0
  78. pyreduce/masks/mask_nes.fits.gz +0 -0
  79. pyreduce/masks/mask_nirspec_nirspec.fits.gz +0 -0
  80. pyreduce/masks/mask_sarg.fits.gz +0 -0
  81. pyreduce/masks/mask_sarg_2x2a.fits.gz +0 -0
  82. pyreduce/masks/mask_sarg_2x2b.fits.gz +0 -0
  83. pyreduce/masks/mask_subaru_hds_red.fits.gz +0 -0
  84. pyreduce/masks/mask_uves_blue.fits.gz +0 -0
  85. pyreduce/masks/mask_uves_blue_binned_2_2.fits.gz +0 -0
  86. pyreduce/masks/mask_uves_middle.fits.gz +0 -0
  87. pyreduce/masks/mask_uves_middle_2x2_split.fits.gz +0 -0
  88. pyreduce/masks/mask_uves_middle_binned_2_2.fits.gz +0 -0
  89. pyreduce/masks/mask_uves_red.fits.gz +0 -0
  90. pyreduce/masks/mask_uves_red_2x2.fits.gz +0 -0
  91. pyreduce/masks/mask_uves_red_2x2_split.fits.gz +0 -0
  92. pyreduce/masks/mask_uves_red_binned_2_2.fits.gz +0 -0
  93. pyreduce/masks/mask_xshooter_nir.fits.gz +0 -0
  94. pyreduce/rectify.py +138 -0
  95. pyreduce/reduce.py +2205 -0
  96. pyreduce/settings/settings_ANDES.json +89 -0
  97. pyreduce/settings/settings_CRIRES_PLUS.json +89 -0
  98. pyreduce/settings/settings_HARPN.json +73 -0
  99. pyreduce/settings/settings_HARPS.json +69 -0
  100. pyreduce/settings/settings_JWST_MIRI.json +55 -0
  101. pyreduce/settings/settings_JWST_NIRISS.json +55 -0
  102. pyreduce/settings/settings_LICK_APF.json +62 -0
  103. pyreduce/settings/settings_MCDONALD.json +58 -0
  104. pyreduce/settings/settings_METIS_IFU.json +77 -0
  105. pyreduce/settings/settings_METIS_LSS.json +77 -0
  106. pyreduce/settings/settings_MICADO.json +78 -0
  107. pyreduce/settings/settings_NEID.json +73 -0
  108. pyreduce/settings/settings_NIRSPEC.json +58 -0
  109. pyreduce/settings/settings_NTE.json +60 -0
  110. pyreduce/settings/settings_UVES.json +54 -0
  111. pyreduce/settings/settings_XSHOOTER.json +78 -0
  112. pyreduce/settings/settings_pyreduce.json +178 -0
  113. pyreduce/settings/settings_schema.json +827 -0
  114. pyreduce/tools/__init__.py +0 -0
  115. pyreduce/tools/combine.py +117 -0
  116. pyreduce/trace_orders.py +645 -0
  117. pyreduce/util.py +1288 -0
  118. pyreduce/wavecal/MICADO_HK_3arcsec_chip5.npz +0 -0
  119. pyreduce/wavecal/atlas/thar.fits +4946 -13
  120. pyreduce/wavecal/atlas/thar_list.txt +4172 -0
  121. pyreduce/wavecal/atlas/une.fits +0 -0
  122. pyreduce/wavecal/convert.py +38 -0
  123. pyreduce/wavecal/crires_plus_J1228_Open_det1.npz +0 -0
  124. pyreduce/wavecal/crires_plus_J1228_Open_det2.npz +0 -0
  125. pyreduce/wavecal/crires_plus_J1228_Open_det3.npz +0 -0
  126. pyreduce/wavecal/harpn_harpn_2D.npz +0 -0
  127. pyreduce/wavecal/harps_blue_2D.npz +0 -0
  128. pyreduce/wavecal/harps_blue_pol_2D.npz +0 -0
  129. pyreduce/wavecal/harps_red_2D.npz +0 -0
  130. pyreduce/wavecal/harps_red_pol_2D.npz +0 -0
  131. pyreduce/wavecal/mcdonald.npz +0 -0
  132. pyreduce/wavecal/metis_lss_l_2D.npz +0 -0
  133. pyreduce/wavecal/metis_lss_m_2D.npz +0 -0
  134. pyreduce/wavecal/nirspec_K2.npz +0 -0
  135. pyreduce/wavecal/uves_blue_360nm_2D.npz +0 -0
  136. pyreduce/wavecal/uves_blue_390nm_2D.npz +0 -0
  137. pyreduce/wavecal/uves_blue_437nm_2D.npz +0 -0
  138. pyreduce/wavecal/uves_middle_2x2_2D.npz +0 -0
  139. pyreduce/wavecal/uves_middle_565nm_2D.npz +0 -0
  140. pyreduce/wavecal/uves_middle_580nm_2D.npz +0 -0
  141. pyreduce/wavecal/uves_middle_600nm_2D.npz +0 -0
  142. pyreduce/wavecal/uves_middle_665nm_2D.npz +0 -0
  143. pyreduce/wavecal/uves_middle_860nm_2D.npz +0 -0
  144. pyreduce/wavecal/uves_red_580nm_2D.npz +0 -0
  145. pyreduce/wavecal/uves_red_600nm_2D.npz +0 -0
  146. pyreduce/wavecal/uves_red_665nm_2D.npz +0 -0
  147. pyreduce/wavecal/uves_red_760nm_2D.npz +0 -0
  148. pyreduce/wavecal/uves_red_860nm_2D.npz +0 -0
  149. pyreduce/wavecal/xshooter_nir.npz +0 -0
  150. pyreduce/wavelength_calibration.py +1873 -0
  151. pyreduce_astro-0.6.0.dist-info/METADATA +114 -0
  152. pyreduce_astro-0.6.0.dist-info/RECORD +154 -0
  153. pyreduce_astro-0.6.0.dist-info/WHEEL +6 -0
  154. pyreduce_astro-0.6.0.dist-info/licenses/LICENSE +674 -0
pyreduce/extract.py ADDED
@@ -0,0 +1,1361 @@
1
+ """Module for extracting data from observations
2
+
3
+ Authors
4
+ -------
5
+
6
+ Version
7
+ -------
8
+
9
+ License
10
+ -------
11
+ """
12
+
13
+ import logging
14
+
15
+ import matplotlib.pyplot as plt
16
+ import numpy as np
17
+ from scipy.interpolate import interp1d
18
+ from tqdm import tqdm
19
+
20
+ from .cwrappers import slitfunc_curved
21
+ from .util import make_index
22
+
23
+ logger = logging.getLogger(__name__)
24
+
25
+
26
+ class ProgressPlot: # pragma: no cover
27
+ def __init__(self, nrow, ncol, nslitf, nbad=1000, title=None):
28
+ self.nrow = nrow
29
+ self.ncol = ncol
30
+ self.nslitf = nslitf
31
+
32
+ self.nbad = nbad
33
+
34
+ plt.ion()
35
+ self.fig = plt.figure(figsize=(12, 4))
36
+
37
+ # self.ax1 = self.fig.add_subplot(231, projection="3d")
38
+ self.ax1 = self.fig.add_subplot(231)
39
+ self.ax1.set_title("Swath")
40
+ self.ax1.set_ylabel("y [pixel]")
41
+ self.ax2 = self.fig.add_subplot(132)
42
+ self.ax2.set_title("Spectrum")
43
+ self.ax2.set_xlabel("x [pixel]")
44
+ self.ax2.set_ylabel("flux [arb. unit]")
45
+ self.ax2.set_xlim((0, ncol))
46
+ self.ax3 = self.fig.add_subplot(133)
47
+ self.ax3.set_title("Slit")
48
+ self.ax3.set_xlabel("y [pixel]")
49
+ self.ax3.set_ylabel("contribution [1]")
50
+ self.ax3.set_xlim((0, nrow))
51
+ # self.ax4 = self.fig.add_subplot(234, projection="3d")
52
+ self.ax4 = self.fig.add_subplot(234)
53
+ self.ax4.set_title("Model")
54
+ self.ax4.set_xlabel("x [pixel]")
55
+ self.ax4.set_ylabel("y [pixel]")
56
+
57
+ self.title = title
58
+ if title is not None:
59
+ self.fig.suptitle(title)
60
+
61
+ self.fig.tight_layout()
62
+
63
+ # Just plot empty pictures, to create the plots
64
+ # Update the data later
65
+ img = np.ones((nrow, ncol))
66
+ # y, x = np.indices((nrow, ncol))
67
+ # self.im_obs = self.ax1.plot_surface(x, y, img)
68
+ # self.im_model = self.ax4.plot_surface(x, y, img)
69
+ self.im_obs = self.ax1.imshow(img)
70
+ self.im_model = self.ax4.imshow(img)
71
+
72
+ (self.dots_spec,) = self.ax2.plot(
73
+ np.zeros(nrow * ncol), np.zeros(nrow * ncol), ".r", ms=2, alpha=0.6
74
+ )
75
+ (self.line_spec,) = self.ax2.plot(np.zeros(ncol), "-k")
76
+ (self.mask_spec,) = self.ax2.plot(np.zeros(self.nbad), "Pg")
77
+ (self.dots_slit,) = self.ax3.plot(
78
+ np.zeros(nrow * ncol), np.zeros(nrow * ncol), ".r", ms=2, alpha=0.6
79
+ )
80
+ (self.line_slit,) = self.ax3.plot(np.zeros(nrow), "-k", lw=2)
81
+ (self.mask_slit,) = self.ax3.plot(np.zeros(self.nbad), "Pg")
82
+
83
+ # self.ax1.set_zscale("log")
84
+ # self.ax4.set_zscale("log")
85
+
86
+ self.fig.canvas.draw()
87
+ self.fig.canvas.flush_events()
88
+
89
+ def fix_linear(self, data, limit, fill=0):
90
+ """Assures the size of the 1D array data is equal to limit"""
91
+
92
+ if len(data) > limit:
93
+ data = data[:limit]
94
+ elif len(data) < limit:
95
+ padding = np.full(limit - len(data), fill, dtype=data.dtype)
96
+ data = np.concatenate((data, padding))
97
+ return data
98
+
99
+ def plot(self, img, spec, slitf, model, ycen, mask, ord_num, left, right):
100
+ img = np.copy(img)
101
+ spec = np.copy(spec)
102
+ slitf = np.copy(slitf)
103
+ ycen = np.copy(ycen)
104
+
105
+ ny = img.shape[0]
106
+ nspec = img.shape[1]
107
+ x_spec, y_spec = self.get_spec(img, spec, slitf, ycen)
108
+ x_slit, y_slit = self.get_slitf(img, spec, slitf, ycen)
109
+ ycen = ycen + ny / 2
110
+
111
+ old = np.linspace(-1, ny, len(slitf))
112
+
113
+ # Fix Sizes
114
+ mask_spec_x = self.fix_linear(x_spec[mask.ravel()], self.nbad, fill=np.nan)
115
+ mask_spec = self.fix_linear(y_spec[mask.ravel()], self.nbad, fill=np.nan)
116
+ mask_slit_x = self.fix_linear(x_slit[mask.ravel()], self.nbad, fill=np.nan)
117
+ mask_slit = self.fix_linear(y_slit[mask.ravel()], self.nbad, fill=np.nan)
118
+
119
+ ycen = self.fix_linear(ycen, self.ncol)
120
+ x_spec = self.fix_linear(x_spec, self.ncol * self.nrow)
121
+ y_spec = self.fix_linear(y_spec, self.ncol * self.nrow)
122
+ spec = self.fix_linear(spec, self.ncol)
123
+ x_slit = self.fix_linear(x_slit, self.ncol * self.nrow)
124
+ y_slit = self.fix_linear(y_slit, self.ncol * self.nrow)
125
+ old = self.fix_linear(old, self.nslitf)
126
+ sf = self.fix_linear(slitf, self.nslitf)
127
+
128
+ # Update Data
129
+ model = np.clip(model, 0, np.max(model[5:-5, 5:-5]) * 1.1)
130
+ self.im_obs.remove()
131
+ img = np.clip(img, 0, np.max(model) * 1.1)
132
+ # y, x = np.indices(img.shape)
133
+ # self.im_obs = self.ax1.plot_surface(x, y, img)
134
+ self.im_obs = self.ax1.imshow(img, aspect="auto", origin="lower")
135
+ vmin, vmax = self.im_obs.norm.vmin, self.im_obs.norm.vmax
136
+ self.im_model.remove()
137
+ # y, x = np.indices(model.shape)
138
+ # self.im_model = self.ax4.plot_surface(x, y, model)
139
+ self.im_model = self.ax4.imshow(
140
+ model, aspect="auto", origin="lower", vmin=vmin, vmax=vmax
141
+ )
142
+
143
+ # self.line_ycen.set_ydata(ycen)
144
+ self.dots_spec.set_xdata(x_spec)
145
+ self.dots_spec.set_ydata(y_spec)
146
+ self.line_spec.set_ydata(spec)
147
+
148
+ self.mask_spec.set_xdata(mask_spec_x)
149
+ self.mask_spec.set_ydata(mask_spec)
150
+
151
+ self.dots_slit.set_xdata(x_slit)
152
+ self.dots_slit.set_ydata(y_slit)
153
+ self.line_slit.set_xdata(old)
154
+ self.line_slit.set_ydata(sf)
155
+
156
+ self.mask_slit.set_xdata(mask_slit_x)
157
+ self.mask_slit.set_ydata(mask_slit)
158
+
159
+ self.ax2.set_xlim((0, nspec - 1))
160
+ limit = np.nanmax(spec[5:-5]) * 1.1
161
+ if not np.isnan(limit):
162
+ self.ax2.set_ylim((0, limit))
163
+
164
+ self.ax3.set_xlim((0, ny - 1))
165
+ limit = np.nanmax(sf) * 1.1
166
+ if not np.isnan(limit):
167
+ self.ax3.set_ylim((0, limit))
168
+
169
+ title = f"Order {ord_num}, Columns {left} - {right}"
170
+ if self.title is not None:
171
+ title = f"{self.title}\n{title}"
172
+ self.fig.suptitle(title)
173
+ self.fig.canvas.draw()
174
+ self.fig.canvas.flush_events()
175
+
176
+ def close(self):
177
+ plt.ioff()
178
+ plt.close()
179
+
180
+ def get_spec(self, img, spec, slitf, ycen):
181
+ """get the spectrum corrected by the slit function"""
182
+ nrow, ncol = img.shape
183
+ x, y = np.indices(img.shape)
184
+ ycen = ycen - ycen.astype(int)
185
+
186
+ x = x - ycen + 0.5
187
+ old = np.linspace(-1, nrow - 1 + 1, len(slitf))
188
+ sf = np.interp(x, old, slitf)
189
+
190
+ x = img / sf
191
+
192
+ x = x.ravel()
193
+ y = y.ravel()
194
+ return y, x
195
+
196
+ def get_slitf(self, img, spec, slitf, ycen):
197
+ """get the slit function"""
198
+ x = np.indices(img.shape)[0]
199
+ ycen = ycen - ycen.astype(int)
200
+
201
+ if np.any(spec == 0):
202
+ i = np.arange(len(spec))
203
+ try:
204
+ spec = interp1d(
205
+ i[spec != 0], spec[spec != 0], fill_value="extrapolate"
206
+ )(i)
207
+ except ValueError:
208
+ spec[spec == 0] = np.median(spec)
209
+ y = img / spec[None, :]
210
+ y = y.ravel()
211
+
212
+ x = x - ycen + 0.5
213
+ x = x.ravel()
214
+ return x, y
215
+
216
+
217
+ class Swath:
218
+ def __init__(self, nswath):
219
+ self.nswath = nswath
220
+ self.spec = [None] * nswath
221
+ self.slitf = [None] * nswath
222
+ self.model = [None] * nswath
223
+ self.unc = [None] * nswath
224
+ self.mask = [None] * nswath
225
+ self.info = [None] * nswath
226
+
227
+ def __len__(self):
228
+ return self.nswath
229
+
230
+ def __getitem__(self, key):
231
+ return (
232
+ self.spec[key],
233
+ self.slitf[key],
234
+ self.model[key],
235
+ self.unc[key],
236
+ self.mask[key],
237
+ self.info[key],
238
+ )
239
+
240
+ def __setitem__(self, key, value):
241
+ self.spec[key] = value[0]
242
+ self.slitf[key] = value[1]
243
+ self.model[key] = value[2]
244
+ self.unc[key] = value[3]
245
+ self.mask[key] = value[4]
246
+ self.info[key] = value[5]
247
+
248
+
249
+ def fix_parameters(xwd, cr, orders, nrow, ncol, nord, ignore_column_range=False):
250
+ """Fix extraction width and column range, so that all pixels used are within the image.
251
+ I.e. the column range is cut so that everything is within the image
252
+
253
+ Parameters
254
+ ----------
255
+ xwd : float, array
256
+ Extraction width, either one value for all orders, or the whole array
257
+ cr : 2-tuple(int), array
258
+ Column range, either one value for all orders, or the whole array
259
+ orders : array
260
+ polynomial coefficients that describe each order
261
+ nrow : int
262
+ Number of rows in the image
263
+ ncol : int
264
+ Number of columns in the image
265
+ nord : int
266
+ Number of orders in the image
267
+ ignore_column_range : bool, optional
268
+ if True, the column range is not changed; this may lead to problems with the extraction (default: False)
269
+
270
+ Returns
271
+ -------
272
+ xwd : array
273
+ fixed extraction width
274
+ cr : array
275
+ fixed column range
276
+ orders : array
277
+ the same orders as before
278
+ """
279
+
280
+ if xwd is None:
281
+ xwd = 0.5
282
+ if np.isscalar(xwd):
283
+ xwd = np.tile([xwd, xwd], (nord, 1))
284
+ else:
285
+ xwd = np.asarray(xwd)
286
+ if xwd.ndim == 1:
287
+ xwd = np.tile(xwd, (nord, 1))
288
+
289
+ if cr is None:
290
+ cr = np.tile([0, ncol], (nord, 1))
291
+ else:
292
+ cr = np.asarray(cr)
293
+ if cr.ndim == 1:
294
+ cr = np.tile(cr, (nord, 1))
295
+
296
+ orders = np.asarray(orders)
297
+
298
+ xwd = np.array([xwd[0], *xwd, xwd[-1]])
299
+ cr = np.array([cr[0], *cr, cr[-1]])
300
+ orders = extend_orders(orders, nrow)
301
+
302
+ xwd = fix_extraction_width(xwd, orders, cr, ncol)
303
+ if not ignore_column_range:
304
+ cr, orders = fix_column_range(cr, orders, xwd, nrow, ncol)
305
+
306
+ orders = orders[1:-1]
307
+ xwd = xwd[1:-1]
308
+ cr = cr[1:-1]
309
+
310
+ return xwd, cr, orders
311
+
312
+
313
+ def extend_orders(orders, nrow):
314
+ """Extrapolate extra orders above and below the existing ones
315
+
316
+ Parameters
317
+ ----------
318
+ orders : array[nord, degree]
319
+ order tracing coefficients
320
+ nrow : int
321
+ number of rows in the image
322
+
323
+ Returns
324
+ -------
325
+ orders : array[nord + 2, degree]
326
+ extended orders
327
+ """
328
+
329
+ nord, ncoef = orders.shape
330
+
331
+ if nord > 1:
332
+ order_low = 2 * orders[0] - orders[1]
333
+ order_high = 2 * orders[-1] - orders[-2]
334
+ else:
335
+ order_low = [0 for _ in range(ncoef)]
336
+ order_high = [0 for _ in range(ncoef - 1)] + [nrow]
337
+
338
+ return np.array([order_low, *orders, order_high])
339
+
340
+
341
+ def fix_extraction_width(xwd, orders, cr, ncol):
342
+ """Convert fractional extraction width to pixel range
343
+
344
+ Parameters
345
+ ----------
346
+ extraction_width : array[nord, 2]
347
+ current extraction width, in pixels or fractions (for values below 1.5)
348
+ orders : array[nord, degree]
349
+ order tracing coefficients
350
+ column_range : array[nord, 2]
351
+ column range to use
352
+ ncol : int
353
+ number of columns in image
354
+
355
+ Returns
356
+ -------
357
+ extraction_width : array[nord, 2]
358
+ updated extraction width in pixels
359
+ """
360
+
361
+ if not np.all(xwd > 1.5):
362
+ # if extraction width is in relative scale transform to pixel scale
363
+ x = np.arange(ncol)
364
+ for i in range(1, len(xwd) - 1):
365
+ for j in [0, 1]:
366
+ if xwd[i, j] < 1.5:
367
+ k = i - 1 if j == 0 else i + 1
368
+ left = max(cr[[i, k], 0])
369
+ right = min(cr[[i, k], 1])
370
+
371
+ if right < left:
372
+ raise ValueError(
373
+ f"Check your column ranges. Orders {i} and {k} are weird"
374
+ )
375
+
376
+ current = np.polyval(orders[i], x[left:right])
377
+ below = np.polyval(orders[k], x[left:right])
378
+ xwd[i, j] *= np.min(np.abs(current - below))
379
+
380
+ xwd[0] = xwd[1]
381
+ xwd[-1] = xwd[-2]
382
+
383
+ xwd = np.ceil(xwd).astype(int)
384
+
385
+ return xwd
386
+
387
+
388
+ def fix_column_range(column_range, orders, extraction_width, nrow, ncol):
389
+ """Fix the column range, so that no pixels outside the image will be accessed (Thus avoiding errors)
390
+
391
+ Parameters
392
+ ----------
393
+ nrow : int
394
+ number of rows in the image
395
+ orders : array[nord, degree]
396
+ order tracing coefficients
397
+ extraction_width : array[nord, 2]
398
+ extraction width in pixels, (below, above)
399
+ column_range : array[nord, 2]
400
+ current column range
401
+ ncol : int
402
+ number of columns in the image
403
+
404
+ Returns
405
+ -------
406
+ column_range : array[nord, 2]
407
+ updated column range
408
+ orders : array[nord, degree]
409
+ order tracing coefficients (may have rows removed if no valid pixels)
410
+ """
411
+
412
+ ix = np.arange(ncol)
413
+ to_remove = []
414
+ # Loop over non extension orders
415
+ for i, order in zip(range(1, len(orders) - 1), orders[1:-1], strict=False):
416
+ # Shift order trace up/down by extraction_width
417
+ coeff_bot, coeff_top = np.copy(order), np.copy(order)
418
+ coeff_bot[-1] -= extraction_width[i, 0]
419
+ coeff_top[-1] += extraction_width[i, 1]
420
+
421
+ y_bot = np.polyval(coeff_bot, ix) # low edge of arc
422
+ y_top = np.polyval(coeff_top, ix) # high edge of arc
423
+
424
+ # find regions of pixels inside the image
425
+ # then use the region that most closely resembles the existing column range (from order tracing)
426
+ # but clip it to the existing column range (order tracing polynomials are not well defined outside the original range)
427
+ points_in_image = np.where((y_bot >= 0) & (y_top < nrow))[0]
428
+
429
+ if len(points_in_image) == 0:
430
+ # print(y_bot, y_top,nrow, ncol, points_in_image)
431
+ logger.warning(
432
+ f"No pixels are completely within the extraction width for order {i}, removing it."
433
+ )
434
+ to_remove += [i]
435
+ continue
436
+
437
+ regions = np.where(np.diff(points_in_image) != 1)[0]
438
+ regions = [(r, r + 1) for r in regions]
439
+ regions = [
440
+ points_in_image[0],
441
+ *points_in_image[(regions,)].ravel(),
442
+ points_in_image[-1],
443
+ ]
444
+ regions = [[regions[i], regions[i + 1] + 1] for i in range(0, len(regions), 2)]
445
+ overlap = [
446
+ min(reg[1], column_range[i, 1]) - max(reg[0], column_range[i, 0])
447
+ for reg in regions
448
+ ]
449
+ iregion = np.argmax(overlap)
450
+ column_range[i] = np.clip(
451
+ regions[iregion], column_range[i, 0], column_range[i, 1]
452
+ )
453
+
454
+ column_range[0] = column_range[1]
455
+ column_range[-1] = column_range[-2]
456
+
457
+ if to_remove:
458
+ column_range = np.delete(column_range, to_remove, axis=0)
459
+ orders = np.delete(orders, to_remove, axis=0)
460
+
461
+ return column_range, orders
462
+
463
+
464
+ def make_bins(swath_width, xlow, xhigh, ycen):
465
+ """Create bins for the swathes
466
+ Bins are roughly equally sized, are roughly swath_width long (if given),
467
+ and overlap each other by roughly half
468
+
469
+ Parameters
470
+ ----------
471
+ swath_width : {int, None}
472
+ initial value for the swath_width, bins will have roughly that size, but exact value may change
473
+ if swath_width is None, a good value is determined from the data
474
+ xlow : int
475
+ lower bound for x values
476
+ xhigh : int
477
+ upper bound for x values
478
+ ycen : array[ncol]
479
+ center of the order trace
480
+
481
+ Returns
482
+ -------
483
+ nbin : int
484
+ number of bins
485
+ bins_start : array[nbin]
486
+ left(beginning) side of the bins
487
+ bins_end : array[nbin]
488
+ right(ending) side of the bins
489
+ """
490
+
491
+ if swath_width is None:
492
+ ncol = len(ycen)
493
+ i = np.unique(ycen.astype(int)) # Points of row crossing
494
+ # ni = len(i) # This is how many times this order crosses to the next row
495
+ if len(i) > 1: # Curved order crosses rows
496
+ i = np.sum(i[1:] - i[:-1]) / (len(i) - 1)
497
+ nbin = np.clip(
498
+ int(np.round(ncol / i)) // 3, 3, 20
499
+ ) # number of swaths along the order
500
+ else: # Perfectly aligned orders
501
+ nbin = np.clip(ncol // 400, 3, None) # Still follow the changes in PSF
502
+ nbin = nbin * (xhigh - xlow) // ncol # Adjust for the true order length
503
+ else:
504
+ nbin = np.clip(int(np.round((xhigh - xlow) / swath_width)), 1, None)
505
+
506
+ bins = np.linspace(xlow, xhigh, 2 * nbin + 1) # boundaries of bins
507
+ bins_start = np.ceil(bins[:-2]).astype(int) # beginning of each bin
508
+ bins_end = np.floor(bins[2:]).astype(int) # end of each bin
509
+
510
+ return nbin, bins_start, bins_end
511
+
512
+
513
+ def calc_telluric_correction(telluric, img): # pragma: no cover
514
+ """Calculate telluric correction
515
+
516
+ If set to a specific integer larger than 1, it is used as the
517
+ offset from the order center line. The sky is then estimated by computing
518
+ median signal between this offset and the upper/lower limit of the
519
+ extraction window.
520
+
521
+ Parameters
522
+ ----------
523
+ telluric : int
524
+ telluric correction parameter
525
+ img : array
526
+ image of the swath
527
+
528
+ Returns
529
+ -------
530
+ tell : array
531
+ telluric correction
532
+ """
533
+ width, height = img.shape
534
+
535
+ tel_lim = telluric if telluric > 5 and telluric < height / 2 else min(5, height / 3)
536
+ tel = np.sum(img, axis=0)
537
+ itel = np.arange(height)
538
+ itel = itel[np.abs(itel - height / 2) >= tel_lim]
539
+ tel = img[itel, :]
540
+ sc = np.zeros(width)
541
+
542
+ for itel in range(width):
543
+ sc[itel] = np.ma.median(tel[itel])
544
+
545
+ return sc
546
+
547
+
548
+ def calc_scatter_correction(scatter, index):
549
+ """Calculate scatter correction
550
+ by evaluating the 2D background polynomial at the swath indices
551
+
552
+ Parameters
553
+ ----------
554
+ scatter : array of shape (degree_x, degree_y)
555
+ 2D polynomial coefficients of the background scatter
556
+ index : tuple (array, array)
557
+ indices of the swath within the overall image
558
+
559
+ Returns
560
+ -------
561
+ scatter_correction : array of shape (swath_width, swath_height)
562
+ correction for scattered light
563
+ """
564
+
565
+ # The indices in the image are switched
566
+ y, x = index
567
+ scatter_correction = np.polynomial.polynomial.polyval2d(x, y, scatter)
568
+ return scatter_correction
569
+
570
+
571
+ def extract_spectrum(
572
+ img,
573
+ ycen,
574
+ yrange,
575
+ xrange,
576
+ gain=1,
577
+ readnoise=0,
578
+ lambda_sf=0.1,
579
+ lambda_sp=0,
580
+ osample=1,
581
+ swath_width=None,
582
+ maxiter=20,
583
+ telluric=None,
584
+ scatter=None,
585
+ normalize=False,
586
+ threshold=0,
587
+ tilt=None,
588
+ shear=None,
589
+ plot=False,
590
+ plot_title=None,
591
+ im_norm=None,
592
+ im_ordr=None,
593
+ out_spec=None,
594
+ out_sunc=None,
595
+ out_slitf=None,
596
+ out_mask=None,
597
+ progress=None,
598
+ ord_num=0,
599
+ **kwargs,
600
+ ):
601
+ """
602
+ Extract the spectrum of a single order from an image
603
+ The order is split into several swathes of roughly swath_width length, which overlap half-half
604
+ For each swath a spectrum and slitfunction are extracted
605
+ overlapping sections are combined using linear weights (the center is strongest, falling off towards the edges)
606
+ Here is the layout for the bins:
607
+
608
+ ::
609
+
610
+ 1st swath 3rd swath 5th swath ...
611
+ /============|============|============|============|============|
612
+
613
+ 2nd swath 4th swath 6th swath
614
+ |------------|------------|------------|------------|
615
+ |.....|
616
+ overlap
617
+
618
+ + ******* 1
619
+ + *
620
+ + *
621
+ * weights (+) previous swath, (*) current swath
622
+ * +
623
+ * +
624
+ * +++++++ 0
625
+
626
+ Parameters
627
+ ----------
628
+ img : array[nrow, ncol]
629
+ observation (or similar)
630
+ ycen : array[ncol]
631
+ order trace of the current order
632
+ yrange : tuple(int, int)
633
+ extraction width in pixels, below and above
634
+ xrange : tuple(int, int)
635
+ column range to extract (low, high)
636
+ gain : float, optional
637
+ adu to electron, amplifier gain (default: 1)
638
+ readnoise : float, optional
639
+ read out noise factor (default: 0)
640
+ lambda_sf : float, optional
641
+ slit function smoothing parameter, usually very small (default: 0.1)
642
+ lambda_sp : int, optional
643
+ spectrum smoothing parameter, usually very small (default: 0)
644
+ osample : int, optional
645
+ oversampling factor, i.e. how many subpixels to create per pixel (default: 1, i.e. no oversampling)
646
+ swath_width : int, optional
647
+ swath width suggestion, actual width depends also on ncol, see make_bins (default: None, which will determine the width based on the order tracing)
648
+ telluric : {float, None}, optional
649
+ telluric correction factor (default: None, i.e. no telluric correction)
650
+ scatter : {array, None}, optional
651
+ background scatter as 2d polynomial coefficients (default: None, no correction)
652
+ normalize : bool, optional
653
+ whether to create a normalized image. If true, im_norm and im_ordr are used as output (default: False)
654
+ threshold : int, optional
655
+ threshold for normalization (default: 0)
656
+ tilt : array[ncol], optional
657
+ The tilt (1st order curvature) of the slit in this order for the curved extraction (default: None, i.e. tilt = 0)
658
+ shear : array[ncol], optional
659
+ The shear (2nd order curvature) of the slit in this order for the curved extraction (default: None, i.e. shear = 0)
660
+ plot : bool, optional
661
+ whether to plot the progress; plotting will slow down the procedure significantly (default: False)
662
+ ord_num : int, optional
663
+ current order number, just for plotting (default: 0)
664
+ im_norm : array[nrow, ncol], optional
665
+ normalized image, only output if normalize is True (default: None)
666
+ im_ordr : array[nrow, ncol], optional
667
+ image of the order blaze, only output if normalize is True (default: None)
668
+
669
+ Returns
670
+ -------
671
+ spec : array[ncol]
672
+ extracted spectrum
673
+ slitf : array[nslitf]
674
+ extracted slitfunction
675
+ mask : array[ncol]
676
+ mask of the column range to use in the spectrum
677
+ unc : array[ncol]
678
+ uncertainty on the spectrum
679
+ """
680
+
681
+ _, ncol = img.shape
682
+ ylow, yhigh = yrange
683
+ xlow, xhigh = xrange
684
+ nslitf = osample * (ylow + yhigh + 2) + 1
685
+ yhigh + ylow + 1
686
+
687
+ ycen_int = np.floor(ycen).astype(int)
688
+
689
+ spec = np.zeros(ncol) if out_spec is None else out_spec
690
+ sunc = np.zeros(ncol) if out_sunc is None else out_sunc
691
+ mask = np.full(ncol, False) if out_mask is None else out_mask
692
+ slitf = np.zeros(nslitf) if out_slitf is None else out_slitf
693
+
694
+ nbin, bins_start, bins_end = make_bins(swath_width, xlow, xhigh, ycen)
695
+ nswath = 2 * nbin - 1
696
+ swath = Swath(nswath)
697
+ margin = np.zeros((nswath, 2), int)
698
+
699
+ if normalize:
700
+ norm_img = [None] * nswath
701
+ norm_model = [None] * nswath
702
+
703
+ # Perform slit decomposition within each swath stepping through the order with
704
+ # half swath width. Spectra for each decomposition are combined with linear weights.
705
+ with tqdm(
706
+ enumerate(zip(bins_start, bins_end, strict=False)),
707
+ total=len(bins_start),
708
+ leave=False,
709
+ desc="Swath",
710
+ ) as t:
711
+ for ihalf, (ibeg, iend) in t:
712
+ logger.debug("Extracting Swath %i, Columns: %i - %i", ihalf, ibeg, iend)
713
+
714
+ # Cut out swath from image
715
+ index = make_index(ycen_int - ylow, ycen_int + yhigh, ibeg, iend)
716
+ swath_img = img[index]
717
+ swath_ycen = ycen[ibeg:iend]
718
+
719
+ # Corrections
720
+ # TODO: what is it even supposed to do?
721
+ if telluric is not None: # pragma: no cover
722
+ telluric_correction = calc_telluric_correction(telluric, swath_img)
723
+ else:
724
+ telluric_correction = 0
725
+
726
+ if scatter is not None:
727
+ scatter_correction = calc_scatter_correction(scatter, index)
728
+ else:
729
+ scatter_correction = 0
730
+
731
+ swath_img -= scatter_correction + telluric_correction
732
+
733
+ # Do Slitfunction extraction
734
+ swath_tilt = tilt[ibeg:iend] if tilt is not None else 0
735
+ swath_shear = shear[ibeg:iend] if shear is not None else 0
736
+ swath[ihalf] = slitfunc_curved(
737
+ swath_img,
738
+ swath_ycen,
739
+ swath_tilt,
740
+ swath_shear,
741
+ lambda_sp=lambda_sp,
742
+ lambda_sf=lambda_sf,
743
+ osample=osample,
744
+ yrange=yrange,
745
+ maxiter=maxiter,
746
+ gain=gain,
747
+ )
748
+ t.set_postfix(chi=f"{swath[ihalf][5][1]:1.2f}")
749
+
750
+ if normalize:
751
+ # Save image and model for later
752
+ # Use np.divide to avoid divisions by zero
753
+ where = swath.model[ihalf] > threshold / gain
754
+ norm_img[ihalf] = np.ones_like(swath.model[ihalf])
755
+ np.divide(
756
+ np.abs(swath_img),
757
+ swath.model[ihalf],
758
+ where=where,
759
+ out=norm_img[ihalf],
760
+ )
761
+ norm_model[ihalf] = swath.model[ihalf]
762
+
763
+ if plot >= 2 and not np.all(np.isnan(swath_img)): # pragma: no cover
764
+ if progress is None:
765
+ progress = ProgressPlot(
766
+ swath_img.shape[0], swath_img.shape[1], nslitf, title=plot_title
767
+ )
768
+ progress.plot(
769
+ swath_img,
770
+ swath.spec[ihalf],
771
+ swath.slitf[ihalf],
772
+ swath.model[ihalf],
773
+ swath_ycen,
774
+ swath.mask[ihalf],
775
+ ord_num,
776
+ ibeg,
777
+ iend,
778
+ )
779
+
780
+ # Remove points at the border of each swath, if the order has tilt,
781
+ # as those pixels have bad information
782
+ for i in range(nswath):
783
+ margin[i, :] = int(swath.info[i][4]) + 1
784
+
785
+ # Weight for combining swaths
786
+ weight = [np.ones(bins_end[i] - bins_start[i]) for i in range(nswath)]
787
+ weight[0][: margin[0, 0]] = 0
788
+ weight[-1][len(weight[-1]) - margin[-1, 1] :] = 0
789
+ for i, j in zip(range(0, nswath - 1), range(1, nswath), strict=False):
790
+ width = bins_end[i] - bins_start[i]
791
+ overlap = bins_end[i] - bins_start[j]
792
+
793
+ # Start and end indices for the two swaths
794
+ start_i = width - overlap + margin[j, 0]
795
+ end_i = width - margin[i, 1]
796
+
797
+ start_j = margin[j, 0]
798
+ end_j = overlap - margin[i, 1]
799
+
800
+ # Weights across one overlap run from 0 to 1, excluding the endpoints themselves
801
+ triangle = np.linspace(0, 1, overlap + 1, endpoint=False)[1:]
802
+ # Cut away the margins at the corners
803
+ triangle = triangle[margin[j, 0] : len(triangle) - margin[i, 1]]
804
+
805
+ # Set values
806
+ weight[i][start_i:end_i] = 1 - triangle
807
+ weight[j][start_j:end_j] = triangle
808
+
809
+ # Don't use the pixels at the edges (due to curvature)
810
+ weight[i][end_i:] = 0
811
+ weight[j][:start_j] = 0
812
+
813
+ # Update column range
814
+ xrange[0] += margin[0, 0]
815
+ xrange[1] -= margin[-1, 1]
816
+ mask[: xrange[0]] = True
817
+ mask[xrange[1] :] = True
818
+
819
+ # Apply weights
820
+ for i, (ibeg, iend) in enumerate(zip(bins_start, bins_end, strict=False)):
821
+ spec[ibeg:iend] += swath.spec[i] * weight[i]
822
+ sunc[ibeg:iend] += swath.unc[i] * weight[i]
823
+
824
+ if normalize:
825
+ for i, (ibeg, iend) in enumerate(zip(bins_start, bins_end, strict=False)):
826
+ index = make_index(ycen_int - ylow, ycen_int + yhigh, ibeg, iend)
827
+ im_norm[index] += norm_img[i] * weight[i]
828
+ im_ordr[index] += norm_model[i] * weight[i]
829
+
830
+ slitf[:] = np.mean(swath.slitf, axis=0)
831
+ sunc[:] = np.sqrt(sunc**2 + (readnoise / gain) ** 2)
832
+ return spec, slitf, mask, sunc
833
+
834
+
835
+ def model(spec, slitf):
836
+ return spec[None, :] * slitf[:, None]
837
+
838
+
839
+ def get_y_scale(ycen, xrange, extraction_width, nrow):
840
+ """Calculate the y limits of the order
841
+ This is especially important at the edges
842
+
843
+ Parameters
844
+ ----------
845
+ ycen : array[ncol]
846
+ order trace
847
+ xrange : tuple(int, int)
848
+ column range
849
+ extraction_width : tuple(int, int)
850
+ extraction width in pixels below and above the order
851
+ nrow : int
852
+ number of rows in the image, defines upper edge
853
+
854
+ Returns
855
+ -------
856
+ y_low, y_high : int, int
857
+ lower and upper y bound for extraction
858
+ """
859
+ ycen = ycen[xrange[0] : xrange[1]]
860
+
861
+ ymin = ycen - extraction_width[0]
862
+ ymin = np.floor(ymin)
863
+ if min(ymin) < 0:
864
+ ymin = ymin - min(ymin) # help for orders at edge
865
+ if max(ymin) >= nrow:
866
+ ymin = ymin - max(ymin) + nrow - 1 # helps at edge
867
+
868
+ ymax = ycen + extraction_width[1]
869
+ ymax = np.ceil(ymax)
870
+ if max(ymax) >= nrow:
871
+ ymax = ymax - max(ymax) + nrow - 1 # helps at edge
872
+
873
+ # Define a fixed height area containing one spectral order
874
+ y_lower_lim = int(np.min(ycen - ymin)) # Pixels below center line
875
+ y_upper_lim = int(np.min(ymax - ycen)) # Pixels above center line
876
+
877
+ return y_lower_lim, y_upper_lim
878
+
879
+
880
+ def optimal_extraction(
881
+ img,
882
+ orders,
883
+ extraction_width,
884
+ column_range,
885
+ tilt,
886
+ shear,
887
+ plot=False,
888
+ plot_title=None,
889
+ **kwargs,
890
+ ):
891
+ """Use optimal extraction to get spectra
892
+
893
+ This functions just loops over the orders, the actual work is done in extract_spectrum
894
+
895
+ Parameters
896
+ ----------
897
+ img : array[nrow, ncol]
898
+ image to extract
899
+ orders : array[nord, degree]
900
+ order tracing coefficients
901
+ extraction_width : array[nord, 2]
902
+ extraction width in pixels
903
+ column_range : array[nord, 2]
904
+ column range to use
905
+ scatter : array[nord, 4, ncol]
906
+ background scatter (or None)
907
+ **kwargs
908
+ other parameters for the extraction (see extract_spectrum)
909
+
910
+ Returns
911
+ -------
912
+ spectrum : array[nord, ncol]
913
+ extracted spectrum
914
+ slitfunction : array[nord, nslitf]
915
+ recovered slitfunction
916
+ uncertainties: array[nord, ncol]
917
+ uncertainties on the spectrum
918
+ """
919
+
920
+ logger.info("Using optimal extraction to produce spectrum")
921
+
922
+ nrow, ncol = img.shape
923
+ nord = len(orders)
924
+
925
+ spectrum = np.zeros((nord, ncol))
926
+ uncertainties = np.zeros((nord, ncol))
927
+ slitfunction = [None for _ in range(nord)]
928
+
929
+ if tilt is None:
930
+ tilt = [None for _ in range(nord)]
931
+ if shear is None:
932
+ shear = [None for _ in range(nord)]
933
+
934
+ # Add mask as defined by column ranges
935
+ mask = np.full((nord, ncol), True)
936
+ for i in range(nord):
937
+ mask[i, column_range[i, 0] : column_range[i, 1]] = False
938
+ spectrum = np.ma.array(spectrum, mask=mask)
939
+ uncertainties = np.ma.array(uncertainties, mask=mask)
940
+
941
+ ix = np.arange(ncol)
942
+ if plot >= 2: # pragma: no cover
943
+ ncol_swath = kwargs.get("swath_width", img.shape[1] // 400)
944
+ nrow_swath = np.sum(extraction_width, axis=1).max()
945
+ nslitf_swath = (nrow_swath + 2) * kwargs.get("osample", 1) + 1
946
+ progress = ProgressPlot(nrow_swath, ncol_swath, nslitf_swath, title=plot_title)
947
+ else:
948
+ progress = None
949
+
950
+ for i in tqdm(range(nord), desc="Order"):
951
+ logger.debug("Extracting relative order %i out of %i", i + 1, nord)
952
+
953
+ # Define a fixed height area containing one spectral order
954
+ ycen = np.polyval(orders[i], ix)
955
+ yrange = get_y_scale(ycen, column_range[i], extraction_width[i], nrow)
956
+
957
+ osample = kwargs.get("osample", 1)
958
+ slitfunction[i] = np.zeros(osample * (sum(yrange) + 2) + 1)
959
+
960
+ # Return values are set by reference, as the out parameters
961
+ # Also column_range is adjusted depending on the shear
962
+ # This is to avoid large chunks of memory of essentially duplicates
963
+ extract_spectrum(
964
+ img,
965
+ ycen,
966
+ yrange,
967
+ column_range[i],
968
+ tilt=tilt[i],
969
+ shear=shear[i],
970
+ out_spec=spectrum[i],
971
+ out_sunc=uncertainties[i],
972
+ out_slitf=slitfunction[i],
973
+ out_mask=mask[i],
974
+ progress=progress,
975
+ ord_num=i + 1,
976
+ plot=plot,
977
+ plot_title=plot_title,
978
+ **kwargs,
979
+ )
980
+
981
+ if plot >= 2: # pragma: no cover
982
+ progress.close()
983
+
984
+ if plot: # pragma: no cover
985
+ plot_comparison(
986
+ img,
987
+ orders,
988
+ spectrum,
989
+ slitfunction,
990
+ extraction_width,
991
+ column_range,
992
+ title=plot_title,
993
+ )
994
+
995
+ return spectrum, slitfunction, uncertainties
996
+
997
+
998
+ def correct_for_curvature(img_order, tilt, shear, xwd):
999
+ # img_order = np.ma.filled(img_order, np.nan)
1000
+ mask = ~np.ma.getmaskarray(img_order)
1001
+
1002
+ xt = np.arange(img_order.shape[1])
1003
+ for y, yt in zip(range(xwd[0] + xwd[1]), range(-xwd[0], xwd[1]), strict=False):
1004
+ xi = xt + yt * tilt + yt**2 * shear
1005
+ img_order[y] = np.interp(
1006
+ xi, xt[mask[y]], img_order[y][mask[y]], left=0, right=0
1007
+ )
1008
+
1009
+ xt = np.arange(img_order.shape[0])
1010
+ for x in range(img_order.shape[1]):
1011
+ img_order[:, x] = np.interp(
1012
+ xt, xt[mask[:, x]], img_order[:, x][mask[:, x]], left=0, right=0
1013
+ )
1014
+
1015
+ return img_order
1016
+
1017
+
1018
+ def model_image(img, xwd, tilt, shear):
1019
+ # Correct image for curvature
1020
+ img.shape[0]
1021
+ img = correct_for_curvature(img, tilt, shear, xwd)
1022
+ # Find slitfunction using the median to avoid outliers
1023
+ slitf = np.ma.median(img, axis=1)
1024
+ slitf /= np.ma.sum(slitf)
1025
+ # Use the slitfunction to find spectrum
1026
+ spec = np.ma.median(img / slitf[:, None], axis=0)
1027
+ # Create model from slitfunction and spectrum
1028
+ model = spec[None, :] * slitf[:, None]
1029
+ # Reapply curvature to the model
1030
+ model = correct_for_curvature(model, -tilt, -shear, xwd)
1031
+ return model, spec, slitf
1032
+
1033
+
1034
+ def get_mask(img, model):
1035
+ # 99.73 = 3 sigma, 2 * 3 = 6 sigma
1036
+ residual = np.ma.abs(img - model)
1037
+ median, vmax = np.percentile(np.ma.compressed(residual), (50, 99.73))
1038
+ vmax = median + 2 * (vmax - median)
1039
+ return residual > vmax
1040
+
1041
+
1042
+ def arc_extraction(
1043
+ img,
1044
+ orders,
1045
+ extraction_width,
1046
+ column_range,
1047
+ gain=1,
1048
+ readnoise=0,
1049
+ dark=0,
1050
+ plot=False,
1051
+ plot_title=None,
1052
+ tilt=None,
1053
+ shear=None,
1054
+ collapse_function="median",
1055
+ **kwargs,
1056
+ ):
1057
+ """Use "simple" arc extraction to get a spectrum
1058
+ Arc extraction simply sums the image orthogonal to the order over the extraction width
1059
+
1060
+ This extraction makes a few rough assumptions and does not provide the most accurate results,
1061
+ but rather a good approximation
1062
+
1063
+ Parameters
1064
+ ----------
1065
+ img : array[nrow, ncol]
1066
+ image to extract
1067
+ orders : array[nord, order]
1068
+ order tracing coefficients
1069
+ extraction_width : array[nord, 2]
1070
+ extraction width in pixels
1071
+ column_range : array[nord, 2]
1072
+ column range to use
1073
+ gain : float, optional
1074
+ adu to electron, amplifier gain (default: 1)
1075
+ readnoise : float, optional
1076
+ read out noise (default: 0)
1077
+ dark : float, optional
1078
+ dark current noise (default: 0)
1079
+ plot : bool, optional
1080
+ whether to plot the results (default: False)
1081
+
1082
+ Returns
1083
+ -------
1084
+ spectrum : array[nord, ncol]
1085
+ extracted spectrum
1086
+ uncertainties : array[nord, ncol]
1087
+ uncertainties on extracted spectrum
1088
+ """
1089
+
1090
+ logger.info("Using arc extraction to produce spectrum")
1091
+ _, ncol = img.shape
1092
+ nord, _ = orders.shape
1093
+
1094
+ spectrum = np.zeros((nord, ncol))
1095
+ uncertainties = np.zeros((nord, ncol))
1096
+
1097
+ # Add mask as defined by column ranges
1098
+ mask = np.full((nord, ncol), True)
1099
+ for i in range(nord):
1100
+ mask[i, column_range[i, 0] : column_range[i, 1]] = False
1101
+ spectrum = np.ma.array(spectrum, mask=mask)
1102
+ uncertainties = np.ma.array(uncertainties, mask=mask)
1103
+
1104
+ x = np.arange(ncol)
1105
+
1106
+ for i in tqdm(range(nord), desc="Order"):
1107
+ logger.debug("Calculating order %i out of %i", i + 1, nord)
1108
+
1109
+ x_left_lim = column_range[i, 0]
1110
+ x_right_lim = column_range[i, 1]
1111
+
1112
+ # Rectify the image, i.e. remove the shape of the order
1113
+ # Then the center of the order varies by less than one pixel
1114
+ ycen = np.polyval(orders[i], x).astype(int)
1115
+ yb, yt = ycen - extraction_width[i, 0], ycen + extraction_width[i, 1]
1116
+ extraction_width[i, 0] + extraction_width[i, 1] + 1
1117
+ index = make_index(yb, yt, x_left_lim, x_right_lim)
1118
+ img_order = img[index]
1119
+
1120
+ # Correct for tilt and shear
1121
+ # For each row of the rectified order, interpolate onto the shifted row
1122
+ # Masked pixels are set to 0, similar to the summation
1123
+ if tilt is not None and shear is not None:
1124
+ img_order = correct_for_curvature(
1125
+ img_order,
1126
+ tilt[i, x_left_lim:x_right_lim],
1127
+ shear[i, x_left_lim:x_right_lim],
1128
+ extraction_width[i],
1129
+ )
1130
+
1131
+ # Sum over the prepared image
1132
+ if collapse_function == "sum":
1133
+ arc = np.ma.sum(img_order, axis=0)
1134
+ elif collapse_function == "mean":
1135
+ arc = np.ma.mean(img_order, axis=0) * img_order.shape[0]
1136
+ elif collapse_function == "median":
1137
+ arc = np.ma.median(img_order, axis=0) * img_order.shape[0]
1138
+ else:
1139
+ raise ValueError(
1140
+ f"Could not determine the arc method, expected one of ('sum', 'mean', 'median'), but got {collapse_function}"
1141
+ )
1142
+
1143
+ # Store results
1144
+ spectrum[i, x_left_lim:x_right_lim] = arc
1145
+ uncertainties[i, x_left_lim:x_right_lim] = (
1146
+ np.sqrt(np.abs(arc * gain + dark + readnoise**2)) / gain
1147
+ )
1148
+
1149
+ if plot: # pragma: no cover
1150
+ plot_comparison(
1151
+ img,
1152
+ orders,
1153
+ spectrum,
1154
+ None,
1155
+ extraction_width,
1156
+ column_range,
1157
+ title=plot_title,
1158
+ )
1159
+
1160
+ return spectrum, uncertainties
1161
+
1162
+
1163
+ def plot_comparison(
1164
+ original, orders, spectrum, slitf, extraction_width, column_range, title=None
1165
+ ): # pragma: no cover
1166
+ nrow, ncol = original.shape
1167
+ nord = len(orders)
1168
+ output = np.zeros((np.sum(extraction_width) + nord, ncol))
1169
+ pos = [0]
1170
+ x = np.arange(ncol)
1171
+ for i in range(nord):
1172
+ ycen = np.polyval(orders[i], x)
1173
+ yb = ycen - extraction_width[i, 0]
1174
+ yt = ycen + extraction_width[i, 1]
1175
+ xl, xr = column_range[i]
1176
+ index = make_index(yb, yt, xl, xr)
1177
+ yl = pos[i]
1178
+ yr = pos[i] + index[0].shape[0]
1179
+ output[yl:yr, xl:xr] = original[index]
1180
+
1181
+ vmin, vmax = np.percentile(output[yl:yr, xl:xr], (5, 95))
1182
+ output[yl:yr, xl:xr] = np.clip(output[yl:yr, xl:xr], vmin, vmax)
1183
+ output[yl:yr, xl:xr] -= vmin
1184
+ output[yl:yr, xl:xr] /= vmax - vmin
1185
+
1186
+ pos += [yr]
1187
+
1188
+ plt.imshow(output, origin="lower", aspect="auto")
1189
+
1190
+ for i in range(nord):
1191
+ try:
1192
+ tmp = spectrum[i, column_range[i, 0] : column_range[i, 1]]
1193
+ # if len(tmp)
1194
+ vmin = np.min(tmp[tmp != 0])
1195
+ tmp = np.copy(spectrum[i])
1196
+ tmp[tmp != 0] -= vmin
1197
+ np.log(tmp, out=tmp, where=tmp > 0)
1198
+ tmp = tmp / np.max(tmp) * 0.9 * (pos[i + 1] - pos[i])
1199
+ tmp += pos[i]
1200
+ tmp[tmp < pos[i]] = pos[i]
1201
+ plt.plot(x, tmp, "r")
1202
+ except Exception:
1203
+ pass
1204
+
1205
+ locs = np.sum(extraction_width, axis=1) + 1
1206
+ locs = np.array([0, *np.cumsum(locs)[:-1]])
1207
+ locs[:-1] += (np.diff(locs) * 0.5).astype(int)
1208
+ locs[-1] += ((output.shape[0] - locs[-1]) * 0.5).astype(int)
1209
+ plt.yticks(locs, range(len(locs)))
1210
+
1211
+ plot_title = "Extracted Spectrum vs. Rectified Image"
1212
+ if title is not None:
1213
+ plot_title = f"{title}\n{plot_title}"
1214
+ plt.title(plot_title)
1215
+ plt.xlabel("x [pixel]")
1216
+ plt.ylabel("order")
1217
+ plt.show()
1218
+
1219
+
1220
+ def extract(
1221
+ img,
1222
+ orders,
1223
+ column_range=None,
1224
+ order_range=None,
1225
+ extraction_width=0.5,
1226
+ extraction_type="optimal",
1227
+ tilt=None,
1228
+ shear=None,
1229
+ sigma_cutoff=0,
1230
+ **kwargs,
1231
+ ):
1232
+ """
1233
+ Extract the spectrum from an image
1234
+
1235
+ Parameters
1236
+ ----------
1237
+ img : array[nrow, ncol](float)
1238
+ observation to extract
1239
+ orders : array[nord, degree](float)
1240
+ polynomial coefficients of the order tracing
1241
+ column_range : array[nord, 2](int), optional
1242
+ range of pixels to use for each order (default: use all)
1243
+ order_range : array[2](int), optional
1244
+ range of orders to extract, orders have to be consecutive (default: use all)
1245
+ extraction_width : array[nord, 2]({float, int}), optional
1246
+ extraction width above and below each order, values below 1.5 are considered relative, while values above are absolute (default: 0.5)
1247
+ extraction_type : {"optimal", "arc", "normalize"}, optional
1248
+ which extraction algorithm to use, "optimal" uses optimal extraction, "arc" uses simple arc extraction, and "normalize" also uses optimal extraction, but returns the normalized image (default: "optimal")
1249
+ tilt : float or array[nord, ncol], optional
1250
+ The tilt (1st order curvature) of the slit for curved extraction. Will use vertical extraction if no tilt is set. (default: None, i.e. tilt = 0)
1251
+ shear : float or array[nord, ncol], optional
1252
+ The shear (2nd order curvature) of the slit for curved extraction (default: None, i.e. shear = 0)
1253
+ polarization : bool, optional
1254
+ if true, pairs of orders are considered to belong to the same order, but different polarization. Only affects the scatter (default: False)
1255
+ **kwargs, optional
1256
+ parameters for extraction functions
1257
+
1258
+ Returns
1259
+ -------
1260
+ spec : array[nord, ncol](float)
1261
+ extracted spectrum for each order
1262
+ uncertainties : array[nord, ncol](float)
1263
+ uncertainties on the spectrum
1264
+
1265
+ if extraction_type == "normalize" instead return
1266
+
1267
+ im_norm : array[nrow, ncol](float)
1268
+ normalized image
1269
+ im_ordr : array[nrow, ncol](float)
1270
+ image with just the orders
1271
+ blaze : array[nord, ncol](float)
1272
+ extracted spectrum (equals blaze if img was the flat field)
1273
+ """
1274
+
1275
+ nrow, ncol = img.shape
1276
+ nord, _ = orders.shape
1277
+ if order_range is None:
1278
+ order_range = (0, nord)
1279
+ if np.isscalar(tilt):
1280
+ n = order_range[1] - order_range[0]
1281
+ tilt = np.full((n, ncol), tilt)
1282
+ if np.isscalar(shear):
1283
+ n = order_range[1] - order_range[0]
1284
+ shear = np.full((n, ncol), shear)
1285
+
1286
+ # Fix the input parameters
1287
+ extraction_width, column_range, orders = fix_parameters(
1288
+ extraction_width, column_range, orders, nrow, ncol, nord
1289
+ )
1290
+ # Limit orders (and related properties) to orders in range
1291
+ nord = order_range[1] - order_range[0]
1292
+ orders = orders[order_range[0] : order_range[1]]
1293
+ column_range = column_range[order_range[0] : order_range[1]]
1294
+ extraction_width = extraction_width[order_range[0] : order_range[1]]
1295
+
1296
+ # if sigma_cutoff > 0:
1297
+ # # Blur the image and mask outliers
1298
+ # img = np.ma.masked_invalid(img, copy=False)
1299
+ # img.data[img.mask] = 0
1300
+ # # Use the median of the surrounding pixels (excluding the pixel itself)
1301
+ # footprint = np.array([[1, 1, 1], [1, 0, 1], [1, 1, 1]])
1302
+ # dilated = median_filter(img, footprint=footprint)
1303
+ # diff = np.ma.abs(img - dilated)
1304
+ # # median = 50%; 3 sigma = 99.73 %
1305
+ # median, std = np.percentile(diff.compressed(), (50, 99.73))
1306
+ # mask = diff > median + sigma_cutoff * std / 3
1307
+ # img[mask] = np.ma.masked
1308
+
1309
+ if extraction_type == "optimal":
1310
+ # the "normal" case, except for wavelength calibration files
1311
+ spectrum, slitfunction, uncertainties = optimal_extraction(
1312
+ img,
1313
+ orders,
1314
+ extraction_width,
1315
+ column_range,
1316
+ tilt=tilt,
1317
+ shear=shear,
1318
+ **kwargs,
1319
+ )
1320
+ elif extraction_type == "normalize":
1321
+ # TODO
1322
+ # Prepare normalized flat field image if necessary
1323
+ # These will be passed and "returned" by reference
1324
+ # I don't like it, but it works for now
1325
+ im_norm = np.zeros_like(img)
1326
+ im_ordr = np.zeros_like(img)
1327
+
1328
+ blaze, _, _ = optimal_extraction(
1329
+ img,
1330
+ orders,
1331
+ extraction_width,
1332
+ column_range,
1333
+ tilt=tilt,
1334
+ shear=shear,
1335
+ normalize=True,
1336
+ im_norm=im_norm,
1337
+ im_ordr=im_ordr,
1338
+ **kwargs,
1339
+ )
1340
+ threshold_lower = kwargs.get("threshold_lower", 0)
1341
+ im_norm[im_norm <= threshold_lower] = 1
1342
+ im_ordr[im_ordr <= threshold_lower] = 1
1343
+ return im_norm, im_ordr, blaze, column_range
1344
+ elif extraction_type == "arc":
1345
+ # Simpler extraction, just summing along the arc of the order
1346
+ spectrum, uncertainties = arc_extraction(
1347
+ img,
1348
+ orders,
1349
+ extraction_width,
1350
+ column_range,
1351
+ tilt=tilt,
1352
+ shear=shear,
1353
+ **kwargs,
1354
+ )
1355
+ slitfunction = None
1356
+ else:
1357
+ raise ValueError(
1358
+ f"Parameter 'extraction_type' not understood. Expected 'optimal', 'normalize', or 'arc' bug got {extraction_type}."
1359
+ )
1360
+
1361
+ return spectrum, uncertainties, slitfunction, column_range
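
The swath-combination scheme sketched in the extract_spectrum docstring can be illustrated with a few standalone lines. This is an illustrative sketch only (the overlap length and spectra are made-up values, not part of the wheel); it mirrors the triangular-weight construction used in extract_spectrum.

import numpy as np

# Two overlapping swath spectra are blended with linear (triangular) weights:
# the earlier swath fades out across the overlap while the later one fades in.
overlap = 4
triangle = np.linspace(0, 1, overlap + 1, endpoint=False)[1:]  # endpoints 0 and 1 excluded
weight_prev = 1 - triangle                        # weight of the earlier swath in the overlap
weight_next = triangle                            # weight of the later swath in the overlap
spec_prev = np.array([10.0, 10.0, 10.0, 10.0])    # made-up overlap values
spec_next = np.array([12.0, 12.0, 12.0, 12.0])    # made-up overlap values
combined = spec_prev * weight_prev + spec_next * weight_next
# combined ramps smoothly from the earlier swath's values towards the later swath's values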
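
For orientation, a minimal usage sketch of the extract() entry point added in this file. It is not taken from the package documentation; the image, the single order polynomial, and the parameter values are assumptions chosen only to show the call signature.

import numpy as np
from pyreduce.extract import extract

img = np.random.default_rng(0).normal(100.0, 1.0, (512, 2048))  # placeholder observation
orders = np.array([[0.0, 0.0, 256.0]])   # one order traced as a flat line at row 256
spec, unc, slitf, column_range = extract(
    img,
    orders,
    extraction_width=10,      # values above 1.5 are treated as absolute pixel widths
    extraction_type="arc",    # simple summation; "optimal" runs the full slit decomposition
)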