pyreduce-astro 0.7a4-cp314-cp314-win_amd64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (182)
  1. pyreduce/__init__.py +67 -0
  2. pyreduce/__main__.py +322 -0
  3. pyreduce/cli.py +342 -0
  4. pyreduce/clib/Release/_slitfunc_2d.cp311-win_amd64.exp +0 -0
  5. pyreduce/clib/Release/_slitfunc_2d.cp311-win_amd64.lib +0 -0
  6. pyreduce/clib/Release/_slitfunc_2d.cp312-win_amd64.exp +0 -0
  7. pyreduce/clib/Release/_slitfunc_2d.cp312-win_amd64.lib +0 -0
  8. pyreduce/clib/Release/_slitfunc_2d.cp313-win_amd64.exp +0 -0
  9. pyreduce/clib/Release/_slitfunc_2d.cp313-win_amd64.lib +0 -0
  10. pyreduce/clib/Release/_slitfunc_2d.cp314-win_amd64.exp +0 -0
  11. pyreduce/clib/Release/_slitfunc_2d.cp314-win_amd64.lib +0 -0
  12. pyreduce/clib/Release/_slitfunc_2d.obj +0 -0
  13. pyreduce/clib/Release/_slitfunc_bd.cp311-win_amd64.exp +0 -0
  14. pyreduce/clib/Release/_slitfunc_bd.cp311-win_amd64.lib +0 -0
  15. pyreduce/clib/Release/_slitfunc_bd.cp312-win_amd64.exp +0 -0
  16. pyreduce/clib/Release/_slitfunc_bd.cp312-win_amd64.lib +0 -0
  17. pyreduce/clib/Release/_slitfunc_bd.cp313-win_amd64.exp +0 -0
  18. pyreduce/clib/Release/_slitfunc_bd.cp313-win_amd64.lib +0 -0
  19. pyreduce/clib/Release/_slitfunc_bd.cp314-win_amd64.exp +0 -0
  20. pyreduce/clib/Release/_slitfunc_bd.cp314-win_amd64.lib +0 -0
  21. pyreduce/clib/Release/_slitfunc_bd.obj +0 -0
  22. pyreduce/clib/__init__.py +0 -0
  23. pyreduce/clib/_slitfunc_2d.cp311-win_amd64.pyd +0 -0
  24. pyreduce/clib/_slitfunc_2d.cp312-win_amd64.pyd +0 -0
  25. pyreduce/clib/_slitfunc_2d.cp313-win_amd64.pyd +0 -0
  26. pyreduce/clib/_slitfunc_2d.cp314-win_amd64.pyd +0 -0
  27. pyreduce/clib/_slitfunc_bd.cp311-win_amd64.pyd +0 -0
  28. pyreduce/clib/_slitfunc_bd.cp312-win_amd64.pyd +0 -0
  29. pyreduce/clib/_slitfunc_bd.cp313-win_amd64.pyd +0 -0
  30. pyreduce/clib/_slitfunc_bd.cp314-win_amd64.pyd +0 -0
  31. pyreduce/clib/build_extract.py +75 -0
  32. pyreduce/clib/slit_func_2d_xi_zeta_bd.c +1313 -0
  33. pyreduce/clib/slit_func_2d_xi_zeta_bd.h +55 -0
  34. pyreduce/clib/slit_func_bd.c +362 -0
  35. pyreduce/clib/slit_func_bd.h +17 -0
  36. pyreduce/clipnflip.py +147 -0
  37. pyreduce/combine_frames.py +861 -0
  38. pyreduce/configuration.py +191 -0
  39. pyreduce/continuum_normalization.py +329 -0
  40. pyreduce/cwrappers.py +404 -0
  41. pyreduce/datasets.py +238 -0
  42. pyreduce/echelle.py +413 -0
  43. pyreduce/estimate_background_scatter.py +130 -0
  44. pyreduce/extract.py +1362 -0
  45. pyreduce/extraction_width.py +77 -0
  46. pyreduce/instruments/__init__.py +0 -0
  47. pyreduce/instruments/aj.py +9 -0
  48. pyreduce/instruments/aj.yaml +51 -0
  49. pyreduce/instruments/andes.py +102 -0
  50. pyreduce/instruments/andes.yaml +72 -0
  51. pyreduce/instruments/common.py +711 -0
  52. pyreduce/instruments/common.yaml +57 -0
  53. pyreduce/instruments/crires_plus.py +103 -0
  54. pyreduce/instruments/crires_plus.yaml +101 -0
  55. pyreduce/instruments/filters.py +195 -0
  56. pyreduce/instruments/harpn.py +203 -0
  57. pyreduce/instruments/harpn.yaml +140 -0
  58. pyreduce/instruments/harps.py +312 -0
  59. pyreduce/instruments/harps.yaml +144 -0
  60. pyreduce/instruments/instrument_info.py +140 -0
  61. pyreduce/instruments/jwst_miri.py +29 -0
  62. pyreduce/instruments/jwst_miri.yaml +53 -0
  63. pyreduce/instruments/jwst_niriss.py +98 -0
  64. pyreduce/instruments/jwst_niriss.yaml +60 -0
  65. pyreduce/instruments/lick_apf.py +35 -0
  66. pyreduce/instruments/lick_apf.yaml +60 -0
  67. pyreduce/instruments/mcdonald.py +123 -0
  68. pyreduce/instruments/mcdonald.yaml +56 -0
  69. pyreduce/instruments/metis_ifu.py +45 -0
  70. pyreduce/instruments/metis_ifu.yaml +62 -0
  71. pyreduce/instruments/metis_lss.py +45 -0
  72. pyreduce/instruments/metis_lss.yaml +62 -0
  73. pyreduce/instruments/micado.py +45 -0
  74. pyreduce/instruments/micado.yaml +62 -0
  75. pyreduce/instruments/models.py +257 -0
  76. pyreduce/instruments/neid.py +156 -0
  77. pyreduce/instruments/neid.yaml +61 -0
  78. pyreduce/instruments/nirspec.py +215 -0
  79. pyreduce/instruments/nirspec.yaml +63 -0
  80. pyreduce/instruments/nte.py +42 -0
  81. pyreduce/instruments/nte.yaml +55 -0
  82. pyreduce/instruments/uves.py +46 -0
  83. pyreduce/instruments/uves.yaml +65 -0
  84. pyreduce/instruments/xshooter.py +39 -0
  85. pyreduce/instruments/xshooter.yaml +63 -0
  86. pyreduce/make_shear.py +607 -0
  87. pyreduce/masks/mask_crires_plus_det1.fits.gz +0 -0
  88. pyreduce/masks/mask_crires_plus_det2.fits.gz +0 -0
  89. pyreduce/masks/mask_crires_plus_det3.fits.gz +0 -0
  90. pyreduce/masks/mask_ctio_chiron.fits.gz +0 -0
  91. pyreduce/masks/mask_elodie.fits.gz +0 -0
  92. pyreduce/masks/mask_feros3.fits.gz +0 -0
  93. pyreduce/masks/mask_flames_giraffe.fits.gz +0 -0
  94. pyreduce/masks/mask_harps_blue.fits.gz +0 -0
  95. pyreduce/masks/mask_harps_red.fits.gz +0 -0
  96. pyreduce/masks/mask_hds_blue.fits.gz +0 -0
  97. pyreduce/masks/mask_hds_red.fits.gz +0 -0
  98. pyreduce/masks/mask_het_hrs_2x5.fits.gz +0 -0
  99. pyreduce/masks/mask_jwst_miri_lrs_slitless.fits.gz +0 -0
  100. pyreduce/masks/mask_jwst_niriss_gr700xd.fits.gz +0 -0
  101. pyreduce/masks/mask_lick_apf_.fits.gz +0 -0
  102. pyreduce/masks/mask_mcdonald.fits.gz +0 -0
  103. pyreduce/masks/mask_nes.fits.gz +0 -0
  104. pyreduce/masks/mask_nirspec_nirspec.fits.gz +0 -0
  105. pyreduce/masks/mask_sarg.fits.gz +0 -0
  106. pyreduce/masks/mask_sarg_2x2a.fits.gz +0 -0
  107. pyreduce/masks/mask_sarg_2x2b.fits.gz +0 -0
  108. pyreduce/masks/mask_subaru_hds_red.fits.gz +0 -0
  109. pyreduce/masks/mask_uves_blue.fits.gz +0 -0
  110. pyreduce/masks/mask_uves_blue_binned_2_2.fits.gz +0 -0
  111. pyreduce/masks/mask_uves_middle.fits.gz +0 -0
  112. pyreduce/masks/mask_uves_middle_2x2_split.fits.gz +0 -0
  113. pyreduce/masks/mask_uves_middle_binned_2_2.fits.gz +0 -0
  114. pyreduce/masks/mask_uves_red.fits.gz +0 -0
  115. pyreduce/masks/mask_uves_red_2x2.fits.gz +0 -0
  116. pyreduce/masks/mask_uves_red_2x2_split.fits.gz +0 -0
  117. pyreduce/masks/mask_uves_red_binned_2_2.fits.gz +0 -0
  118. pyreduce/masks/mask_xshooter_nir.fits.gz +0 -0
  119. pyreduce/pipeline.py +619 -0
  120. pyreduce/rectify.py +138 -0
  121. pyreduce/reduce.py +2065 -0
  122. pyreduce/settings/settings_AJ.json +19 -0
  123. pyreduce/settings/settings_ANDES.json +89 -0
  124. pyreduce/settings/settings_CRIRES_PLUS.json +89 -0
  125. pyreduce/settings/settings_HARPN.json +73 -0
  126. pyreduce/settings/settings_HARPS.json +69 -0
  127. pyreduce/settings/settings_JWST_MIRI.json +55 -0
  128. pyreduce/settings/settings_JWST_NIRISS.json +55 -0
  129. pyreduce/settings/settings_LICK_APF.json +62 -0
  130. pyreduce/settings/settings_MCDONALD.json +58 -0
  131. pyreduce/settings/settings_METIS_IFU.json +77 -0
  132. pyreduce/settings/settings_METIS_LSS.json +77 -0
  133. pyreduce/settings/settings_MICADO.json +78 -0
  134. pyreduce/settings/settings_NEID.json +73 -0
  135. pyreduce/settings/settings_NIRSPEC.json +58 -0
  136. pyreduce/settings/settings_NTE.json +60 -0
  137. pyreduce/settings/settings_UVES.json +54 -0
  138. pyreduce/settings/settings_XSHOOTER.json +78 -0
  139. pyreduce/settings/settings_pyreduce.json +184 -0
  140. pyreduce/settings/settings_schema.json +850 -0
  141. pyreduce/tools/__init__.py +0 -0
  142. pyreduce/tools/combine.py +117 -0
  143. pyreduce/trace.py +979 -0
  144. pyreduce/util.py +1366 -0
  145. pyreduce/wavecal/MICADO_HK_3arcsec_chip5.npz +0 -0
  146. pyreduce/wavecal/atlas/thar.fits +4946 -13
  147. pyreduce/wavecal/atlas/thar_list.txt +4172 -0
  148. pyreduce/wavecal/atlas/une.fits +0 -0
  149. pyreduce/wavecal/convert.py +38 -0
  150. pyreduce/wavecal/crires_plus_J1228_Open_det1.npz +0 -0
  151. pyreduce/wavecal/crires_plus_J1228_Open_det2.npz +0 -0
  152. pyreduce/wavecal/crires_plus_J1228_Open_det3.npz +0 -0
  153. pyreduce/wavecal/harpn_harpn_2D.npz +0 -0
  154. pyreduce/wavecal/harps_blue_2D.npz +0 -0
  155. pyreduce/wavecal/harps_blue_pol_2D.npz +0 -0
  156. pyreduce/wavecal/harps_red_2D.npz +0 -0
  157. pyreduce/wavecal/harps_red_pol_2D.npz +0 -0
  158. pyreduce/wavecal/mcdonald.npz +0 -0
  159. pyreduce/wavecal/metis_lss_l_2D.npz +0 -0
  160. pyreduce/wavecal/metis_lss_m_2D.npz +0 -0
  161. pyreduce/wavecal/nirspec_K2.npz +0 -0
  162. pyreduce/wavecal/uves_blue_360nm_2D.npz +0 -0
  163. pyreduce/wavecal/uves_blue_390nm_2D.npz +0 -0
  164. pyreduce/wavecal/uves_blue_437nm_2D.npz +0 -0
  165. pyreduce/wavecal/uves_middle_2x2_2D.npz +0 -0
  166. pyreduce/wavecal/uves_middle_565nm_2D.npz +0 -0
  167. pyreduce/wavecal/uves_middle_580nm_2D.npz +0 -0
  168. pyreduce/wavecal/uves_middle_600nm_2D.npz +0 -0
  169. pyreduce/wavecal/uves_middle_665nm_2D.npz +0 -0
  170. pyreduce/wavecal/uves_middle_860nm_2D.npz +0 -0
  171. pyreduce/wavecal/uves_red_580nm_2D.npz +0 -0
  172. pyreduce/wavecal/uves_red_600nm_2D.npz +0 -0
  173. pyreduce/wavecal/uves_red_665nm_2D.npz +0 -0
  174. pyreduce/wavecal/uves_red_760nm_2D.npz +0 -0
  175. pyreduce/wavecal/uves_red_860nm_2D.npz +0 -0
  176. pyreduce/wavecal/xshooter_nir.npz +0 -0
  177. pyreduce/wavelength_calibration.py +1871 -0
  178. pyreduce_astro-0.7a4.dist-info/METADATA +106 -0
  179. pyreduce_astro-0.7a4.dist-info/RECORD +182 -0
  180. pyreduce_astro-0.7a4.dist-info/WHEEL +4 -0
  181. pyreduce_astro-0.7a4.dist-info/entry_points.txt +2 -0
  182. pyreduce_astro-0.7a4.dist-info/licenses/LICENSE +674 -0
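The listing above can be cross-checked against the wheel itself: a wheel is an ordinary zip archive, and its dist-info/RECORD enumerates every shipped path. A minimal sketch, assuming the wheel filename follows the dist-info name shown above:

    from zipfile import ZipFile

    # Wheels are zip archives; print every stored path so it can be
    # compared against the file list / RECORD above.
    with ZipFile("pyreduce_astro-0.7a4-cp314-cp314-win_amd64.whl") as whl:
        for name in whl.namelist():
            print(name)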
pyreduce/extract.py ADDED
@@ -0,0 +1,1362 @@
1
+ """Module for extracting data from observations
2
+
3
+ Authors
4
+ -------
5
+
6
+ Version
7
+ -------
8
+
9
+ License
10
+ -------
11
+ """
12
+
13
+ import logging
14
+
15
+ import matplotlib.pyplot as plt
16
+ import numpy as np
17
+ from scipy.interpolate import interp1d
18
+ from tqdm import tqdm
19
+
20
+ from . import util
21
+ from .cwrappers import slitfunc_curved
22
+ from .util import make_index
23
+
24
+ logger = logging.getLogger(__name__)
25
+
26
+
27
+ class ProgressPlot: # pragma: no cover
28
+ def __init__(self, nrow, ncol, nslitf, nbad=1000, title=None):
29
+ self.nrow = nrow
30
+ self.ncol = ncol
31
+ self.nslitf = nslitf
32
+
33
+ self.nbad = nbad
34
+
35
+ plt.ion()
36
+ self.fig = plt.figure(figsize=(12, 4))
37
+
38
+ # self.ax1 = self.fig.add_subplot(231, projection="3d")
39
+ self.ax1 = self.fig.add_subplot(231)
40
+ self.ax1.set_title("Swath")
41
+ self.ax1.set_ylabel("y [pixel]")
42
+ self.ax2 = self.fig.add_subplot(132)
43
+ self.ax2.set_title("Spectrum")
44
+ self.ax2.set_xlabel("x [pixel]")
45
+ self.ax2.set_ylabel("flux [arb. unit]")
46
+ self.ax2.set_xlim((0, ncol))
47
+ self.ax3 = self.fig.add_subplot(133)
48
+ self.ax3.set_title("Slit")
49
+ self.ax3.set_xlabel("y [pixel]")
50
+ self.ax3.set_ylabel("contribution [1]")
51
+ self.ax3.set_xlim((0, nrow))
52
+ # self.ax4 = self.fig.add_subplot(234, projection="3d")
53
+ self.ax4 = self.fig.add_subplot(234)
54
+ self.ax4.set_title("Model")
55
+ self.ax4.set_xlabel("x [pixel]")
56
+ self.ax4.set_ylabel("y [pixel]")
57
+
58
+ self.title = title
59
+ if title is not None:
60
+ self.fig.suptitle(title)
61
+
62
+ self.fig.tight_layout()
63
+
64
+ # Just plot empty pictures, to create the plots
65
+ # Update the data later
66
+ img = np.ones((nrow, ncol))
67
+ # y, x = np.indices((nrow, ncol))
68
+ # self.im_obs = self.ax1.plot_surface(x, y, img)
69
+ # self.im_model = self.ax4.plot_surface(x, y, img)
70
+ self.im_obs = self.ax1.imshow(img)
71
+ self.im_model = self.ax4.imshow(img)
72
+
73
+ (self.dots_spec,) = self.ax2.plot(
74
+ np.zeros(nrow * ncol), np.zeros(nrow * ncol), ".r", ms=2, alpha=0.6
75
+ )
76
+ (self.line_spec,) = self.ax2.plot(np.zeros(ncol), "-k")
77
+ (self.mask_spec,) = self.ax2.plot(np.zeros(self.nbad), "Pg")
78
+ (self.dots_slit,) = self.ax3.plot(
79
+ np.zeros(nrow * ncol), np.zeros(nrow * ncol), ".r", ms=2, alpha=0.6
80
+ )
81
+ (self.line_slit,) = self.ax3.plot(np.zeros(nrow), "-k", lw=2)
82
+ (self.mask_slit,) = self.ax3.plot(np.zeros(self.nbad), "Pg")
83
+
84
+ # self.ax1.set_zscale("log")
85
+ # self.ax4.set_zscale("log")
86
+
87
+ self.fig.canvas.draw()
88
+ self.fig.canvas.flush_events()
89
+
90
+ def fix_linear(self, data, limit, fill=0):
91
+ """Assures the size of the 1D array data is equal to limit"""
92
+
93
+ if len(data) > limit:
94
+ data = data[:limit]
95
+ elif len(data) < limit:
96
+ padding = np.full(limit - len(data), fill, dtype=data.dtype)
97
+ data = np.concatenate((data, padding))
98
+ return data
99
+
100
+ def plot(self, img, spec, slitf, model, ycen, mask, ord_num, left, right):
101
+ img = np.copy(img)
102
+ spec = np.copy(spec)
103
+ slitf = np.copy(slitf)
104
+ ycen = np.copy(ycen)
105
+
106
+ ny = img.shape[0]
107
+ nspec = img.shape[1]
108
+ x_spec, y_spec = self.get_spec(img, spec, slitf, ycen)
109
+ x_slit, y_slit = self.get_slitf(img, spec, slitf, ycen)
110
+ ycen = ycen + ny / 2
111
+
112
+ old = np.linspace(-1, ny, len(slitf))
113
+
114
+ # Fix Sizes
115
+ mask_spec_x = self.fix_linear(x_spec[mask.ravel()], self.nbad, fill=np.nan)
116
+ mask_spec = self.fix_linear(y_spec[mask.ravel()], self.nbad, fill=np.nan)
117
+ mask_slit_x = self.fix_linear(x_slit[mask.ravel()], self.nbad, fill=np.nan)
118
+ mask_slit = self.fix_linear(y_slit[mask.ravel()], self.nbad, fill=np.nan)
119
+
120
+ ycen = self.fix_linear(ycen, self.ncol)
121
+ x_spec = self.fix_linear(x_spec, self.ncol * self.nrow)
122
+ y_spec = self.fix_linear(y_spec, self.ncol * self.nrow)
123
+ spec = self.fix_linear(spec, self.ncol)
124
+ x_slit = self.fix_linear(x_slit, self.ncol * self.nrow)
125
+ y_slit = self.fix_linear(y_slit, self.ncol * self.nrow)
126
+ old = self.fix_linear(old, self.nslitf)
127
+ sf = self.fix_linear(slitf, self.nslitf)
128
+
129
+ # Update Data
130
+ model = np.clip(model, 0, np.max(model[5:-5, 5:-5]) * 1.1)
131
+ self.im_obs.remove()
132
+ img = np.clip(img, 0, np.max(model) * 1.1)
133
+ # y, x = np.indices(img.shape)
134
+ # self.im_obs = self.ax1.plot_surface(x, y, img)
135
+ self.im_obs = self.ax1.imshow(img, aspect="auto", origin="lower")
136
+ vmin, vmax = self.im_obs.norm.vmin, self.im_obs.norm.vmax
137
+ self.im_model.remove()
138
+ # y, x = np.indices(model.shape)
139
+ # self.im_model = self.ax4.plot_surface(x, y, model)
140
+ self.im_model = self.ax4.imshow(
141
+ model, aspect="auto", origin="lower", vmin=vmin, vmax=vmax
142
+ )
143
+
144
+ # self.line_ycen.set_ydata(ycen)
145
+ self.dots_spec.set_xdata(x_spec)
146
+ self.dots_spec.set_ydata(y_spec)
147
+ self.line_spec.set_ydata(spec)
148
+
149
+ self.mask_spec.set_xdata(mask_spec_x)
150
+ self.mask_spec.set_ydata(mask_spec)
151
+
152
+ self.dots_slit.set_xdata(x_slit)
153
+ self.dots_slit.set_ydata(y_slit)
154
+ self.line_slit.set_xdata(old)
155
+ self.line_slit.set_ydata(sf)
156
+
157
+ self.mask_slit.set_xdata(mask_slit_x)
158
+ self.mask_slit.set_ydata(mask_slit)
159
+
160
+ self.ax2.set_xlim((0, nspec - 1))
161
+ limit = np.nanmax(spec[5:-5]) * 1.1
162
+ if not np.isnan(limit):
163
+ self.ax2.set_ylim((0, limit))
164
+
165
+ self.ax3.set_xlim((0, ny - 1))
166
+ limit = np.nanmax(sf) * 1.1
167
+ if not np.isnan(limit):
168
+ self.ax3.set_ylim((0, limit))
169
+
170
+ title = f"Order {ord_num}, Columns {left} - {right}"
171
+ if self.title is not None:
172
+ title = f"{self.title}\n{title}"
173
+ self.fig.suptitle(title)
174
+ self.fig.canvas.draw()
175
+ self.fig.canvas.flush_events()
176
+
177
+ def close(self):
178
+ plt.ioff()
179
+ plt.close()
180
+
181
+ def get_spec(self, img, spec, slitf, ycen):
182
+ """get the spectrum corrected by the slit function"""
183
+ nrow, ncol = img.shape
184
+ x, y = np.indices(img.shape)
185
+ ycen = ycen - ycen.astype(int)
186
+
187
+ x = x - ycen + 0.5
188
+ old = np.linspace(-1, nrow - 1 + 1, len(slitf))
189
+ sf = np.interp(x, old, slitf)
190
+
191
+ x = img / sf
192
+
193
+ x = x.ravel()
194
+ y = y.ravel()
195
+ return y, x
196
+
197
+ def get_slitf(self, img, spec, slitf, ycen):
198
+ """get the slit function"""
199
+ x = np.indices(img.shape)[0]
200
+ ycen = ycen - ycen.astype(int)
201
+
202
+ if np.any(spec == 0):
203
+ i = np.arange(len(spec))
204
+ try:
205
+ spec = interp1d(
206
+ i[spec != 0], spec[spec != 0], fill_value="extrapolate"
207
+ )(i)
208
+ except ValueError:
209
+ spec[spec == 0] = np.median(spec)
210
+ y = img / spec[None, :]
211
+ y = y.ravel()
212
+
213
+ x = x - ycen + 0.5
214
+ x = x.ravel()
215
+ return x, y
216
+
217
+
218
+ class Swath:
219
+ def __init__(self, nswath):
220
+ self.nswath = nswath
221
+ self.spec = [None] * nswath
222
+ self.slitf = [None] * nswath
223
+ self.model = [None] * nswath
224
+ self.unc = [None] * nswath
225
+ self.mask = [None] * nswath
226
+ self.info = [None] * nswath
227
+
228
+ def __len__(self):
229
+ return self.nswath
230
+
231
+ def __getitem__(self, key):
232
+ return (
233
+ self.spec[key],
234
+ self.slitf[key],
235
+ self.model[key],
236
+ self.unc[key],
237
+ self.mask[key],
238
+ self.info[key],
239
+ )
240
+
241
+ def __setitem__(self, key, value):
242
+ self.spec[key] = value[0]
243
+ self.slitf[key] = value[1]
244
+ self.model[key] = value[2]
245
+ self.unc[key] = value[3]
246
+ self.mask[key] = value[4]
247
+ self.info[key] = value[5]
248
+
249
+
250
+ def fix_parameters(xwd, cr, orders, nrow, ncol, nord, ignore_column_range=False):
251
+ """Fix extraction width and column range, so that all pixels used are within the image.
252
+ I.e. the column range is cut so that everything is within the image
253
+
254
+ Parameters
255
+ ----------
256
+ xwd : float, array
257
+ Extraction width, either one value for all orders, or the whole array
258
+ cr : 2-tuple(int), array
259
+ Column range, either one value for all orders, or the whole array
260
+ orders : array
261
+ polynomial coefficients that describe each order
262
+ nrow : int
263
+ Number of rows in the image
264
+ ncol : int
265
+ Number of columns in the image
266
+ nord : int
267
+ Number of orders in the image
268
+ ignore_column_range : bool, optional
269
+ if True, the column range is left unchanged; this may lead to problems with the extraction (default: False)
270
+
271
+ Returns
272
+ -------
273
+ xwd : array
274
+ fixed extraction width
275
+ cr : array
276
+ fixed column range
277
+ orders : array
278
+ the same orders as before
279
+ """
280
+
281
+ if xwd is None:
282
+ xwd = 0.5
283
+ if np.isscalar(xwd):
284
+ xwd = np.tile([xwd, xwd], (nord, 1))
285
+ else:
286
+ xwd = np.asarray(xwd)
287
+ if xwd.ndim == 1:
288
+ xwd = np.tile(xwd, (nord, 1))
289
+
290
+ if cr is None:
291
+ cr = np.tile([0, ncol], (nord, 1))
292
+ else:
293
+ cr = np.asarray(cr)
294
+ if cr.ndim == 1:
295
+ cr = np.tile(cr, (nord, 1))
296
+
297
+ orders = np.asarray(orders)
298
+
299
+ xwd = np.array([xwd[0], *xwd, xwd[-1]])
300
+ cr = np.array([cr[0], *cr, cr[-1]])
301
+ orders = extend_orders(orders, nrow)
302
+
303
+ xwd = fix_extraction_width(xwd, orders, cr, ncol)
304
+ if not ignore_column_range:
305
+ cr, orders = fix_column_range(cr, orders, xwd, nrow, ncol)
306
+
307
+ orders = orders[1:-1]
308
+ xwd = xwd[1:-1]
309
+ cr = cr[1:-1]
310
+
311
+ return xwd, cr, orders
312
+
313
+
314
+ def extend_orders(orders, nrow):
315
+ """Extrapolate extra orders above and below the existing ones
316
+
317
+ Parameters
318
+ ----------
319
+ orders : array[nord, degree]
320
+ order tracing coefficients
321
+ nrow : int
322
+ number of rows in the image
323
+
324
+ Returns
325
+ -------
326
+ orders : array[nord + 2, degree]
327
+ extended orders
328
+ """
329
+
330
+ nord, ncoef = orders.shape
331
+
332
+ if nord > 1:
333
+ order_low = 2 * orders[0] - orders[1]
334
+ order_high = 2 * orders[-1] - orders[-2]
335
+ else:
336
+ order_low = [0 for _ in range(ncoef)]
337
+ order_high = [0 for _ in range(ncoef - 1)] + [nrow]
338
+
339
+ return np.array([order_low, *orders, order_high])
340
+
341
+
342
+ def fix_extraction_width(xwd, orders, cr, ncol):
343
+ """Convert fractional extraction width to pixel range
344
+
345
+ Parameters
346
+ ----------
347
+ extraction_width : array[nord, 2]
348
+ current extraction width, in pixels or fractions (for values below 1.5)
349
+ orders : array[nord, degree]
350
+ order tracing coefficients
351
+ column_range : array[nord, 2]
352
+ column range to use
353
+ ncol : int
354
+ number of columns in image
355
+
356
+ Returns
357
+ -------
358
+ extraction_width : array[nord, 2]
359
+ updated extraction width in pixels
360
+ """
361
+
362
+ if not np.all(xwd > 1.5):
363
+ # if extraction width is in relative scale transform to pixel scale
364
+ x = np.arange(ncol)
365
+ for i in range(1, len(xwd) - 1):
366
+ for j in [0, 1]:
367
+ if xwd[i, j] < 1.5:
368
+ k = i - 1 if j == 0 else i + 1
369
+ left = max(cr[[i, k], 0])
370
+ right = min(cr[[i, k], 1])
371
+
372
+ if right < left:
373
+ raise ValueError(
374
+ f"Check your column ranges. Orders {i} and {k} are weird"
375
+ )
376
+
377
+ current = np.polyval(orders[i], x[left:right])
378
+ below = np.polyval(orders[k], x[left:right])
379
+ xwd[i, j] *= np.min(np.abs(current - below))
380
+
381
+ xwd[0] = xwd[1]
382
+ xwd[-1] = xwd[-2]
383
+
384
+ xwd = np.ceil(xwd).astype(int)
385
+
386
+ return xwd
387
+
388
+
389
+ def fix_column_range(column_range, orders, extraction_width, nrow, ncol):
390
+ """Fix the column range, so that no pixels outside the image will be accessed (Thus avoiding errors)
391
+
392
+ Parameters
393
+ ----------
394
+ column_range : array[nord, 2]
395
+ current column range
396
+ orders : array[nord, degree]
397
+ order tracing coefficients
398
+ extraction_width : array[nord, 2]
399
+ extraction width in pixels, (below, above)
400
+ nrow : int
401
+ number of rows in the image
402
+ ncol : int
403
+ number of columns in the image
404
+
405
+ Returns
406
+ -------
407
+ column_range : array[nord, 2]
408
+ updated column range
409
+ orders : array[nord, degree]
410
+ order tracing coefficients (may have rows removed if no valid pixels)
411
+ """
412
+
413
+ ix = np.arange(ncol)
414
+ to_remove = []
415
+ # Loop over non extension orders
416
+ for i, order in zip(range(1, len(orders) - 1), orders[1:-1], strict=False):
417
+ # Shift order trace up/down by extraction_width
418
+ coeff_bot, coeff_top = np.copy(order), np.copy(order)
419
+ coeff_bot[-1] -= extraction_width[i, 0]
420
+ coeff_top[-1] += extraction_width[i, 1]
421
+
422
+ y_bot = np.polyval(coeff_bot, ix) # low edge of arc
423
+ y_top = np.polyval(coeff_top, ix) # high edge of arc
424
+
425
+ # find regions of pixels inside the image
426
+ # then use the region that most closely resembles the existing column range (from order tracing)
427
+ # but clip it to the existing column range (order tracing polynomials are not well defined outside the original range)
428
+ points_in_image = np.where((y_bot >= 0) & (y_top < nrow))[0]
429
+
430
+ if len(points_in_image) == 0:
431
+ # print(y_bot, y_top,nrow, ncol, points_in_image)
432
+ logger.warning(
433
+ f"No pixels are completely within the extraction width for order {i}, removing it."
434
+ )
435
+ to_remove += [i]
436
+ continue
437
+
438
+ regions = np.where(np.diff(points_in_image) != 1)[0]
439
+ regions = [(r, r + 1) for r in regions]
440
+ regions = [
441
+ points_in_image[0],
442
+ *points_in_image[(regions,)].ravel(),
443
+ points_in_image[-1],
444
+ ]
445
+ regions = [[regions[i], regions[i + 1] + 1] for i in range(0, len(regions), 2)]
446
+ overlap = [
447
+ min(reg[1], column_range[i, 1]) - max(reg[0], column_range[i, 0])
448
+ for reg in regions
449
+ ]
450
+ iregion = np.argmax(overlap)
451
+ column_range[i] = np.clip(
452
+ regions[iregion], column_range[i, 0], column_range[i, 1]
453
+ )
454
+
455
+ column_range[0] = column_range[1]
456
+ column_range[-1] = column_range[-2]
457
+
458
+ if to_remove:
459
+ column_range = np.delete(column_range, to_remove, axis=0)
460
+ orders = np.delete(orders, to_remove, axis=0)
461
+
462
+ return column_range, orders
463
+
464
+
465
+ def make_bins(swath_width, xlow, xhigh, ycen):
466
+ """Create bins for the swathes
467
+ Bins are roughly equally sized, roughly swath_width long (if given),
468
+ and overlap each other by about half a swath
469
+
470
+ Parameters
471
+ ----------
472
+ swath_width : {int, None}
473
+ initial value for the swath_width, bins will have roughly that size, but exact value may change
474
+ if swath_width is None, a good value is determined from the data
475
+ xlow : int
476
+ lower bound for x values
477
+ xhigh : int
478
+ upper bound for x values
479
+ ycen : array[ncol]
480
+ center of the order trace
481
+
482
+ Returns
483
+ -------
484
+ nbin : int
485
+ number of bins
486
+ bins_start : array[nbin]
487
+ left(beginning) side of the bins
488
+ bins_end : array[nbin]
489
+ right(ending) side of the bins
490
+ """
491
+
492
+ if swath_width is None:
493
+ ncol = len(ycen)
494
+ i = np.unique(ycen.astype(int)) # Points of row crossing
495
+ # ni = len(i) # This is how many times this order crosses to the next row
496
+ if len(i) > 1: # Curved order crosses rows
497
+ i = np.sum(i[1:] - i[:-1]) / (len(i) - 1)
498
+ nbin = np.clip(
499
+ int(np.round(ncol / i)) // 3, 3, 20
500
+ ) # number of swaths along the order
501
+ else: # Perfectly aligned orders
502
+ nbin = np.clip(ncol // 400, 3, None) # Still follow the changes in PSF
503
+ nbin = nbin * (xhigh - xlow) // ncol # Adjust for the true order length
504
+ else:
505
+ nbin = np.clip(int(np.round((xhigh - xlow) / swath_width)), 1, None)
506
+
507
+ bins = np.linspace(xlow, xhigh, 2 * nbin + 1) # boundaries of bins
508
+ bins_start = np.ceil(bins[:-2]).astype(int) # beginning of each bin
509
+ bins_end = np.floor(bins[2:]).astype(int) # end of each bin
510
+
511
+ return nbin, bins_start, bins_end
512
+
513
+
514
+ def calc_telluric_correction(telluric, img): # pragma: no cover
515
+ """Calculate telluric correction
516
+
517
+ If telluric is set to an integer larger than 1, it is used as the
518
+ offset from the order center line. The sky is then estimated by computing
519
+ median signal between this offset and the upper/lower limit of the
520
+ extraction window.
521
+
522
+ Parameters
523
+ ----------
524
+ telluric : int
525
+ telluric correction parameter
526
+ img : array
527
+ image of the swath
528
+
529
+ Returns
530
+ -------
531
+ tell : array
532
+ telluric correction
533
+ """
534
+ width, height = img.shape
535
+
536
+ tel_lim = telluric if telluric > 5 and telluric < height / 2 else min(5, height / 3)
537
+ tel = np.sum(img, axis=0)
538
+ itel = np.arange(height)
539
+ itel = itel[np.abs(itel - height / 2) >= tel_lim]
540
+ tel = img[itel, :]
541
+ sc = np.zeros(width)
542
+
543
+ for itel in range(width):
544
+ sc[itel] = np.ma.median(tel[itel])
545
+
546
+ return sc
547
+
548
+
549
+ def calc_scatter_correction(scatter, index):
550
+ """Calculate scatter correction
551
+ by evaluating the 2D background polynomial at the swath indices
552
+
553
+ Parameters
554
+ ----------
555
+ scatter : array of shape (degree_x, degree_y)
556
+ 2D polynomial coefficients of the background scatter
557
+ index : tuple (array, array)
558
+ indices of the swath within the overall image
559
+
560
+ Returns
561
+ -------
562
+ scatter_correction : array of shape (swath_width, swath_height)
563
+ correction for scattered light
564
+ """
565
+
566
+ # The indices in the image are switched
567
+ y, x = index
568
+ scatter_correction = np.polynomial.polynomial.polyval2d(x, y, scatter)
569
+ return scatter_correction
570
+
571
+
572
+ def extract_spectrum(
573
+ img,
574
+ ycen,
575
+ yrange,
576
+ xrange,
577
+ gain=1,
578
+ readnoise=0,
579
+ lambda_sf=0.1,
580
+ lambda_sp=0,
581
+ osample=1,
582
+ swath_width=None,
583
+ maxiter=20,
584
+ telluric=None,
585
+ scatter=None,
586
+ normalize=False,
587
+ threshold=0,
588
+ tilt=None,
589
+ shear=None,
590
+ plot=False,
591
+ plot_title=None,
592
+ im_norm=None,
593
+ im_ordr=None,
594
+ out_spec=None,
595
+ out_sunc=None,
596
+ out_slitf=None,
597
+ out_mask=None,
598
+ progress=None,
599
+ ord_num=0,
600
+ **kwargs,
601
+ ):
602
+ """
603
+ Extract the spectrum of a single order from an image
604
+ The order is split into several swathes of roughly swath_width length, which overlap half-half
605
+ For each swath a spectrum and slitfunction are extracted
606
+ overlapping sections are combined using linear weights (the center is weighted strongest, falling off towards the edges)
607
+ Here is the layout for the bins:
608
+
609
+ ::
610
+
611
+ 1st swath 3rd swath 5th swath ...
612
+ /============|============|============|============|============|
613
+
614
+ 2nd swath 4th swath 6th swath
615
+ |------------|------------|------------|------------|
616
+ |.....|
617
+ overlap
618
+
619
+ + ******* 1
620
+ + *
621
+ + *
622
+ * weights (+) previous swath, (*) current swath
623
+ * +
624
+ * +
625
+ * +++++++ 0
626
+
627
+ Parameters
628
+ ----------
629
+ img : array[nrow, ncol]
630
+ observation (or similar)
631
+ ycen : array[ncol]
632
+ order trace of the current order
633
+ yrange : tuple(int, int)
634
+ extraction width in pixels, below and above
635
+ xrange : tuple(int, int)
636
+ column range to extract (low, high)
637
+ gain : float, optional
638
+ adu to electron, amplifier gain (default: 1)
639
+ readnoise : float, optional
640
+ read out noise factor (default: 0)
641
+ lambda_sf : float, optional
642
+ slit function smoothing parameter, usually very small (default: 0.1)
643
+ lambda_sp : int, optional
644
+ spectrum smoothing parameter, usually very small (default: 0)
645
+ osample : int, optional
646
+ oversampling factor, i.e. how many subpixels to create per pixel (default: 1, i.e. no oversampling)
647
+ swath_width : int, optional
648
+ swath width suggestion, actual width depends also on ncol, see make_bins (default: None, which will determine the width based on the order tracing)
649
+ telluric : {float, None}, optional
650
+ telluric correction factor (default: None, i.e. no telluric correction)
651
+ scatter : {array, None}, optional
652
+ background scatter as 2d polynomial coefficients (default: None, no correction)
653
+ normalize : bool, optional
654
+ whether to create a normalized image. If true, im_norm and im_ordr are used as output (default: False)
655
+ threshold : int, optional
656
+ threshold for normalization (default: 0)
657
+ tilt : array[ncol], optional
658
+ The tilt (1st order curvature) of the slit in this order for the curved extraction (default: None, i.e. tilt = 0)
659
+ shear : array[ncol], optional
660
+ The shear (2nd order curvature) of the slit in this order for the curved extraction (default: None, i.e. shear = 0)
661
+ plot : bool, optional
662
+ whether to plot the progress, plotting will slow down the procedure significantly (default: False)
663
+ ord_num : int, optional
664
+ current order number, just for plotting (default: 0)
665
+ im_norm : array[nrow, ncol], optional
666
+ normalized image, only output if normalize is True (default: None)
667
+ im_ordr : array[nrow, ncol], optional
668
+ image of the order blaze, only output if normalize is True (default: None)
669
+
670
+ Returns
671
+ -------
672
+ spec : array[ncol]
673
+ extracted spectrum
674
+ slitf : array[nslitf]
675
+ extracted slitfunction
676
+ mask : array[ncol]
677
+ mask of the column range to use in the spectrum
678
+ unc : array[ncol]
679
+ uncertainty on the spectrum
680
+ """
681
+
682
+ _, ncol = img.shape
683
+ ylow, yhigh = yrange
684
+ xlow, xhigh = xrange
685
+ nslitf = osample * (ylow + yhigh + 2) + 1
686
+ # the swath height would be yhigh + ylow + 1; the value is not needed here
687
+
688
+ ycen_int = np.floor(ycen).astype(int)
689
+
690
+ spec = np.zeros(ncol) if out_spec is None else out_spec
691
+ sunc = np.zeros(ncol) if out_sunc is None else out_sunc
692
+ mask = np.full(ncol, False) if out_mask is None else out_mask
693
+ slitf = np.zeros(nslitf) if out_slitf is None else out_slitf
694
+
695
+ nbin, bins_start, bins_end = make_bins(swath_width, xlow, xhigh, ycen)
696
+ nswath = 2 * nbin - 1
697
+ swath = Swath(nswath)
698
+ margin = np.zeros((nswath, 2), int)
699
+
700
+ if normalize:
701
+ norm_img = [None] * nswath
702
+ norm_model = [None] * nswath
703
+
704
+ # Perform slit decomposition within each swath stepping through the order with
705
+ # half swath width. Spectra for each decomposition are combined with linear weights.
706
+ with tqdm(
707
+ enumerate(zip(bins_start, bins_end, strict=False)),
708
+ total=len(bins_start),
709
+ leave=False,
710
+ desc="Swath",
711
+ ) as t:
712
+ for ihalf, (ibeg, iend) in t:
713
+ logger.debug("Extracting Swath %i, Columns: %i - %i", ihalf, ibeg, iend)
714
+
715
+ # Cut out swath from image
716
+ index = make_index(ycen_int - ylow, ycen_int + yhigh, ibeg, iend)
717
+ swath_img = img[index]
718
+ swath_ycen = ycen[ibeg:iend]
719
+
720
+ # Corrections
721
+ # TODO: what is it even supposed to do?
722
+ if telluric is not None: # pragma: no cover
723
+ telluric_correction = calc_telluric_correction(telluric, swath_img)
724
+ else:
725
+ telluric_correction = 0
726
+
727
+ if scatter is not None:
728
+ scatter_correction = calc_scatter_correction(scatter, index)
729
+ else:
730
+ scatter_correction = 0
731
+
732
+ swath_img -= scatter_correction + telluric_correction
733
+
734
+ # Do Slitfunction extraction
735
+ swath_tilt = tilt[ibeg:iend] if tilt is not None else 0
736
+ swath_shear = shear[ibeg:iend] if shear is not None else 0
737
+ swath[ihalf] = slitfunc_curved(
738
+ swath_img,
739
+ swath_ycen,
740
+ swath_tilt,
741
+ swath_shear,
742
+ lambda_sp=lambda_sp,
743
+ lambda_sf=lambda_sf,
744
+ osample=osample,
745
+ yrange=yrange,
746
+ maxiter=maxiter,
747
+ gain=gain,
748
+ )
749
+ t.set_postfix(chi=f"{swath[ihalf][5][1]:1.2f}")
750
+
751
+ if normalize:
752
+ # Save image and model for later
753
+ # Use np.divide to avoid divisions by zero
754
+ where = swath.model[ihalf] > threshold / gain
755
+ norm_img[ihalf] = np.ones_like(swath.model[ihalf])
756
+ np.divide(
757
+ np.abs(swath_img),
758
+ swath.model[ihalf],
759
+ where=where,
760
+ out=norm_img[ihalf],
761
+ )
762
+ norm_model[ihalf] = swath.model[ihalf]
763
+
764
+ if plot >= 2 and not np.all(np.isnan(swath_img)): # pragma: no cover
765
+ if progress is None:
766
+ progress = ProgressPlot(
767
+ swath_img.shape[0], swath_img.shape[1], nslitf, title=plot_title
768
+ )
769
+ progress.plot(
770
+ swath_img,
771
+ swath.spec[ihalf],
772
+ swath.slitf[ihalf],
773
+ swath.model[ihalf],
774
+ swath_ycen,
775
+ swath.mask[ihalf],
776
+ ord_num,
777
+ ibeg,
778
+ iend,
779
+ )
780
+
781
+ # Remove points at the border of each swath, if the order has tilt
782
+ # as those pixels have bad information
783
+ for i in range(nswath):
784
+ margin[i, :] = int(swath.info[i][4]) + 1
785
+
786
+ # Weight for combining swaths
787
+ weight = [np.ones(bins_end[i] - bins_start[i]) for i in range(nswath)]
788
+ weight[0][: margin[0, 0]] = 0
789
+ weight[-1][len(weight[-1]) - margin[-1, 1] :] = 0
790
+ for i, j in zip(range(0, nswath - 1), range(1, nswath), strict=False):
791
+ width = bins_end[i] - bins_start[i]
792
+ overlap = bins_end[i] - bins_start[j]
793
+
794
+ # Start and end indices for the two swaths
795
+ start_i = width - overlap + margin[j, 0]
796
+ end_i = width - margin[i, 1]
797
+
798
+ start_j = margin[j, 0]
799
+ end_j = overlap - margin[i, 1]
800
+
801
+ # Weights for one overlap from 0 to 1, but do not include those values (whats the point?)
802
+ triangle = np.linspace(0, 1, overlap + 1, endpoint=False)[1:]
803
+ # Cut away the margins at the corners
804
+ triangle = triangle[margin[j, 0] : len(triangle) - margin[i, 1]]
805
+
806
+ # Set values
807
+ weight[i][start_i:end_i] = 1 - triangle
808
+ weight[j][start_j:end_j] = triangle
809
+
810
+ # Don't use the pixels at the edges (due to curvature)
811
+ weight[i][end_i:] = 0
812
+ weight[j][:start_j] = 0
813
+
814
+ # Update column range
815
+ xrange[0] += margin[0, 0]
816
+ xrange[1] -= margin[-1, 1]
817
+ mask[: xrange[0]] = True
818
+ mask[xrange[1] :] = True
819
+
820
+ # Apply weights
821
+ for i, (ibeg, iend) in enumerate(zip(bins_start, bins_end, strict=False)):
822
+ spec[ibeg:iend] += swath.spec[i] * weight[i]
823
+ sunc[ibeg:iend] += swath.unc[i] * weight[i]
824
+
825
+ if normalize:
826
+ for i, (ibeg, iend) in enumerate(zip(bins_start, bins_end, strict=False)):
827
+ index = make_index(ycen_int - ylow, ycen_int + yhigh, ibeg, iend)
828
+ im_norm[index] += norm_img[i] * weight[i]
829
+ im_ordr[index] += norm_model[i] * weight[i]
830
+
831
+ slitf[:] = np.mean(swath.slitf, axis=0)
832
+ sunc[:] = np.sqrt(sunc**2 + (readnoise / gain) ** 2)
833
+ return spec, slitf, mask, sunc
834
+
835
+
836
+ def model(spec, slitf):
837
+ return spec[None, :] * slitf[:, None]
838
+
839
+
840
+ def get_y_scale(ycen, xrange, extraction_width, nrow):
841
+ """Calculate the y limits of the order
842
+ This is especially important at the edges
843
+
844
+ Parameters
845
+ ----------
846
+ ycen : array[ncol]
847
+ order trace
848
+ xrange : tuple(int, int)
849
+ column range
850
+ extraction_width : tuple(int, int)
851
+ extraction width in pixels below and above the order
852
+ nrow : int
853
+ number of rows in the image, defines upper edge
854
+
855
+ Returns
856
+ -------
857
+ y_low, y_high : int, int
858
+ lower and upper y bound for extraction
859
+ """
860
+ ycen = ycen[xrange[0] : xrange[1]]
861
+
862
+ ymin = ycen - extraction_width[0]
863
+ ymin = np.floor(ymin)
864
+ if min(ymin) < 0:
865
+ ymin = ymin - min(ymin) # help for orders at edge
866
+ if max(ymin) >= nrow:
867
+ ymin = ymin - max(ymin) + nrow - 1 # helps at edge
868
+
869
+ ymax = ycen + extraction_width[1]
870
+ ymax = np.ceil(ymax)
871
+ if max(ymax) >= nrow:
872
+ ymax = ymax - max(ymax) + nrow - 1 # helps at edge
873
+
874
+ # Define a fixed height area containing one spectral order
875
+ y_lower_lim = int(np.min(ycen - ymin)) # Pixels below center line
876
+ y_upper_lim = int(np.min(ymax - ycen)) # Pixels above center line
877
+
878
+ return y_lower_lim, y_upper_lim
879
+
880
+
881
+ def optimal_extraction(
882
+ img,
883
+ orders,
884
+ extraction_width,
885
+ column_range,
886
+ tilt,
887
+ shear,
888
+ plot=False,
889
+ plot_title=None,
890
+ **kwargs,
891
+ ):
892
+ """Use optimal extraction to get spectra
893
+
894
+ This function just loops over the orders; the actual work is done in extract_spectrum
895
+
896
+ Parameters
897
+ ----------
898
+ img : array[nrow, ncol]
899
+ image to extract
900
+ orders : array[nord, degree]
901
+ order tracing coefficients
902
+ extraction_width : array[nord, 2]
903
+ extraction width in pixels
904
+ column_range : array[nord, 2]
905
+ column range to use
906
+ scatter : array[nord, 4, ncol]
907
+ background scatter (or None)
908
+ **kwargs
909
+ other parameters for the extraction (see extract_spectrum)
910
+
911
+ Returns
912
+ -------
913
+ spectrum : array[nord, ncol]
914
+ extracted spectrum
915
+ slitfunction : array[nord, nslitf]
916
+ recovered slitfunction
917
+ uncertainties: array[nord, ncol]
918
+ uncertainties on the spectrum
919
+ """
920
+
921
+ logger.info("Using optimal extraction to produce spectrum")
922
+
923
+ nrow, ncol = img.shape
924
+ nord = len(orders)
925
+
926
+ spectrum = np.zeros((nord, ncol))
927
+ uncertainties = np.zeros((nord, ncol))
928
+ slitfunction = [None for _ in range(nord)]
929
+
930
+ if tilt is None:
931
+ tilt = [None for _ in range(nord)]
932
+ if shear is None:
933
+ shear = [None for _ in range(nord)]
934
+
935
+ # Add mask as defined by column ranges
936
+ mask = np.full((nord, ncol), True)
937
+ for i in range(nord):
938
+ mask[i, column_range[i, 0] : column_range[i, 1]] = False
939
+ spectrum = np.ma.array(spectrum, mask=mask)
940
+ uncertainties = np.ma.array(uncertainties, mask=mask)
941
+
942
+ ix = np.arange(ncol)
943
+ if plot >= 2: # pragma: no cover
944
+ ncol_swath = kwargs.get("swath_width", img.shape[1] // 400)
945
+ nrow_swath = np.sum(extraction_width, axis=1).max()
946
+ nslitf_swath = (nrow_swath + 2) * kwargs.get("osample", 1) + 1
947
+ progress = ProgressPlot(nrow_swath, ncol_swath, nslitf_swath, title=plot_title)
948
+ else:
949
+ progress = None
950
+
951
+ for i in tqdm(range(nord), desc="Order"):
952
+ logger.debug("Extracting relative order %i out of %i", i + 1, nord)
953
+
954
+ # Define a fixed height area containing one spectral order
955
+ ycen = np.polyval(orders[i], ix)
956
+ yrange = get_y_scale(ycen, column_range[i], extraction_width[i], nrow)
957
+
958
+ osample = kwargs.get("osample", 1)
959
+ slitfunction[i] = np.zeros(osample * (sum(yrange) + 2) + 1)
960
+
961
+ # Return values are set by reference, as the out parameters
962
+ # Also column_range is adjusted depending on the shear
963
+ # This is to avoid large chunks of memory of essentially duplicates
964
+ extract_spectrum(
965
+ img,
966
+ ycen,
967
+ yrange,
968
+ column_range[i],
969
+ tilt=tilt[i],
970
+ shear=shear[i],
971
+ out_spec=spectrum[i],
972
+ out_sunc=uncertainties[i],
973
+ out_slitf=slitfunction[i],
974
+ out_mask=mask[i],
975
+ progress=progress,
976
+ ord_num=i + 1,
977
+ plot=plot,
978
+ plot_title=plot_title,
979
+ **kwargs,
980
+ )
981
+
982
+ if plot >= 2: # pragma: no cover
983
+ progress.close()
984
+
985
+ if plot: # pragma: no cover
986
+ plot_comparison(
987
+ img,
988
+ orders,
989
+ spectrum,
990
+ slitfunction,
991
+ extraction_width,
992
+ column_range,
993
+ title=plot_title,
994
+ )
995
+
996
+ return spectrum, slitfunction, uncertainties
997
+
998
+
999
+ def correct_for_curvature(img_order, tilt, shear, xwd):
1000
+ # img_order = np.ma.filled(img_order, np.nan)
1001
+ mask = ~np.ma.getmaskarray(img_order)
1002
+
1003
+ xt = np.arange(img_order.shape[1])
1004
+ for y, yt in zip(range(xwd[0] + xwd[1]), range(-xwd[0], xwd[1]), strict=False):
1005
+ xi = xt + yt * tilt + yt**2 * shear
1006
+ img_order[y] = np.interp(
1007
+ xi, xt[mask[y]], img_order[y][mask[y]], left=0, right=0
1008
+ )
1009
+
1010
+ xt = np.arange(img_order.shape[0])
1011
+ for x in range(img_order.shape[1]):
1012
+ img_order[:, x] = np.interp(
1013
+ xt, xt[mask[:, x]], img_order[:, x][mask[:, x]], left=0, right=0
1014
+ )
1015
+
1016
+ return img_order
1017
+
1018
+
1019
+ def model_image(img, xwd, tilt, shear):
1020
+ # Correct image for curvature
1021
+ # img.shape[0] is the number of rows; the value is not needed here
1022
+ img = correct_for_curvature(img, tilt, shear, xwd)
1023
+ # Find slitfunction using the median to avoid outliers
1024
+ slitf = np.ma.median(img, axis=1)
1025
+ slitf /= np.ma.sum(slitf)
1026
+ # Use the slitfunction to find spectrum
1027
+ spec = np.ma.median(img / slitf[:, None], axis=0)
1028
+ # Create model from slitfunction and spectrum
1029
+ model = spec[None, :] * slitf[:, None]
1030
+ # Reapply curvature to the model
1031
+ model = correct_for_curvature(model, -tilt, -shear, xwd)
1032
+ return model, spec, slitf
1033
+
1034
+
1035
+ def get_mask(img, model):
1036
+ # 99.73 = 3 sigma, 2 * 3 = 6 sigma
1037
+ residual = np.ma.abs(img - model)
1038
+ median, vmax = np.percentile(np.ma.compressed(residual), (50, 99.73))
1039
+ vmax = median + 2 * (vmax - median)
1040
+ return residual > vmax
1041
+
1042
+
1043
+ def arc_extraction(
1044
+ img,
1045
+ orders,
1046
+ extraction_width,
1047
+ column_range,
1048
+ gain=1,
1049
+ readnoise=0,
1050
+ dark=0,
1051
+ plot=False,
1052
+ plot_title=None,
1053
+ tilt=None,
1054
+ shear=None,
1055
+ collapse_function="median",
1056
+ **kwargs,
1057
+ ):
1058
+ """Use "simple" arc extraction to get a spectrum
1059
+ Arc extraction simply sums the pixels orthogonal to the order over the extraction width
1060
+
1061
+ This extraction makes a few rough assumptions and does not provide the most accurate results,
1062
+ but rather a good approximation
1063
+
1064
+ Parameters
1065
+ ----------
1066
+ img : array[nrow, ncol]
1067
+ image to extract
1068
+ orders : array[nord, order]
1069
+ order tracing coefficients
1070
+ extraction_width : array[nord, 2]
1071
+ extraction width in pixels
1072
+ column_range : array[nord, 2]
1073
+ column range to use
1074
+ gain : float, optional
1075
+ adu to electron, amplifier gain (default: 1)
1076
+ readnoise : float, optional
1077
+ read out noise (default: 0)
1078
+ dark : float, optional
1079
+ dark current noise (default: 0)
1080
+ plot : bool, optional
1081
+ whether to plot the results (default: False)
1082
+
1083
+ Returns
1084
+ -------
1085
+ spectrum : array[nord, ncol]
1086
+ extracted spectrum
1087
+ uncertainties : array[nord, ncol]
1088
+ uncertainties on extracted spectrum
1089
+ """
1090
+
1091
+ logger.info("Using arc extraction to produce spectrum")
1092
+ _, ncol = img.shape
1093
+ nord, _ = orders.shape
1094
+
1095
+ spectrum = np.zeros((nord, ncol))
1096
+ uncertainties = np.zeros((nord, ncol))
1097
+
1098
+ # Add mask as defined by column ranges
1099
+ mask = np.full((nord, ncol), True)
1100
+ for i in range(nord):
1101
+ mask[i, column_range[i, 0] : column_range[i, 1]] = False
1102
+ spectrum = np.ma.array(spectrum, mask=mask)
1103
+ uncertainties = np.ma.array(uncertainties, mask=mask)
1104
+
1105
+ x = np.arange(ncol)
1106
+
1107
+ for i in tqdm(range(nord), desc="Order"):
1108
+ logger.debug("Calculating order %i out of %i", i + 1, nord)
1109
+
1110
+ x_left_lim = column_range[i, 0]
1111
+ x_right_lim = column_range[i, 1]
1112
+
1113
+ # Rectify the image, i.e. remove the shape of the order
1114
+ # Then the center of the order varies by less than one pixel
1115
+ ycen = np.polyval(orders[i], x).astype(int)
1116
+ yb, yt = ycen - extraction_width[i, 0], ycen + extraction_width[i, 1]
1117
+ # extraction_width[i, 0] + extraction_width[i, 1] + 1 is the order height; not needed here
1118
+ index = make_index(yb, yt, x_left_lim, x_right_lim)
1119
+ img_order = img[index]
1120
+
1121
+ # Correct for tilt and shear
1122
+ # For each row of the rectified order, interpolate onto the shifted row
1123
+ # Masked pixels are set to 0, similar to the summation
1124
+ if tilt is not None and shear is not None:
1125
+ img_order = correct_for_curvature(
1126
+ img_order,
1127
+ tilt[i, x_left_lim:x_right_lim],
1128
+ shear[i, x_left_lim:x_right_lim],
1129
+ extraction_width[i],
1130
+ )
1131
+
1132
+ # Sum over the prepared image
1133
+ if collapse_function == "sum":
1134
+ arc = np.ma.sum(img_order, axis=0)
1135
+ elif collapse_function == "mean":
1136
+ arc = np.ma.mean(img_order, axis=0) * img_order.shape[0]
1137
+ elif collapse_function == "median":
1138
+ arc = np.ma.median(img_order, axis=0) * img_order.shape[0]
1139
+ else:
1140
+ raise ValueError(
1141
+ f"Could not determine the arc method, expected one of ('sum', 'mean', 'median'), but got {collapse_function}"
1142
+ )
1143
+
1144
+ # Store results
1145
+ spectrum[i, x_left_lim:x_right_lim] = arc
1146
+ uncertainties[i, x_left_lim:x_right_lim] = (
1147
+ np.sqrt(np.abs(arc * gain + dark + readnoise**2)) / gain
1148
+ )
1149
+
1150
+ if plot: # pragma: no cover
1151
+ plot_comparison(
1152
+ img,
1153
+ orders,
1154
+ spectrum,
1155
+ None,
1156
+ extraction_width,
1157
+ column_range,
1158
+ title=plot_title,
1159
+ )
1160
+
1161
+ return spectrum, uncertainties
1162
+
1163
+
1164
+ def plot_comparison(
1165
+ original, orders, spectrum, slitf, extraction_width, column_range, title=None
1166
+ ): # pragma: no cover
1167
+ nrow, ncol = original.shape
1168
+ nord = len(orders)
1169
+ output = np.zeros((np.sum(extraction_width) + nord, ncol))
1170
+ pos = [0]
1171
+ x = np.arange(ncol)
1172
+ for i in range(nord):
1173
+ ycen = np.polyval(orders[i], x)
1174
+ yb = ycen - extraction_width[i, 0]
1175
+ yt = ycen + extraction_width[i, 1]
1176
+ xl, xr = column_range[i]
1177
+ index = make_index(yb, yt, xl, xr)
1178
+ yl = pos[i]
1179
+ yr = pos[i] + index[0].shape[0]
1180
+ output[yl:yr, xl:xr] = original[index]
1181
+
1182
+ vmin, vmax = np.percentile(output[yl:yr, xl:xr], (5, 95))
1183
+ output[yl:yr, xl:xr] = np.clip(output[yl:yr, xl:xr], vmin, vmax)
1184
+ output[yl:yr, xl:xr] -= vmin
1185
+ output[yl:yr, xl:xr] /= vmax - vmin
1186
+
1187
+ pos += [yr]
1188
+
1189
+ plt.imshow(output, origin="lower", aspect="auto")
1190
+
1191
+ for i in range(nord):
1192
+ try:
1193
+ tmp = spectrum[i, column_range[i, 0] : column_range[i, 1]]
1194
+ # if len(tmp)
1195
+ vmin = np.min(tmp[tmp != 0])
1196
+ tmp = np.copy(spectrum[i])
1197
+ tmp[tmp != 0] -= vmin
1198
+ np.log(tmp, out=tmp, where=tmp > 0)
1199
+ tmp = tmp / np.max(tmp) * 0.9 * (pos[i + 1] - pos[i])
1200
+ tmp += pos[i]
1201
+ tmp[tmp < pos[i]] = pos[i]
1202
+ plt.plot(x, tmp, "r")
1203
+ except Exception:
1204
+ pass
1205
+
1206
+ locs = np.sum(extraction_width, axis=1) + 1
1207
+ locs = np.array([0, *np.cumsum(locs)[:-1]])
1208
+ locs[:-1] += (np.diff(locs) * 0.5).astype(int)
1209
+ locs[-1] += ((output.shape[0] - locs[-1]) * 0.5).astype(int)
1210
+ plt.yticks(locs, range(len(locs)))
1211
+
1212
+ plot_title = "Extracted Spectrum vs. Rectified Image"
1213
+ if title is not None:
1214
+ plot_title = f"{title}\n{plot_title}"
1215
+ plt.title(plot_title)
1216
+ plt.xlabel("x [pixel]")
1217
+ plt.ylabel("order")
1218
+ util.show_or_save("extract_rectify")
1219
+
1220
+
1221
+ def extract(
1222
+ img,
1223
+ orders,
1224
+ column_range=None,
1225
+ order_range=None,
1226
+ extraction_width=0.5,
1227
+ extraction_type="optimal",
1228
+ tilt=None,
1229
+ shear=None,
1230
+ sigma_cutoff=0,
1231
+ **kwargs,
1232
+ ):
1233
+ """
1234
+ Extract the spectrum from an image
1235
+
1236
+ Parameters
1237
+ ----------
1238
+ img : array[nrow, ncol](float)
1239
+ observation to extract
1240
+ orders : array[nord, degree](float)
1241
+ polynomial coefficients of the order tracing
1242
+ column_range : array[nord, 2](int), optional
1243
+ range of pixels to use for each order (default: use all)
1244
+ order_range : array[2](int), optional
1245
+ range of orders to extract, orders have to be consecutive (default: use all)
1246
+ extraction_width : array[nord, 2]({float, int}), optional
1247
+ extraction width above and below each order, values below 1.5 are considered relative, while values above are absolute (default: 0.5)
1248
+ extraction_type : {"optimal", "arc", "normalize"}, optional
1249
+ which extraction algorithm to use, "optimal" uses optimal extraction, "arc" uses simple arc extraction, and "normalize" also uses optimal extraction, but returns the normalized image (default: "optimal")
1250
+ tilt : float or array[nord, ncol], optional
1251
+ The tilt (1st order curvature) of the slit for curved extraction. Will use vertical extraction if no tilt is set. (default: None, i.e. tilt = 0)
1252
+ shear : float or array[nord, ncol], optional
1253
+ The shear (2nd order curvature) of the slit for curved extraction (default: None, i.e. shear = 0)
1254
+ polarization : bool, optional
1255
+ if true, pairs of orders are considered to belong to the same order, but different polarization. Only affects the scatter (default: False)
1256
+ **kwargs, optional
1257
+ parameters for extraction functions
1258
+
1259
+ Returns
1260
+ -------
1261
+ spec : array[nord, ncol](float)
1262
+ extracted spectrum for each order
1263
+ uncertainties : array[nord, ncol](float)
1264
+ uncertainties on the spectrum
1265
+
1266
+ if extraction_type == "normalize" instead return
1267
+
1268
+ im_norm : array[nrow, ncol](float)
1269
+ normalized image
1270
+ im_ordr : array[nrow, ncol](float)
1271
+ image with just the orders
1272
+ blaze : array[nord, ncol](float)
1273
+ extracted spectrum (equals blaze if img was the flat field)
1274
+ """
1275
+
1276
+ nrow, ncol = img.shape
1277
+ nord, _ = orders.shape
1278
+ if order_range is None:
1279
+ order_range = (0, nord)
1280
+ if np.isscalar(tilt):
1281
+ n = order_range[1] - order_range[0]
1282
+ tilt = np.full((n, ncol), tilt)
1283
+ if np.isscalar(shear):
1284
+ n = order_range[1] - order_range[0]
1285
+ shear = np.full((n, ncol), shear)
1286
+
1287
+ # Fix the input parameters
1288
+ extraction_width, column_range, orders = fix_parameters(
1289
+ extraction_width, column_range, orders, nrow, ncol, nord
1290
+ )
1291
+ # Limit orders (and related properties) to orders in range
1292
+ nord = order_range[1] - order_range[0]
1293
+ orders = orders[order_range[0] : order_range[1]]
1294
+ column_range = column_range[order_range[0] : order_range[1]]
1295
+ extraction_width = extraction_width[order_range[0] : order_range[1]]
1296
+
1297
+ # if sigma_cutoff > 0:
1298
+ # # Blur the image and mask outliers
1299
+ # img = np.ma.masked_invalid(img, copy=False)
1300
+ # img.data[img.mask] = 0
1301
+ # # Use the median of the surrounding pixels (excluding the pixel itself)
1302
+ # footprint = np.array([[1, 1, 1], [1, 0, 1], [1, 1, 1]])
1303
+ # dilated = median_filter(img, footprint=footprint)
1304
+ # diff = np.ma.abs(img - dilated)
1305
+ # # median = 50%; 3 sigma = 99.73 %
1306
+ # median, std = np.percentile(diff.compressed(), (50, 99.73))
1307
+ # mask = diff > median + sigma_cutoff * std / 3
1308
+ # img[mask] = np.ma.masked
1309
+
1310
+ if extraction_type == "optimal":
1311
+ # the "normal" case, except for wavelength calibration files
1312
+ spectrum, slitfunction, uncertainties = optimal_extraction(
1313
+ img,
1314
+ orders,
1315
+ extraction_width,
1316
+ column_range,
1317
+ tilt=tilt,
1318
+ shear=shear,
1319
+ **kwargs,
1320
+ )
1321
+ elif extraction_type == "normalize":
1322
+ # TODO
1323
+ # Prepare normalized flat field image if necessary
1324
+ # These will be passed and "returned" by reference
1325
+ # I don't like it, but it works for now
1326
+ im_norm = np.zeros_like(img)
1327
+ im_ordr = np.zeros_like(img)
1328
+
1329
+ blaze, _, _ = optimal_extraction(
1330
+ img,
1331
+ orders,
1332
+ extraction_width,
1333
+ column_range,
1334
+ tilt=tilt,
1335
+ shear=shear,
1336
+ normalize=True,
1337
+ im_norm=im_norm,
1338
+ im_ordr=im_ordr,
1339
+ **kwargs,
1340
+ )
1341
+ threshold_lower = kwargs.get("threshold_lower", 0)
1342
+ im_norm[im_norm <= threshold_lower] = 1
1343
+ im_ordr[im_ordr <= threshold_lower] = 1
1344
+ return im_norm, im_ordr, blaze, column_range
1345
+ elif extraction_type == "arc":
1346
+ # Simpler extraction, just summing along the arc of the order
1347
+ spectrum, uncertainties = arc_extraction(
1348
+ img,
1349
+ orders,
1350
+ extraction_width,
1351
+ column_range,
1352
+ tilt=tilt,
1353
+ shear=shear,
1354
+ **kwargs,
1355
+ )
1356
+ slitfunction = None
1357
+ else:
1358
+ raise ValueError(
1359
+ f"Parameter 'extraction_type' not understood. Expected 'optimal', 'normalize', or 'arc' bug got {extraction_type}."
1360
+ )
1361
+
1362
+ return spectrum, uncertainties, slitfunction, column_range
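For orientation, a minimal usage sketch of the extract() entry point added above. This is illustrative only: the image and order-trace coefficients are made-up placeholders, and the compiled _slitfunc extensions shipped in pyreduce/clib must be importable for the optimal extraction to run.

    import numpy as np
    from pyreduce.extract import extract

    # Assumed inputs: a 2D detector frame and polynomial order traces
    # (coefficients highest degree first, as consumed by np.polyval).
    img = np.random.rand(2048, 2048)
    orders = np.array([
        [1.0e-5, 0.01, 500.0],
        [1.0e-5, 0.01, 900.0],
    ])

    # Returns the spectrum, its uncertainties, the slit function, and the
    # adjusted column range, matching the final return statement above.
    spec, unc, slitf, column_range = extract(
        img,
        orders,
        extraction_width=0.5,        # fractional width, per the docstring
        extraction_type="optimal",
    )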