pyreduce-astro 0.7a4__cp314-cp314-win_amd64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (182)
  1. pyreduce/__init__.py +67 -0
  2. pyreduce/__main__.py +322 -0
  3. pyreduce/cli.py +342 -0
  4. pyreduce/clib/Release/_slitfunc_2d.cp311-win_amd64.exp +0 -0
  5. pyreduce/clib/Release/_slitfunc_2d.cp311-win_amd64.lib +0 -0
  6. pyreduce/clib/Release/_slitfunc_2d.cp312-win_amd64.exp +0 -0
  7. pyreduce/clib/Release/_slitfunc_2d.cp312-win_amd64.lib +0 -0
  8. pyreduce/clib/Release/_slitfunc_2d.cp313-win_amd64.exp +0 -0
  9. pyreduce/clib/Release/_slitfunc_2d.cp313-win_amd64.lib +0 -0
  10. pyreduce/clib/Release/_slitfunc_2d.cp314-win_amd64.exp +0 -0
  11. pyreduce/clib/Release/_slitfunc_2d.cp314-win_amd64.lib +0 -0
  12. pyreduce/clib/Release/_slitfunc_2d.obj +0 -0
  13. pyreduce/clib/Release/_slitfunc_bd.cp311-win_amd64.exp +0 -0
  14. pyreduce/clib/Release/_slitfunc_bd.cp311-win_amd64.lib +0 -0
  15. pyreduce/clib/Release/_slitfunc_bd.cp312-win_amd64.exp +0 -0
  16. pyreduce/clib/Release/_slitfunc_bd.cp312-win_amd64.lib +0 -0
  17. pyreduce/clib/Release/_slitfunc_bd.cp313-win_amd64.exp +0 -0
  18. pyreduce/clib/Release/_slitfunc_bd.cp313-win_amd64.lib +0 -0
  19. pyreduce/clib/Release/_slitfunc_bd.cp314-win_amd64.exp +0 -0
  20. pyreduce/clib/Release/_slitfunc_bd.cp314-win_amd64.lib +0 -0
  21. pyreduce/clib/Release/_slitfunc_bd.obj +0 -0
  22. pyreduce/clib/__init__.py +0 -0
  23. pyreduce/clib/_slitfunc_2d.cp311-win_amd64.pyd +0 -0
  24. pyreduce/clib/_slitfunc_2d.cp312-win_amd64.pyd +0 -0
  25. pyreduce/clib/_slitfunc_2d.cp313-win_amd64.pyd +0 -0
  26. pyreduce/clib/_slitfunc_2d.cp314-win_amd64.pyd +0 -0
  27. pyreduce/clib/_slitfunc_bd.cp311-win_amd64.pyd +0 -0
  28. pyreduce/clib/_slitfunc_bd.cp312-win_amd64.pyd +0 -0
  29. pyreduce/clib/_slitfunc_bd.cp313-win_amd64.pyd +0 -0
  30. pyreduce/clib/_slitfunc_bd.cp314-win_amd64.pyd +0 -0
  31. pyreduce/clib/build_extract.py +75 -0
  32. pyreduce/clib/slit_func_2d_xi_zeta_bd.c +1313 -0
  33. pyreduce/clib/slit_func_2d_xi_zeta_bd.h +55 -0
  34. pyreduce/clib/slit_func_bd.c +362 -0
  35. pyreduce/clib/slit_func_bd.h +17 -0
  36. pyreduce/clipnflip.py +147 -0
  37. pyreduce/combine_frames.py +861 -0
  38. pyreduce/configuration.py +191 -0
  39. pyreduce/continuum_normalization.py +329 -0
  40. pyreduce/cwrappers.py +404 -0
  41. pyreduce/datasets.py +238 -0
  42. pyreduce/echelle.py +413 -0
  43. pyreduce/estimate_background_scatter.py +130 -0
  44. pyreduce/extract.py +1362 -0
  45. pyreduce/extraction_width.py +77 -0
  46. pyreduce/instruments/__init__.py +0 -0
  47. pyreduce/instruments/aj.py +9 -0
  48. pyreduce/instruments/aj.yaml +51 -0
  49. pyreduce/instruments/andes.py +102 -0
  50. pyreduce/instruments/andes.yaml +72 -0
  51. pyreduce/instruments/common.py +711 -0
  52. pyreduce/instruments/common.yaml +57 -0
  53. pyreduce/instruments/crires_plus.py +103 -0
  54. pyreduce/instruments/crires_plus.yaml +101 -0
  55. pyreduce/instruments/filters.py +195 -0
  56. pyreduce/instruments/harpn.py +203 -0
  57. pyreduce/instruments/harpn.yaml +140 -0
  58. pyreduce/instruments/harps.py +312 -0
  59. pyreduce/instruments/harps.yaml +144 -0
  60. pyreduce/instruments/instrument_info.py +140 -0
  61. pyreduce/instruments/jwst_miri.py +29 -0
  62. pyreduce/instruments/jwst_miri.yaml +53 -0
  63. pyreduce/instruments/jwst_niriss.py +98 -0
  64. pyreduce/instruments/jwst_niriss.yaml +60 -0
  65. pyreduce/instruments/lick_apf.py +35 -0
  66. pyreduce/instruments/lick_apf.yaml +60 -0
  67. pyreduce/instruments/mcdonald.py +123 -0
  68. pyreduce/instruments/mcdonald.yaml +56 -0
  69. pyreduce/instruments/metis_ifu.py +45 -0
  70. pyreduce/instruments/metis_ifu.yaml +62 -0
  71. pyreduce/instruments/metis_lss.py +45 -0
  72. pyreduce/instruments/metis_lss.yaml +62 -0
  73. pyreduce/instruments/micado.py +45 -0
  74. pyreduce/instruments/micado.yaml +62 -0
  75. pyreduce/instruments/models.py +257 -0
  76. pyreduce/instruments/neid.py +156 -0
  77. pyreduce/instruments/neid.yaml +61 -0
  78. pyreduce/instruments/nirspec.py +215 -0
  79. pyreduce/instruments/nirspec.yaml +63 -0
  80. pyreduce/instruments/nte.py +42 -0
  81. pyreduce/instruments/nte.yaml +55 -0
  82. pyreduce/instruments/uves.py +46 -0
  83. pyreduce/instruments/uves.yaml +65 -0
  84. pyreduce/instruments/xshooter.py +39 -0
  85. pyreduce/instruments/xshooter.yaml +63 -0
  86. pyreduce/make_shear.py +607 -0
  87. pyreduce/masks/mask_crires_plus_det1.fits.gz +0 -0
  88. pyreduce/masks/mask_crires_plus_det2.fits.gz +0 -0
  89. pyreduce/masks/mask_crires_plus_det3.fits.gz +0 -0
  90. pyreduce/masks/mask_ctio_chiron.fits.gz +0 -0
  91. pyreduce/masks/mask_elodie.fits.gz +0 -0
  92. pyreduce/masks/mask_feros3.fits.gz +0 -0
  93. pyreduce/masks/mask_flames_giraffe.fits.gz +0 -0
  94. pyreduce/masks/mask_harps_blue.fits.gz +0 -0
  95. pyreduce/masks/mask_harps_red.fits.gz +0 -0
  96. pyreduce/masks/mask_hds_blue.fits.gz +0 -0
  97. pyreduce/masks/mask_hds_red.fits.gz +0 -0
  98. pyreduce/masks/mask_het_hrs_2x5.fits.gz +0 -0
  99. pyreduce/masks/mask_jwst_miri_lrs_slitless.fits.gz +0 -0
  100. pyreduce/masks/mask_jwst_niriss_gr700xd.fits.gz +0 -0
  101. pyreduce/masks/mask_lick_apf_.fits.gz +0 -0
  102. pyreduce/masks/mask_mcdonald.fits.gz +0 -0
  103. pyreduce/masks/mask_nes.fits.gz +0 -0
  104. pyreduce/masks/mask_nirspec_nirspec.fits.gz +0 -0
  105. pyreduce/masks/mask_sarg.fits.gz +0 -0
  106. pyreduce/masks/mask_sarg_2x2a.fits.gz +0 -0
  107. pyreduce/masks/mask_sarg_2x2b.fits.gz +0 -0
  108. pyreduce/masks/mask_subaru_hds_red.fits.gz +0 -0
  109. pyreduce/masks/mask_uves_blue.fits.gz +0 -0
  110. pyreduce/masks/mask_uves_blue_binned_2_2.fits.gz +0 -0
  111. pyreduce/masks/mask_uves_middle.fits.gz +0 -0
  112. pyreduce/masks/mask_uves_middle_2x2_split.fits.gz +0 -0
  113. pyreduce/masks/mask_uves_middle_binned_2_2.fits.gz +0 -0
  114. pyreduce/masks/mask_uves_red.fits.gz +0 -0
  115. pyreduce/masks/mask_uves_red_2x2.fits.gz +0 -0
  116. pyreduce/masks/mask_uves_red_2x2_split.fits.gz +0 -0
  117. pyreduce/masks/mask_uves_red_binned_2_2.fits.gz +0 -0
  118. pyreduce/masks/mask_xshooter_nir.fits.gz +0 -0
  119. pyreduce/pipeline.py +619 -0
  120. pyreduce/rectify.py +138 -0
  121. pyreduce/reduce.py +2065 -0
  122. pyreduce/settings/settings_AJ.json +19 -0
  123. pyreduce/settings/settings_ANDES.json +89 -0
  124. pyreduce/settings/settings_CRIRES_PLUS.json +89 -0
  125. pyreduce/settings/settings_HARPN.json +73 -0
  126. pyreduce/settings/settings_HARPS.json +69 -0
  127. pyreduce/settings/settings_JWST_MIRI.json +55 -0
  128. pyreduce/settings/settings_JWST_NIRISS.json +55 -0
  129. pyreduce/settings/settings_LICK_APF.json +62 -0
  130. pyreduce/settings/settings_MCDONALD.json +58 -0
  131. pyreduce/settings/settings_METIS_IFU.json +77 -0
  132. pyreduce/settings/settings_METIS_LSS.json +77 -0
  133. pyreduce/settings/settings_MICADO.json +78 -0
  134. pyreduce/settings/settings_NEID.json +73 -0
  135. pyreduce/settings/settings_NIRSPEC.json +58 -0
  136. pyreduce/settings/settings_NTE.json +60 -0
  137. pyreduce/settings/settings_UVES.json +54 -0
  138. pyreduce/settings/settings_XSHOOTER.json +78 -0
  139. pyreduce/settings/settings_pyreduce.json +184 -0
  140. pyreduce/settings/settings_schema.json +850 -0
  141. pyreduce/tools/__init__.py +0 -0
  142. pyreduce/tools/combine.py +117 -0
  143. pyreduce/trace.py +979 -0
  144. pyreduce/util.py +1366 -0
  145. pyreduce/wavecal/MICADO_HK_3arcsec_chip5.npz +0 -0
  146. pyreduce/wavecal/atlas/thar.fits +4946 -13
  147. pyreduce/wavecal/atlas/thar_list.txt +4172 -0
  148. pyreduce/wavecal/atlas/une.fits +0 -0
  149. pyreduce/wavecal/convert.py +38 -0
  150. pyreduce/wavecal/crires_plus_J1228_Open_det1.npz +0 -0
  151. pyreduce/wavecal/crires_plus_J1228_Open_det2.npz +0 -0
  152. pyreduce/wavecal/crires_plus_J1228_Open_det3.npz +0 -0
  153. pyreduce/wavecal/harpn_harpn_2D.npz +0 -0
  154. pyreduce/wavecal/harps_blue_2D.npz +0 -0
  155. pyreduce/wavecal/harps_blue_pol_2D.npz +0 -0
  156. pyreduce/wavecal/harps_red_2D.npz +0 -0
  157. pyreduce/wavecal/harps_red_pol_2D.npz +0 -0
  158. pyreduce/wavecal/mcdonald.npz +0 -0
  159. pyreduce/wavecal/metis_lss_l_2D.npz +0 -0
  160. pyreduce/wavecal/metis_lss_m_2D.npz +0 -0
  161. pyreduce/wavecal/nirspec_K2.npz +0 -0
  162. pyreduce/wavecal/uves_blue_360nm_2D.npz +0 -0
  163. pyreduce/wavecal/uves_blue_390nm_2D.npz +0 -0
  164. pyreduce/wavecal/uves_blue_437nm_2D.npz +0 -0
  165. pyreduce/wavecal/uves_middle_2x2_2D.npz +0 -0
  166. pyreduce/wavecal/uves_middle_565nm_2D.npz +0 -0
  167. pyreduce/wavecal/uves_middle_580nm_2D.npz +0 -0
  168. pyreduce/wavecal/uves_middle_600nm_2D.npz +0 -0
  169. pyreduce/wavecal/uves_middle_665nm_2D.npz +0 -0
  170. pyreduce/wavecal/uves_middle_860nm_2D.npz +0 -0
  171. pyreduce/wavecal/uves_red_580nm_2D.npz +0 -0
  172. pyreduce/wavecal/uves_red_600nm_2D.npz +0 -0
  173. pyreduce/wavecal/uves_red_665nm_2D.npz +0 -0
  174. pyreduce/wavecal/uves_red_760nm_2D.npz +0 -0
  175. pyreduce/wavecal/uves_red_860nm_2D.npz +0 -0
  176. pyreduce/wavecal/xshooter_nir.npz +0 -0
  177. pyreduce/wavelength_calibration.py +1871 -0
  178. pyreduce_astro-0.7a4.dist-info/METADATA +106 -0
  179. pyreduce_astro-0.7a4.dist-info/RECORD +182 -0
  180. pyreduce_astro-0.7a4.dist-info/WHEEL +4 -0
  181. pyreduce_astro-0.7a4.dist-info/entry_points.txt +2 -0
  182. pyreduce_astro-0.7a4.dist-info/licenses/LICENSE +674 -0
pyreduce/cwrappers.py ADDED
@@ -0,0 +1,404 @@
+ """
+ Wrapper for REDUCE C functions
+ 
+ This module provides access to the extraction algorithms in the
+ C libraries and sanitizes the input parameters.
+ 
+ """
+ 
+ import ctypes
+ import logging
+ 
+ import numpy as np
+ from scipy.ndimage import median_filter
+ 
+ logger = logging.getLogger(__name__)
+ 
+ try:
+     from .clib._slitfunc_2d import ffi
+     from .clib._slitfunc_2d import lib as slitfunc_2dlib
+     from .clib._slitfunc_bd import lib as slitfunclib
+ except ImportError:  # pragma: no cover
+     logger.error(
+         "C libraries could not be found. Compiling them by running build_extract.py"
+     )
+     from .clib import build_extract
+ 
+     build_extract.build()
+     del build_extract
+ 
+     from .clib._slitfunc_2d import ffi
+     from .clib._slitfunc_2d import lib as slitfunc_2dlib
+     from .clib._slitfunc_bd import lib as slitfunclib
+ 
+ 
+ c_double = ctypes.c_double
+ c_int = ctypes.c_int
+ c_mask = ctypes.c_ubyte
+ 
+ 
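If the compiled extensions are missing for the running interpreter, the same fallback used in the import above can be triggered by hand. This is only a sketch of that manual path; it assumes a C compiler and the cffi build dependencies are available on the machine.

    from pyreduce.clib import build_extract

    # Compiles the _slitfunc_bd and _slitfunc_2d extensions in place,
    # just as the ImportError fallback above does
    build_extract.build()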
+ def slitfunc(img, ycen, lambda_sp=0, lambda_sf=0.1, osample=1):
+     """Decompose image into spectrum and slitfunction
+ 
+     This is for horizontal straight orders only, for curved orders use slitfunc_curved instead
+ 
+     Parameters
+     ----------
+     img : array[n, m]
+         image to decompose, should just contain a small part of the overall image
+     ycen : array[n]
+         traces the center of the order along the image, relative to the center of the image?
+     lambda_sp : float, optional
+         smoothing parameter of the spectrum (the default is 0, which means no smoothing)
+     lambda_sf : float, optional
+         smoothing parameter of the slitfunction (the default is 0.1, which applies a small amount of smoothing)
+     osample : int, optional
+         Subpixel oversampling factor (the default is 1, which means no oversampling)
+ 
+     Returns
+     -------
+     sp, sl, model, unc, mask
+         spectrum, slitfunction, model, spectrum uncertainties, and updated bad pixel mask
+     """
+ 
+     # Convert input to expected datatypes
+     lambda_sf = float(lambda_sf)
+     lambda_sp = float(lambda_sp)
+     osample = int(osample)
+     img = np.asanyarray(img, dtype=c_double)
+     ycen = np.asarray(ycen, dtype=c_double)
+ 
+     assert img.ndim == 2, "Image must be 2 dimensional"
+     assert ycen.ndim == 1, "Ycen must be 1 dimensional"
+ 
+     assert img.shape[1] == ycen.size, (
+         f"Image and Ycen shapes are incompatible, got {img.shape} and {ycen.shape}"
+     )
+ 
+     assert osample > 0, f"Oversample rate must be positive, but got {osample}"
+     assert lambda_sf >= 0, (
+         f"Slitfunction smoothing must be non-negative, but got {lambda_sf}"
+     )
+     assert lambda_sp >= 0, f"Spectrum smoothing must be non-negative, but got {lambda_sp}"
+ 
+     # Get some derived values
+     nrows, ncols = img.shape
+     ny = osample * (nrows + 1) + 1
+     ycen = ycen - ycen.astype(c_int)
+ 
+     # Prepare all arrays
+     # Initial guess for slit function and spectrum
+     sp = np.ma.sum(img, axis=0)
+     requirements = ["C", "A", "W", "O"]
+     sp = np.require(sp, dtype=c_double, requirements=requirements)
+ 
+     sl = np.zeros(ny, dtype=c_double)
+ 
+     mask = ~np.ma.getmaskarray(img)
+     mask = np.require(mask, dtype=c_int, requirements=requirements)
+ 
+     img = np.ma.getdata(img)
+     img = np.require(img, dtype=c_double, requirements=requirements)
+ 
+     pix_unc = np.zeros_like(img)
+     pix_unc = np.require(pix_unc, dtype=c_double, requirements=requirements)
+ 
+     ycen = np.require(ycen, dtype=c_double, requirements=requirements)
+     model = np.zeros((nrows, ncols), dtype=c_double)
+     unc = np.zeros(ncols, dtype=c_double)
+ 
+     # Call the C function
+     slitfunclib.slit_func_vert(
+         ffi.cast("int", ncols),
+         ffi.cast("int", nrows),
+         ffi.cast("double *", img.ctypes.data),
+         ffi.cast("double *", pix_unc.ctypes.data),
+         ffi.cast("int *", mask.ctypes.data),
+         ffi.cast("double *", ycen.ctypes.data),
+         ffi.cast("int", osample),
+         ffi.cast("double", lambda_sp),
+         ffi.cast("double", lambda_sf),
+         ffi.cast("double *", sp.ctypes.data),
+         ffi.cast("double *", sl.ctypes.data),
+         ffi.cast("double *", model.ctypes.data),
+         ffi.cast("double *", unc.ctypes.data),
+     )
+     mask = ~mask.astype(bool)
+ 
+     return sp, sl, model, unc, mask
+ 
+ 
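As a quick orientation for the wrapper above, here is a minimal usage sketch of slitfunc on a synthetic, perfectly straight order. The cutout size, the Gaussian slit profile, and the noise level are invented for illustration and are not part of the package.

    import numpy as np
    from pyreduce.cwrappers import slitfunc

    # Synthetic swath: 21 rows across the slit, 200 columns along the dispersion axis
    nrows, ncols = 21, 200
    y = np.arange(nrows) - nrows // 2
    profile = np.exp(-0.5 * (y / 3.0) ** 2)            # assumed slit profile
    spectrum = 1000.0 + 200.0 * np.sin(np.linspace(0, 4 * np.pi, ncols))
    img = profile[:, None] * spectrum[None, :]
    img += np.random.default_rng(0).normal(0, 5, img.shape)

    ycen = np.zeros(ncols)                             # straight order, no fractional offset

    sp, sl, model, unc, mask = slitfunc(img, ycen, lambda_sp=0, lambda_sf=0.1, osample=1)
    # sp has ncols elements, sl has osample * (nrows + 1) + 1 = 23 elements,
    # and model has the same shape as img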
+ def slitfunc_curved(
+     img, ycen, tilt, shear, lambda_sp, lambda_sf, osample, yrange, maxiter=20, gain=1
+ ):
+     """Decompose an image into a spectrum and a slitfunction, image may be curved
+ 
+     Parameters
+     ----------
+     img : array[n, m]
+         input image
+     ycen : array[n]
+         traces the center of the order
+     tilt : array[n] or float
+         tilt (1st order curvature) of the order along the image, set to 0 if order straight
+     shear : array[n] or float
+         shear (2nd order curvature) of the order along the image, set to 0 if order straight
+     lambda_sp : float
+         smoothing factor of the spectrum (0 means no smoothing)
+     lambda_sf : float
+         smoothing factor of the slitfunction (e.g. 0.1 for a small amount of smoothing)
+     osample : int
+         Subpixel oversampling factor (1 means no oversampling)
+     yrange : array[2]
+         number of pixels below and above the central line that have been cut out
+     maxiter : int, optional
+         maximum number of iterations, by default 20
+     gain : float, optional
+         gain of the image, by default 1
+ 
+     Returns
+     -------
+     sp, sl, model, unc, mask, info
+         spectrum, slitfunction, model, spectrum uncertainties,
+         updated bad pixel mask, and convergence info from the C code
+     """
+ 
+     # Convert datatypes to expected values
+     lambda_sf = float(lambda_sf)
+     lambda_sp = float(lambda_sp)
+     osample = int(osample)
+     maxiter = int(maxiter)
+     img = np.asanyarray(img, dtype=c_double)
+     ycen = np.asarray(ycen, dtype=c_double)
+     yrange = np.asarray(yrange, dtype=int)
+ 
+     assert img.ndim == 2, "Image must be 2 dimensional"
+     assert ycen.ndim == 1, "Ycen must be 1 dimensional"
+     assert maxiter > 0, "Maximum iterations must be positive"
+ 
+     if np.isscalar(tilt):
+         tilt = np.full(img.shape[1], tilt, dtype=c_double)
+     else:
+         tilt = np.asarray(tilt, dtype=c_double)
+     if np.isscalar(shear):
+         shear = np.full(img.shape[1], shear, dtype=c_double)
+     else:
+         shear = np.asarray(shear, dtype=c_double)
+ 
+     assert img.shape[1] == ycen.size, (
+         f"Image and Ycen shapes are incompatible, got {img.shape} and {ycen.shape}"
+     )
+     assert img.shape[1] == tilt.size, (
+         f"Image and Tilt shapes are incompatible, got {img.shape} and {tilt.shape}"
+     )
+     assert img.shape[1] == shear.size, (
+         f"Image and Shear shapes are incompatible, got {img.shape} and {shear.shape}"
+     )
+ 
+     assert osample > 0, f"Oversample rate must be positive, but got {osample}"
+     assert lambda_sf >= 0, (
+         f"Slitfunction smoothing must be non-negative, but got {lambda_sf}"
+     )
+     assert lambda_sp >= 0, f"Spectrum smoothing must be non-negative, but got {lambda_sp}"
+ 
+     # assert np.ma.all(np.isfinite(img)), "All values in the image must be finite"
+     assert np.all(np.isfinite(ycen)), "All values in ycen must be finite"
+     assert np.all(np.isfinite(tilt)), "All values in tilt must be finite"
+     assert np.all(np.isfinite(shear)), "All values in shear must be finite"
+ 
+     assert yrange.ndim == 1, "Yrange must be 1 dimensional"
+     assert yrange.size == 2, "Yrange must have 2 elements"
+     assert yrange[0] + yrange[1] + 1 == img.shape[0], (
+         "Yrange must cover the whole image"
+     )
+     assert yrange[0] >= 0, "Yrange must be positive"
+     assert yrange[1] >= 0, "Yrange must be positive"
+ 
+     # Retrieve some derived values
+     nrows, ncols = img.shape
+     ny = osample * (nrows + 1) + 1
+ 
+     ycen_offset = ycen.astype(c_int)
+     ycen_int = ycen - ycen_offset
+     y_lower_lim = int(yrange[0])
+ 
+     mask = np.ma.getmaskarray(img)
+     img = np.ma.getdata(img)
+     mask2 = ~np.isfinite(img)
+     mask |= mask2
+     img[mask2] = 0
+ 
+     # sp should never be all zero (that's a horrible guess), as it leads to all nans.
+     # This is a simplified run of the algorithm without oversampling or curvature,
+     # but with strong smoothing,
+     # to remove the most egregious outliers, which would ruin the fit
+     sp = np.sum(img, axis=0)
+     median_filter(sp, 5, output=sp)
+     sl = np.median(img, axis=1)
+     sl /= np.sum(sl)
+ 
+     model = sl[:, None] * sp[None, :]
+     diff = model - img
+     mask[np.abs(diff) > 10 * diff.std()] = True
+ 
+     sp = np.sum(img, axis=0)
+ 
+     mask = np.where(mask, c_int(0), c_int(1))
+     # Determine the shot noise
+     # by converting electrons to photons via the gain
+     pix_unc = np.nan_to_num(np.abs(img), copy=False)
+     pix_unc *= gain
+     np.sqrt(pix_unc, out=pix_unc)
+     pix_unc[pix_unc < 1] = 1
+ 
+     psf_curve = np.zeros((ncols, 3), dtype=c_double)
+     psf_curve[:, 1] = tilt
+     psf_curve[:, 2] = shear
+ 
+     # Initialize arrays and ensure the correct datatype for C
+     requirements = ["C", "A", "W", "O"]
+     sp = np.require(sp, dtype=c_double, requirements=requirements)
+     mask = np.require(mask, dtype=c_mask, requirements=requirements)
+     img = np.require(img, dtype=c_double, requirements=requirements)
+     pix_unc = np.require(pix_unc, dtype=c_double, requirements=requirements)
+     ycen_int = np.require(ycen_int, dtype=c_double, requirements=requirements)
+     ycen_offset = np.require(ycen_offset, dtype=c_int, requirements=requirements)
+ 
+     # This memory could be reused between swaths
+     sl = np.zeros(ny, dtype=c_double)
+     model = np.zeros((nrows, ncols), dtype=c_double)
+     unc = np.zeros(ncols, dtype=c_double)
+ 
+     # Info contains the following: success, cost, status, iteration, delta_x
+     info = np.zeros(5, dtype=c_double)
+ 
+     # Ensure that no column is completely masked
+     col = np.sum(mask, axis=0) == 0
+     if np.any(col):
+         mask[mask.shape[0] // 2, col] = 1
+     # assert not np.any(np.sum(mask, axis=0) == 0), "At least one mask column is all 0."
+ 
+     # Call the C function
+     slitfunc_2dlib.slit_func_curved(
+         ffi.cast("int", ncols),
+         ffi.cast("int", nrows),
+         ffi.cast("int", ny),
+         ffi.cast("double *", img.ctypes.data),
+         ffi.cast("double *", pix_unc.ctypes.data),
+         ffi.cast("unsigned char *", mask.ctypes.data),
+         ffi.cast("double *", ycen_int.ctypes.data),
+         ffi.cast("int *", ycen_offset.ctypes.data),
+         ffi.cast("int", y_lower_lim),
+         ffi.cast("int", osample),
+         ffi.cast("double", lambda_sp),
+         ffi.cast("double", lambda_sf),
+         ffi.cast("int", maxiter),
+         ffi.cast("double *", psf_curve.ctypes.data),
+         ffi.cast("double *", sp.ctypes.data),
+         ffi.cast("double *", sl.ctypes.data),
+         ffi.cast("double *", model.ctypes.data),
+         ffi.cast("double *", unc.ctypes.data),
+         ffi.cast("double *", info.ctypes.data),
+     )
+ 
+     if np.any(np.isnan(sp)):
+         logger.error("NaNs in the spectrum")
+ 
+     # The decomposition failed
+     if info[0] == 0:
+         status = info[2]
+         if status == 0:
+             msg = "I don't know what happened"
+         elif status == -1:
+             msg = f"Did not finish convergence after maxiter ({maxiter}) iterations"
+         elif status == -2:
+             msg = "Curvature is larger than the swath. Check the curvature!"
+         else:
+             msg = f"Check the C code, for status = {status}"
+         logger.error(msg)
+         # raise RuntimeError(msg)
+ 
+     mask = mask == 0
+ 
+     return sp, sl, model, unc, mask, info
+ 
+ 
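For the curved decomposition, a call looks like the sketch below; the swath geometry, noise, gain, and curvature values are placeholders chosen only to satisfy the input checks (in particular yrange[0] + yrange[1] + 1 == nrows).

    import numpy as np
    from pyreduce.cwrappers import slitfunc_curved

    nrows, ncols = 15, 300
    yrange = (7, 7)                       # pixels cut out below and above the trace
    rng = np.random.default_rng(1)
    img = rng.normal(1000, 30, (nrows, ncols))
    ycen = np.full(ncols, 7.5)            # trace position inside the swath
    tilt = np.zeros(ncols)                # 1st order curvature per column
    shear = np.zeros(ncols)               # 2nd order curvature per column

    sp, sl, model, unc, mask, info = slitfunc_curved(
        img, ycen, tilt, shear,
        lambda_sp=0, lambda_sf=0.1, osample=3,
        yrange=yrange, maxiter=20, gain=1.5,
    )
    # info holds (success, cost, status, iteration, delta_x) as filled in by the C code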
+ # x, y, w
+ xi_ref = [("x", c_int), ("y", c_int), ("w", c_double)]
+ # x, iy, w
+ zeta_ref = [("x", c_int), ("iy", c_int), ("w", c_double)]
+ 
+ 
+ def xi_zeta_tensors(
+     ncols: int,
+     nrows: int,
+     ycen: np.ndarray,
+     yrange,  # (int, int)
+     osample: int,
+     tilt: np.ndarray,
+     shear: np.ndarray,
+ ):
+     ncols = int(ncols)
+     nrows = int(nrows)
+     osample = int(osample)
+     ny = osample * (nrows + 1) + 1
+ 
+     ycen_offset = ycen.astype(c_int)
+     ycen_int = ycen - ycen_offset
+     y_lower_lim = int(yrange[0])
+ 
+     psf_curve = np.zeros((ncols, 3), dtype=c_double)
+     psf_curve[:, 1] = tilt
+     psf_curve[:, 2] = shear
+ 
+     requirements = ["C", "A", "W", "O"]
+     ycen_int = np.require(ycen_int, dtype=c_double, requirements=requirements)
+     ycen_offset = np.require(ycen_offset, dtype=c_int, requirements=requirements)
+ 
+     xi = np.empty((ncols, ny, 4), dtype=xi_ref)
+     zeta = np.empty((ncols, nrows, 3 * (osample + 1)), dtype=zeta_ref)
+     m_zeta = np.empty((ncols, nrows), dtype=c_int)
+ 
+     slitfunc_2dlib.xi_zeta_tensors(
+         ffi.cast("int", ncols),
+         ffi.cast("int", nrows),
+         ffi.cast("int", ny),
+         ffi.cast("double *", ycen_int.ctypes.data),
+         ffi.cast("int *", ycen_offset.ctypes.data),
+         ffi.cast("int", y_lower_lim),
+         ffi.cast("int", osample),
+         ffi.cast("double *", psf_curve.ctypes.data),
+         ffi.cast("xi_ref *", xi.ctypes.data),
+         ffi.cast("zeta_ref *", zeta.ctypes.data),
+         ffi.cast("int *", m_zeta.ctypes.data),
+     )
+ 
+     return xi, zeta, m_zeta
+ 
+ 
+ def create_spectral_model(
+     ncols: int,
+     nrows: int,
+     osample: int,
+     xi: "xi_ref",
+     spec: np.ndarray,
+     slitfunc: np.ndarray,
+ ):
+     ncols = int(ncols)
+     nrows = int(nrows)
+ 
+     requirements = ["C", "A", "W", "O"]
+     spec = np.require(spec, dtype=c_double, requirements=requirements)
+     slitfunc = np.require(slitfunc, dtype=c_double, requirements=requirements)
+     xi = np.require(xi, dtype=xi_ref, requirements=requirements)
+ 
+     img = np.empty((nrows + 1, ncols), dtype=c_double)
+ 
+     slitfunc_2dlib.create_spectral_model(
+         ffi.cast("int", ncols),
+         ffi.cast("int", nrows),
+         ffi.cast("int", osample),
+         ffi.cast("xi_ref *", xi.ctypes.data),
+         ffi.cast("double *", spec.ctypes.data),
+         ffi.cast("double *", slitfunc.ctypes.data),
+         ffi.cast("double *", img.ctypes.data),
+     )
+     return img
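To show how the last two helpers fit together, here is an illustrative sketch that builds the projection tensors for a small straight swath and then reconstructs a model image from an assumed spectrum and slit function. All shapes, the flat spectrum, the Gaussian slit profile, and its normalisation are assumptions made for this example.

    import numpy as np
    from pyreduce.cwrappers import xi_zeta_tensors, create_spectral_model

    nrows, ncols, osample = 15, 100, 2
    ny = osample * (nrows + 1) + 1          # oversampled slit function length
    ycen = np.full(ncols, 7.5)
    yrange = (7, 7)
    tilt = np.zeros(ncols)
    shear = np.zeros(ncols)

    # Sparse mappings between detector pixels and oversampled slit bins
    xi, zeta, m_zeta = xi_zeta_tensors(ncols, nrows, ycen, yrange, osample, tilt, shear)

    # Model image from an assumed flat spectrum and Gaussian slit profile
    spec = np.full(ncols, 1000.0)
    iy = np.arange(ny) - ny // 2
    slit = np.exp(-0.5 * (iy / (3.0 * osample)) ** 2)
    slit /= slit.sum() / osample            # normalisation assumed for this sketch
    model = create_spectral_model(ncols, nrows, osample, xi, spec, slit)
    # model has shape (nrows + 1, ncols)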
pyreduce/datasets.py ADDED
@@ -0,0 +1,238 @@
+ """
+ Provides example datasets for the examples
+ 
+ This requires the download server to be up and running
+ if the data still needs to be downloaded
+ """
+ 
+ import logging
+ import os
+ import tarfile
+ from os.path import isfile, join
+ 
+ import wget
+ 
+ logger = logging.getLogger(__name__)
+ 
+ 
+ def get_data_dir():
+     """Get the default data directory.
+ 
+     Returns $REDUCE_DATA if set, otherwise ~/REDUCE_DATA
+     """
+     return os.environ.get("REDUCE_DATA", os.path.expanduser("~/REDUCE_DATA"))
+ 
+ 
+ def load_data_from_server(filename, directory):
+     server = r"http://sme.astro.uu.se/pyreduce/"
+     url = server + filename
+     directory = join(directory, filename)
+     wget.download(url, out=directory)
+ 
+ 
+ def get_dataset(name, local_dir=None):
+     """Load a dataset
+ 
+     Note
+     ----
+     This method will not overwrite existing files with the same
+     name, even if they have different content. Therefore,
+     if the files were changed for any reason, the user has to
+     manually delete them from the disk before using this method.
+ 
+     Parameters
+     ----------
+     name : str
+         Name of the dataset
+     local_dir : str, optional
+         directory to save data at (default: $REDUCE_DATA or ~/REDUCE_DATA)
+ 
+     Returns
+     -------
+     dataset_dir : str
+         directory where the data was saved
+     """
+ 
+     if local_dir is None:
+         local_dir = get_data_dir()
+ 
+     # load data if necessary
+     fname = f"{name}.tar.gz"
+     data_dir = join(local_dir, name)
+     filename = join(data_dir, fname)
+ 
+     os.makedirs(data_dir, exist_ok=True)
+     if not os.path.isfile(filename):
+         logger.info("Downloading dataset %s", name)
+         logger.info("Data is stored at %s", data_dir)
+         load_data_from_server(fname, data_dir)
+     else:
+         logger.info("Using existing dataset %s", name)
+ 
+     # Extract the downloaded .tar.gz file
+     with tarfile.open(filename) as file:
+         raw_dir = join(data_dir, "raw")
+         names = [f for f in file if not isfile(join(raw_dir, f.name))]
+         if len(names) != 0:
+             logger.info("Extracting data from tarball")
+             file.extractall(path=raw_dir, members=names)
+ 
+     return data_dir
+ 
+ 
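A short usage sketch for the download helpers; the REDUCE_DATA location below is an arbitrary example path, and the tarball is only downloaded when it is not already cached.

    import os
    from pyreduce import datasets

    # Optional: choose where the example data should live (hypothetical path)
    os.environ["REDUCE_DATA"] = os.path.expanduser("~/reduce_examples")

    # Downloads UVES.tar.gz on first use, reuses the cached copy afterwards;
    # the raw frames are extracted into <dataset_dir>/raw
    dataset_dir = datasets.UVES()

    # Equivalent low-level call
    dataset_dir = datasets.get_dataset("UVES", local_dir=os.environ["REDUCE_DATA"])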
+ def UVES(local_dir=None):  # pragma: no cover
+     """Load an example dataset
+     instrument: UVES
+     target: HD132205
+ 
+     Parameters
+     ----------
+     local_dir : str, optional
+         directory to save data at (default: $REDUCE_DATA or ~/REDUCE_DATA)
+ 
+     Returns
+     -------
+     dataset_dir : str
+         directory where the data was saved
+     """
+ 
+     return get_dataset("UVES", local_dir)
+ 
+ 
+ def HARPS(local_dir=None):  # pragma: no cover
+     """Load an example dataset
+     instrument: HARPS
+     target: HD109200
+ 
+     Parameters
+     ----------
+     local_dir : str, optional
+         directory to save data at (default: $REDUCE_DATA or ~/REDUCE_DATA)
+ 
+     Returns
+     -------
+     dataset_dir : str
+         directory where the data was saved
+     """
+ 
+     return get_dataset("HARPS", local_dir)
+ 
+ 
+ def LICK_APF(local_dir=None):  # pragma: no cover
+     """Load an example dataset
+     instrument: LICK_APF
+     target: KIC05005618
+ 
+     Parameters
+     ----------
+     local_dir : str, optional
+         directory to save data at (default: $REDUCE_DATA or ~/REDUCE_DATA)
+ 
+     Returns
+     -------
+     dataset_dir : str
+         directory where the data was saved
+     """
+ 
+     return get_dataset("APF", local_dir)
+ 
+ 
+ def MCDONALD(local_dir=None):  # pragma: no cover
+     """Load an example dataset
+     instrument: MCDONALD
+     target: ?
+ 
+     Parameters
+     ----------
+     local_dir : str, optional
+         directory to save data at (default: $REDUCE_DATA or ~/REDUCE_DATA)
+ 
+     Returns
+     -------
+     dataset_dir : str
+         directory where the data was saved
+     """
+ 
+     return get_dataset("MCDONALD", local_dir)
+ 
+ 
+ def JWST_MIRI(local_dir=None):  # pragma: no cover
+     """Load an example dataset
+     instrument: JWST_MIRI
+     target: ?
+ 
+     Data simulated with MIRIsim
+ 
+     Parameters
+     ----------
+     local_dir : str, optional
+         directory to save data at (default: $REDUCE_DATA or ~/REDUCE_DATA)
+ 
+     Returns
+     -------
+     dataset_dir : str
+         directory where the data was saved
+     """
+ 
+     return get_dataset("MIRI", local_dir)
+ 
+ 
+ def JWST_NIRISS(local_dir=None):  # pragma: no cover
+     """Load an example dataset
+     instrument: JWST_NIRISS
+     target: ?
+ 
+     Data simulated with awesimsoss
+ 
+     Parameters
+     ----------
+     local_dir : str, optional
+         directory to save data at (default: $REDUCE_DATA or ~/REDUCE_DATA)
+ 
+     Returns
+     -------
+     dataset_dir : str
+         directory where the data was saved
+     """
+ 
+     return get_dataset("NIRISS", local_dir)
+ 
+ 
+ def KECK_NIRSPEC(local_dir=None):  # pragma: no cover
+     """Load an example dataset
+     instrument: KECK_NIRSPEC
+     target: GJ1214
+ 
+     Parameters
+     ----------
+     local_dir : str, optional
+         directory to save data at (default: $REDUCE_DATA or ~/REDUCE_DATA)
+ 
+     Returns
+     -------
+     dataset_dir : str
+         directory where the data was saved
+     """
+ 
+     return get_dataset("NIRSPEC", local_dir)
+ 
+ 
+ def XSHOOTER(local_dir=None):  # pragma: no cover
+     """Load an example dataset
+     instrument: XSHOOTER
+     target: Ux-Ori
+ 
+     Parameters
+     ----------
+     local_dir : str, optional
+         directory to save data at (default: $REDUCE_DATA or ~/REDUCE_DATA)
+ 
+     Returns
+     -------
+     dataset_dir : str
+         directory where the data was saved
+     """
+ 
+     return get_dataset("XSHOOTER", local_dir)