pyreduce_astro-0.7a4-cp314-cp314-win_amd64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (182)
  1. pyreduce/__init__.py +67 -0
  2. pyreduce/__main__.py +322 -0
  3. pyreduce/cli.py +342 -0
  4. pyreduce/clib/Release/_slitfunc_2d.cp311-win_amd64.exp +0 -0
  5. pyreduce/clib/Release/_slitfunc_2d.cp311-win_amd64.lib +0 -0
  6. pyreduce/clib/Release/_slitfunc_2d.cp312-win_amd64.exp +0 -0
  7. pyreduce/clib/Release/_slitfunc_2d.cp312-win_amd64.lib +0 -0
  8. pyreduce/clib/Release/_slitfunc_2d.cp313-win_amd64.exp +0 -0
  9. pyreduce/clib/Release/_slitfunc_2d.cp313-win_amd64.lib +0 -0
  10. pyreduce/clib/Release/_slitfunc_2d.cp314-win_amd64.exp +0 -0
  11. pyreduce/clib/Release/_slitfunc_2d.cp314-win_amd64.lib +0 -0
  12. pyreduce/clib/Release/_slitfunc_2d.obj +0 -0
  13. pyreduce/clib/Release/_slitfunc_bd.cp311-win_amd64.exp +0 -0
  14. pyreduce/clib/Release/_slitfunc_bd.cp311-win_amd64.lib +0 -0
  15. pyreduce/clib/Release/_slitfunc_bd.cp312-win_amd64.exp +0 -0
  16. pyreduce/clib/Release/_slitfunc_bd.cp312-win_amd64.lib +0 -0
  17. pyreduce/clib/Release/_slitfunc_bd.cp313-win_amd64.exp +0 -0
  18. pyreduce/clib/Release/_slitfunc_bd.cp313-win_amd64.lib +0 -0
  19. pyreduce/clib/Release/_slitfunc_bd.cp314-win_amd64.exp +0 -0
  20. pyreduce/clib/Release/_slitfunc_bd.cp314-win_amd64.lib +0 -0
  21. pyreduce/clib/Release/_slitfunc_bd.obj +0 -0
  22. pyreduce/clib/__init__.py +0 -0
  23. pyreduce/clib/_slitfunc_2d.cp311-win_amd64.pyd +0 -0
  24. pyreduce/clib/_slitfunc_2d.cp312-win_amd64.pyd +0 -0
  25. pyreduce/clib/_slitfunc_2d.cp313-win_amd64.pyd +0 -0
  26. pyreduce/clib/_slitfunc_2d.cp314-win_amd64.pyd +0 -0
  27. pyreduce/clib/_slitfunc_bd.cp311-win_amd64.pyd +0 -0
  28. pyreduce/clib/_slitfunc_bd.cp312-win_amd64.pyd +0 -0
  29. pyreduce/clib/_slitfunc_bd.cp313-win_amd64.pyd +0 -0
  30. pyreduce/clib/_slitfunc_bd.cp314-win_amd64.pyd +0 -0
  31. pyreduce/clib/build_extract.py +75 -0
  32. pyreduce/clib/slit_func_2d_xi_zeta_bd.c +1313 -0
  33. pyreduce/clib/slit_func_2d_xi_zeta_bd.h +55 -0
  34. pyreduce/clib/slit_func_bd.c +362 -0
  35. pyreduce/clib/slit_func_bd.h +17 -0
  36. pyreduce/clipnflip.py +147 -0
  37. pyreduce/combine_frames.py +861 -0
  38. pyreduce/configuration.py +191 -0
  39. pyreduce/continuum_normalization.py +329 -0
  40. pyreduce/cwrappers.py +404 -0
  41. pyreduce/datasets.py +238 -0
  42. pyreduce/echelle.py +413 -0
  43. pyreduce/estimate_background_scatter.py +130 -0
  44. pyreduce/extract.py +1362 -0
  45. pyreduce/extraction_width.py +77 -0
  46. pyreduce/instruments/__init__.py +0 -0
  47. pyreduce/instruments/aj.py +9 -0
  48. pyreduce/instruments/aj.yaml +51 -0
  49. pyreduce/instruments/andes.py +102 -0
  50. pyreduce/instruments/andes.yaml +72 -0
  51. pyreduce/instruments/common.py +711 -0
  52. pyreduce/instruments/common.yaml +57 -0
  53. pyreduce/instruments/crires_plus.py +103 -0
  54. pyreduce/instruments/crires_plus.yaml +101 -0
  55. pyreduce/instruments/filters.py +195 -0
  56. pyreduce/instruments/harpn.py +203 -0
  57. pyreduce/instruments/harpn.yaml +140 -0
  58. pyreduce/instruments/harps.py +312 -0
  59. pyreduce/instruments/harps.yaml +144 -0
  60. pyreduce/instruments/instrument_info.py +140 -0
  61. pyreduce/instruments/jwst_miri.py +29 -0
  62. pyreduce/instruments/jwst_miri.yaml +53 -0
  63. pyreduce/instruments/jwst_niriss.py +98 -0
  64. pyreduce/instruments/jwst_niriss.yaml +60 -0
  65. pyreduce/instruments/lick_apf.py +35 -0
  66. pyreduce/instruments/lick_apf.yaml +60 -0
  67. pyreduce/instruments/mcdonald.py +123 -0
  68. pyreduce/instruments/mcdonald.yaml +56 -0
  69. pyreduce/instruments/metis_ifu.py +45 -0
  70. pyreduce/instruments/metis_ifu.yaml +62 -0
  71. pyreduce/instruments/metis_lss.py +45 -0
  72. pyreduce/instruments/metis_lss.yaml +62 -0
  73. pyreduce/instruments/micado.py +45 -0
  74. pyreduce/instruments/micado.yaml +62 -0
  75. pyreduce/instruments/models.py +257 -0
  76. pyreduce/instruments/neid.py +156 -0
  77. pyreduce/instruments/neid.yaml +61 -0
  78. pyreduce/instruments/nirspec.py +215 -0
  79. pyreduce/instruments/nirspec.yaml +63 -0
  80. pyreduce/instruments/nte.py +42 -0
  81. pyreduce/instruments/nte.yaml +55 -0
  82. pyreduce/instruments/uves.py +46 -0
  83. pyreduce/instruments/uves.yaml +65 -0
  84. pyreduce/instruments/xshooter.py +39 -0
  85. pyreduce/instruments/xshooter.yaml +63 -0
  86. pyreduce/make_shear.py +607 -0
  87. pyreduce/masks/mask_crires_plus_det1.fits.gz +0 -0
  88. pyreduce/masks/mask_crires_plus_det2.fits.gz +0 -0
  89. pyreduce/masks/mask_crires_plus_det3.fits.gz +0 -0
  90. pyreduce/masks/mask_ctio_chiron.fits.gz +0 -0
  91. pyreduce/masks/mask_elodie.fits.gz +0 -0
  92. pyreduce/masks/mask_feros3.fits.gz +0 -0
  93. pyreduce/masks/mask_flames_giraffe.fits.gz +0 -0
  94. pyreduce/masks/mask_harps_blue.fits.gz +0 -0
  95. pyreduce/masks/mask_harps_red.fits.gz +0 -0
  96. pyreduce/masks/mask_hds_blue.fits.gz +0 -0
  97. pyreduce/masks/mask_hds_red.fits.gz +0 -0
  98. pyreduce/masks/mask_het_hrs_2x5.fits.gz +0 -0
  99. pyreduce/masks/mask_jwst_miri_lrs_slitless.fits.gz +0 -0
  100. pyreduce/masks/mask_jwst_niriss_gr700xd.fits.gz +0 -0
  101. pyreduce/masks/mask_lick_apf_.fits.gz +0 -0
  102. pyreduce/masks/mask_mcdonald.fits.gz +0 -0
  103. pyreduce/masks/mask_nes.fits.gz +0 -0
  104. pyreduce/masks/mask_nirspec_nirspec.fits.gz +0 -0
  105. pyreduce/masks/mask_sarg.fits.gz +0 -0
  106. pyreduce/masks/mask_sarg_2x2a.fits.gz +0 -0
  107. pyreduce/masks/mask_sarg_2x2b.fits.gz +0 -0
  108. pyreduce/masks/mask_subaru_hds_red.fits.gz +0 -0
  109. pyreduce/masks/mask_uves_blue.fits.gz +0 -0
  110. pyreduce/masks/mask_uves_blue_binned_2_2.fits.gz +0 -0
  111. pyreduce/masks/mask_uves_middle.fits.gz +0 -0
  112. pyreduce/masks/mask_uves_middle_2x2_split.fits.gz +0 -0
  113. pyreduce/masks/mask_uves_middle_binned_2_2.fits.gz +0 -0
  114. pyreduce/masks/mask_uves_red.fits.gz +0 -0
  115. pyreduce/masks/mask_uves_red_2x2.fits.gz +0 -0
  116. pyreduce/masks/mask_uves_red_2x2_split.fits.gz +0 -0
  117. pyreduce/masks/mask_uves_red_binned_2_2.fits.gz +0 -0
  118. pyreduce/masks/mask_xshooter_nir.fits.gz +0 -0
  119. pyreduce/pipeline.py +619 -0
  120. pyreduce/rectify.py +138 -0
  121. pyreduce/reduce.py +2065 -0
  122. pyreduce/settings/settings_AJ.json +19 -0
  123. pyreduce/settings/settings_ANDES.json +89 -0
  124. pyreduce/settings/settings_CRIRES_PLUS.json +89 -0
  125. pyreduce/settings/settings_HARPN.json +73 -0
  126. pyreduce/settings/settings_HARPS.json +69 -0
  127. pyreduce/settings/settings_JWST_MIRI.json +55 -0
  128. pyreduce/settings/settings_JWST_NIRISS.json +55 -0
  129. pyreduce/settings/settings_LICK_APF.json +62 -0
  130. pyreduce/settings/settings_MCDONALD.json +58 -0
  131. pyreduce/settings/settings_METIS_IFU.json +77 -0
  132. pyreduce/settings/settings_METIS_LSS.json +77 -0
  133. pyreduce/settings/settings_MICADO.json +78 -0
  134. pyreduce/settings/settings_NEID.json +73 -0
  135. pyreduce/settings/settings_NIRSPEC.json +58 -0
  136. pyreduce/settings/settings_NTE.json +60 -0
  137. pyreduce/settings/settings_UVES.json +54 -0
  138. pyreduce/settings/settings_XSHOOTER.json +78 -0
  139. pyreduce/settings/settings_pyreduce.json +184 -0
  140. pyreduce/settings/settings_schema.json +850 -0
  141. pyreduce/tools/__init__.py +0 -0
  142. pyreduce/tools/combine.py +117 -0
  143. pyreduce/trace.py +979 -0
  144. pyreduce/util.py +1366 -0
  145. pyreduce/wavecal/MICADO_HK_3arcsec_chip5.npz +0 -0
  146. pyreduce/wavecal/atlas/thar.fits +4946 -13
  147. pyreduce/wavecal/atlas/thar_list.txt +4172 -0
  148. pyreduce/wavecal/atlas/une.fits +0 -0
  149. pyreduce/wavecal/convert.py +38 -0
  150. pyreduce/wavecal/crires_plus_J1228_Open_det1.npz +0 -0
  151. pyreduce/wavecal/crires_plus_J1228_Open_det2.npz +0 -0
  152. pyreduce/wavecal/crires_plus_J1228_Open_det3.npz +0 -0
  153. pyreduce/wavecal/harpn_harpn_2D.npz +0 -0
  154. pyreduce/wavecal/harps_blue_2D.npz +0 -0
  155. pyreduce/wavecal/harps_blue_pol_2D.npz +0 -0
  156. pyreduce/wavecal/harps_red_2D.npz +0 -0
  157. pyreduce/wavecal/harps_red_pol_2D.npz +0 -0
  158. pyreduce/wavecal/mcdonald.npz +0 -0
  159. pyreduce/wavecal/metis_lss_l_2D.npz +0 -0
  160. pyreduce/wavecal/metis_lss_m_2D.npz +0 -0
  161. pyreduce/wavecal/nirspec_K2.npz +0 -0
  162. pyreduce/wavecal/uves_blue_360nm_2D.npz +0 -0
  163. pyreduce/wavecal/uves_blue_390nm_2D.npz +0 -0
  164. pyreduce/wavecal/uves_blue_437nm_2D.npz +0 -0
  165. pyreduce/wavecal/uves_middle_2x2_2D.npz +0 -0
  166. pyreduce/wavecal/uves_middle_565nm_2D.npz +0 -0
  167. pyreduce/wavecal/uves_middle_580nm_2D.npz +0 -0
  168. pyreduce/wavecal/uves_middle_600nm_2D.npz +0 -0
  169. pyreduce/wavecal/uves_middle_665nm_2D.npz +0 -0
  170. pyreduce/wavecal/uves_middle_860nm_2D.npz +0 -0
  171. pyreduce/wavecal/uves_red_580nm_2D.npz +0 -0
  172. pyreduce/wavecal/uves_red_600nm_2D.npz +0 -0
  173. pyreduce/wavecal/uves_red_665nm_2D.npz +0 -0
  174. pyreduce/wavecal/uves_red_760nm_2D.npz +0 -0
  175. pyreduce/wavecal/uves_red_860nm_2D.npz +0 -0
  176. pyreduce/wavecal/xshooter_nir.npz +0 -0
  177. pyreduce/wavelength_calibration.py +1871 -0
  178. pyreduce_astro-0.7a4.dist-info/METADATA +106 -0
  179. pyreduce_astro-0.7a4.dist-info/RECORD +182 -0
  180. pyreduce_astro-0.7a4.dist-info/WHEEL +4 -0
  181. pyreduce_astro-0.7a4.dist-info/entry_points.txt +2 -0
  182. pyreduce_astro-0.7a4.dist-info/licenses/LICENSE +674 -0
@@ -0,0 +1,861 @@
1
+ """
2
+ Combine several fits files into one master frame
3
+
4
+ Used to create master bias and master flat
5
+ """
6
+
7
+ import datetime
8
+ import logging
9
+ import os
10
+ import warnings
11
+
12
+ import astropy.io.fits as fits
13
+ import matplotlib.pyplot as plt
14
+ import numpy as np
15
+ from scipy.ndimage import median_filter
16
+ from tqdm import tqdm
17
+
18
+ from . import util
19
+ from .clipnflip import clipnflip
20
+ from .instruments.instrument_info import load_instrument
21
+ from .util import gaussbroad, gaussfit
22
+
23
+ logger = logging.getLogger(__name__)
24
+
25
+
26
+ def running_median(arr, size):
27
+ """Calculate the running median of a 2D sequence
28
+
29
+ Parameters
30
+ ----------
31
+ arr : 2d array [n, l]
32
+ n datasets of length l
33
+ size : int
34
+ number of elements to consider for each median
35
+ Returns
36
+ -------
37
+ 2d array [n, l-size]
38
+ running median
39
+ """
40
+
41
+ ret = np.array([median_filter(s, size=size, mode="constant") for s in arr])
42
+ m = size // 2
43
+ return ret[:, m:-m]
44
+
45
+
46
+ def running_sum(arr, size):
47
+ """Calculate the running sum over the 2D sequence
48
+
49
+ Parameters
50
+ ----------
51
+ arr : array[n, l]
52
+ sequence to calculate running sum over, n datasets of length l
53
+ size : int
54
+ number of elements to sum
55
+ Returns
56
+ -------
57
+ 2D array
58
+ running sum
59
+ """
60
+
61
+ ret = np.cumsum(arr, axis=1)
62
+ ret[:, size:] -= ret[:, :-size]
63
+ return ret[:, size - 1 :]
64
+
65
+
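The cumulative-sum trick in running_sum is equivalent to a brute-force sliding-window sum; a minimal sanity check under the same definitions (a sketch, not package code):

    import numpy as np

    arr = np.arange(12, dtype=float).reshape(2, 6)
    size = 3
    # cumulative-sum formulation, as in running_sum above
    ret = np.cumsum(arr, axis=1)
    ret[:, size:] -= ret[:, :-size]
    fast = ret[:, size - 1:]
    # brute-force window sums for comparison
    slow = np.array([[row[i:i + size].sum() for i in range(row.size - size + 1)]
                     for row in arr])
    assert np.allclose(fast, slow)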
66
+ def calculate_probability(buffer, window, method="sum"):
67
+ """
68
+ Construct a probability function based on buffer data.
69
+
70
+ Parameters
71
+ ----------
72
+ buffer : array of shape (nx, ny)
73
+ buffer
74
+ window : int
75
+ size of the running window
76
+ method : {"sum", "median"}, optional
77
+ which method to use to average the probabilities (default: "sum")
78
+ "sum" is much faster, but "median" is more resistant to outliers
79
+
80
+ Returns
81
+ -------
82
+ weights : array of shape (nx, ny - 2 * window)
83
+ probabilities
84
+ """
85
+
86
+ buffer = np.require(buffer, dtype=float)
87
+
88
+ # Take the median/sum for each file
89
+ if method == "median":
90
+ # Running median is slow
91
+ weights = running_median(buffer, 2 * window + 1)
92
+ sum_of_weights = np.mean(weights, axis=0)
93
+ if method == "sum":
94
+ # Running sum is fast
95
+ weights = running_sum(buffer, 2 * window + 1)
96
+ sum_of_weights = np.sum(weights, axis=0)
97
+
98
+ # norm probability
99
+ np.divide(weights, sum_of_weights, where=sum_of_weights > 0, out=weights)
100
+ return weights
101
+
102
+
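For intuition about the weights: if every frame in the stack is identical, each file's probability collapses to 1/n_files at every column. A small sketch (assuming the wheel is installed so the function can be imported):

    import numpy as np
    from pyreduce.combine_frames import calculate_probability

    n_files, n_cols, window = 4, 20, 2
    buffer = np.tile(np.linspace(1.0, 2.0, n_cols), (n_files, 1))
    weights = calculate_probability(buffer, window, method="sum")
    # identical frames -> every file carries the same weight
    assert np.allclose(weights, 1.0 / n_files)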
103
+ def fix_bad_pixels(probability, buffer, readnoise, gain, threshold):
104
+ """
105
+ Find and fix bad pixels
106
+
107
+ Parameters
108
+ ----------
109
+ probability : array(float)
110
+ probabilities
111
+ buffer : array(int)
112
+ image buffer
113
+ readnoise : float
114
+ readnoise of current amplifier
115
+ gain : float
116
+ gain of current amplifier
117
+ threshold : float
118
+ sigma threshold between observation and fit for bad pixels
119
+
120
+ Returns
121
+ -------
122
+ corrected_signal, nbad
123
+ summed frames with bad pixels replaced by the fit, and the number of pixels fixed
124
+ """
125
+ # Fit signal
126
+ ratio = np.zeros_like(probability)
127
+ np.divide(buffer, probability, where=probability > 0, out=ratio)
128
+ # ratio = np.where(probability > 0, buffer / probability, 0.)
129
+ amplitude = (
130
+ np.sum(ratio, axis=0) - np.min(ratio, axis=0) - np.max(ratio, axis=0)
131
+ ) / (buffer.shape[0] - 2)
132
+
133
+ fitted_signal = np.where(probability > 0, amplitude[None, :] * probability, 0)
134
+ predicted_noise = np.zeros_like(fitted_signal)
135
+ tmp = readnoise**2 + (fitted_signal / gain)
136
+ np.sqrt(tmp, where=tmp >= 0, out=predicted_noise)
137
+
138
+ # Identify outliers
139
+ badpixels = buffer - fitted_signal > threshold * predicted_noise
140
+ nbad = np.count_nonzero(badpixels)
141
+
142
+ # Construct the summed flat
143
+ corrected_signal = np.where(badpixels, fitted_signal, buffer)
144
+ corrected_signal = np.sum(corrected_signal, axis=0)
145
+ return corrected_signal, nbad
146
+
147
+
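A minimal sketch of the outlier replacement, assuming flat probabilities and one injected cosmic-ray hit (all values made up for illustration):

    import numpy as np
    from pyreduce.combine_frames import fix_bad_pixels

    n_files, n_cols = 5, 50
    rng = np.random.default_rng(0)
    buffer = 1000.0 + rng.normal(0, 3, (n_files, n_cols))
    buffer[2, 17] += 500.0                            # injected cosmic ray
    prob = np.full((n_files, n_cols), 1.0 / n_files)  # flat probabilities
    corrected, nbad = fix_bad_pixels(prob, buffer, readnoise=3.0, gain=1.0,
                                     threshold=3.5)
    assert nbad >= 1                                  # the hit is flagged
    assert abs(corrected[17] - 5000.0) < 100          # and replaced by the fit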
148
+ def combine_frames_simple(
149
+ files, instrument, arm, extension=None, dtype=np.float32, **kwargs
150
+ ):
151
+ """
152
+ Simple addition of similar images.
153
+
154
+ Parameters
155
+ ----------
156
+ files : list(str)
157
+ list of fits files to combine
158
+ instrument : Instrument
159
+ PyReduce instrument object with a load_fits method
160
+ arm : str
161
+ instrument arm
162
+ extension : int, optional
163
+ fits extension to load (default: None, i.e. determined by the instrument)
164
+ dtype : np.dtype, optional
165
+ datatype of the combined image (default float32)
166
+
167
+ Returns
168
+ -------
169
+ combined_data, header
170
+ combined image data, header
171
+ """
172
+
173
+ if len(files) == 0:
174
+ raise ValueError("No files given for combine frames")
175
+
176
+ # Load the first file to get the shape and header
177
+ result, head = instrument.load_fits(
178
+ files[0], arm, dtype=dtype, extension=extension, **kwargs
179
+ )
180
+
181
+ # Sum the remaining files
182
+ for fname in files[1:]:
183
+ data, _ = instrument.load_fits(
184
+ fname, arm, dtype=dtype, extension=extension, **kwargs
185
+ )
186
+ result += data
187
+
188
+ # Update the header
189
+ head["NIMAGES"] = (len(files), "number of images summed")
190
+ head["EXPTIME"] = (head["EXPTIME"] * len(files), "total exposure time")
191
+ head["DARKTIME"] = (
192
+ head.get("DARKTIME", head["EXPTIME"]) * len(files),
193
+ "total dark time",
194
+ )
195
+
196
+ # Update the readout noise
197
+ if "RDNOISE" in head:
198
+ head["RDNOISE"] = (
199
+ head["RDNOISE"] * np.sqrt(len(files)),
200
+ "readout noise in combined image",
201
+ )
202
+
203
+ head.add_history(f"Combined {len(files)} images by simple addition")
204
+
205
+ return result, head
206
+
207
+
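A hedged usage sketch (the instrument choice, arm, and file names below are placeholders, not values taken from this package): simple co-addition sums counts and exposure time, so NIMAGES and EXPTIME in the returned header describe the whole stack.

    from pyreduce.combine_frames import combine_frames_simple
    from pyreduce.instruments.instrument_info import load_instrument

    instrument = load_instrument("UVES")              # hypothetical choice
    files = ["bias_001.fits", "bias_002.fits", "bias_003.fits"]
    combined, header = combine_frames_simple(files, instrument, arm="red")
    print(header["NIMAGES"], header["EXPTIME"])       # 3, 3 x single exposure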
208
+ def combine_frames(
209
+ files,
210
+ instrument,
211
+ arm,
212
+ extension=None,
213
+ threshold=3.5,
214
+ window=50,
215
+ dtype=np.float32,
216
+ **kwargs,
217
+ ):
218
+ """
219
+ Subroutine to correct cosmic rays blemishes, while adding otherwise
220
+ similar images.
221
+
222
+ combine_frames co-adds a group of FITS files with 2D images of identical dimensions.
223
+ In the process it rejects cosmic rays, detector defects, etc. It is capable of
224
+ handling images that have a stripe pattern (e.g. echelle spectra) using the REDUCE
225
+ arminfo conventions to figure out image orientation and useful pixel ranges.
226
+ It can handle many frames. Special cases: 1 file in the list (the input is returned as output)
227
+ and 2 files (straight sum is returned).
228
+
229
+ If the image orientation is not predominantly vertical, the image is rotated 90 degrees (and rotated back afterwards).
230
+
231
+ Open all FITS files in the list.
232
+ Loop through the rows. Read next row from each file into a row buffer mBuff[nCol, nFil].
233
+ Optionally correct the data for non-linearity.
234
+
235
+ calc_probability::
236
+
237
+ Go through the row creating "probability" vector. That is for column iCol take the median of
238
+ the part of the row mBuff[iCol-win:iCol+win,iFil] for each file and divide these medians by the
239
+ mean of them computed across the stack of files. In other words:
240
+ >>> filwt[iFil] = median(mBuff[iCol-win:iCol+win,iFil])
241
+ >>> norm_filwt = mean(filwt)
242
+ >>> prob[iCol,iFil] = (norm_filwt>0)?filwt[iFil]/norm_filwt:filwt[iFil]
243
+
244
+ This is done for all iCol in the range of [win:nCol-win-1]. It is then linearly extrapolated to
245
+ the win zones of both ends. E.g. for iCol in [0:win-1] range:
246
+ >>> prob[iCol,iFil]=2*prob[win,iFil]-prob[2*win-iCol,iFil]
247
+
248
+ For the other end ([nCol-win:nCol-1]) it is similar:
249
+ >>> prob[iCol,iFil]=2*prob[nCol-win-1,iFil]-prob[2*(nCol-win-1)-iCol,iFil]
250
+
251
+ fix_bad_pixels::
252
+
253
+ Once the probabilities are constructed we can do the fitting, measure scatter and detect outliers.
254
+ We ignore negative or zero probabilities as they should not occur. For each iCol with (some)
255
+ positive probabilities we compute the ratios of the original data to the probabilities and get
256
+ the mean amplitude of these ratios after rejecting extreme values:
257
+ >>> ratio = mBuff[iCol,iFil]/prob[iCol,iFil]
258
+ >>> amp = (total(ratio)-min(ratio)-max(ratio))/(nFil-2)
259
+ >>> mFit[iCol,iFil] = amp*prob[iCol,iFil]
260
+
261
+ Note that for iFil where prob[iCol,iFil] is zero we simply set mFit to zero. The scatter (noise)
262
+ consists of readout noise and shot noise of the model (fit) added in quadrature:
263
+ >>> sig=sqrt(rdnoise*rdnoise + abs(mFit[iCol,iFil]/gain))
264
+
265
+ and the outliers are defined as:
266
+ >>> iBad=where(mBuff-mFit gt thresh*sig)
267
+
268
+ Bad values are replaced from the fit:
269
+ >>> mBuff[iBad]=mFit[iBad]
270
+
271
+ and mBuff is summed across the file dimension to create an output row.
272
+
273
+ Parameters
274
+ ----------
275
+ files : list(str)
276
+ list of fits files to combine
277
+ instrument : str
278
+ instrument id for arminfo
279
+ arm : str
280
+ instrument arm
281
+ extension : int, optional
282
+ fits extension to load (default: None, i.e. determined by the instrument)
283
+ threshold : float, optional
284
+ threshold for bad pixels (default: 3.5)
285
+ window : int, optional
286
+ horizontal window size (default: 50)
287
+ mask : array(bool), optional
288
+ mask for the fits image (default: None)
289
+ xr : int, optional
290
+ xrange (default: None)
291
+ yr : int, optional
292
+ yrange (default: None)
293
+ debug : bool, optional
294
+ show debug plot of noise distribution (default: False)
295
+ dtype : np.dtype, optional
296
+ datatype of the combined image (default float32)
297
+
298
+ Returns
299
+ -------
300
+ combined_data, header
301
+ combined image data, header
302
+ """
303
+
304
+ DEBUG_NROWS = 100 # print status update every DEBUG_NROWS rows (if debug is True)
305
+ if instrument is None or isinstance(instrument, str):
306
+ instrument = load_instrument(instrument)
307
+
308
+ # summarize file info
309
+ logger.debug("Files:")
310
+ for i, fname in enumerate(files):
311
+ logger.debug("%i\t%s", i, fname)
312
+
313
+ # No images, or only one image
314
+ if len(files) == 0:
315
+ raise ValueError("No files given for combine frames")
316
+ elif len(files) == 1:
317
+ result, head = instrument.load_fits(
318
+ files[0], arm, dtype=dtype, extension=extension, **kwargs
319
+ )
320
+ readnoise = np.atleast_1d(head.get("e_readn", 0))
321
+ total_exposure_time = head.get("exptime", 0)
322
+ n_fixed = 0
323
+ linear = head.get("e_linear", True)
324
+
325
+ # Two images
326
+ elif len(files) == 2:
327
+ bias1, head1 = instrument.load_fits(
328
+ files[0], arm, dtype=dtype, extension=extension, **kwargs
329
+ )
330
+ exp1 = head1.get("exptime", 0)
331
+
332
+ bias2, head2 = instrument.load_fits(
333
+ files[1], arm, dtype=dtype, extension=extension, **kwargs
334
+ )
335
+ exp2 = head2.get("exptime", 0)
336
+ readnoise = head2.get("e_readn", 0)
337
+
338
+ result = bias2 + bias1
339
+ head = head2
340
+
341
+ total_exposure_time = exp1 + exp2
342
+ readnoise = np.atleast_1d(readnoise)
343
+ n_fixed = 0
344
+ linear = head.get("e_linear", True)
345
+
346
+ else: # More than two images
347
+ # Get information from headers
348
+ # TODO: check if all values are the same in all the headers?
349
+
350
+ heads = [
351
+ instrument.load_fits(
352
+ f, arm, header_only=True, dtype=dtype, extension=extension, **kwargs
353
+ )
354
+ for f in files
355
+ ]
356
+ head = heads[0]
357
+
358
+ # if sizes vary, it will show during loading of the data
359
+ n_columns = head["naxis1"]
360
+ n_rows = head["naxis2"]
361
+
362
+ # check if we deal with multiple amplifiers
363
+ n_amplifier = head.get("e_ampl", 1)
364
+ # check orientation of the image
365
+ # orient 0, 2, 5, 7: orders are horizontal
366
+ # orient 1, 3, 4, 6: orders are vertical
367
+ orientation = head.get("e_orient", 0)
368
+ head.get("e_transpose", False)
369
+ orientation = orientation % 8
370
+ # check if non-linearity correction
371
+ linear = head.get("e_linear", True)
372
+
373
+ # section(s) of the detector to process, x_low, x_high, y_low, y_high
374
+ # head["e_xlo*"] will find all entries with * as a wildcard
375
+ # we also ensure that we will have one dimensional arrays (not just the value)
376
+ cards = sorted(head["e_xlo*"].cards, key=lambda c: c[0])
377
+ x_low = [c[1] for c in cards]
378
+ cards = sorted(head["e_xhi*"].cards, key=lambda c: c[0])
379
+ x_high = [c[1] for c in cards]
380
+ cards = sorted(head["e_ylo*"].cards, key=lambda c: c[0])
381
+ y_low = [c[1] for c in cards]
382
+ cards = sorted(head["e_yhi*"].cards, key=lambda c: c[0])
383
+ y_high = [c[1] for c in cards]
384
+
385
+ cards = sorted(head["e_gain*"].cards, key=lambda c: c[0])
386
+ gain = [c[1] for c in cards]
387
+ cards = sorted(head["e_readn*"].cards, key=lambda c: c[0])
388
+ readnoise = [c[1] for c in cards]
389
+ total_exposure_time = sum(h.get("exptime", 0) for h in heads)
390
+
391
+ # Scaling for image data
392
+ bscale = [h.get("bscale", 1) for h in heads]
393
+ bzero = [h.get("bzero", 0) for h in heads]
394
+
395
+ result = np.zeros((n_rows, n_columns), dtype=dtype) # the combined image
396
+ n_fixed = 0 # number of fixed pixels
397
+
398
+ # Load all image hdus, but leave the data on the disk, using memmap
399
+ # Need to scale data later
400
+ if extension is None:
401
+ extension = [instrument.get_extension(h, arm) for h in heads]
402
+ else:
403
+ extension = [extension] * len(heads)
404
+
405
+ data = [
406
+ fits.open(f, memmap=True, do_not_scale_image_data=True)[e]
407
+ for f, e in zip(files, extension, strict=False)
408
+ ]
409
+
410
+ if window >= n_columns / 2:
411
+ window = n_columns // 10
412
+ logger.warning("Reduce Window size to fit the image")
413
+
414
+ # depending on the orientation the indexing changes and the borders of the image change
415
+ if orientation in [1, 3, 4, 6]:
416
+ # idx gives the index for accessing the data in the image, which is rotated depending on the orientation
417
+ # We could just rotate the whole image, but that requires reading the whole image at once
418
+ def index(row, x_left, x_right):
419
+ return (slice(x_left, x_right), row)
420
+
421
+ # Exchange the borders of the image
422
+ x_low, x_high, y_low, y_high = y_low, y_high, x_low, x_high
423
+ else:
424
+
425
+ def index(row, x_left, x_right):
426
+ return (row, slice(x_left, x_right))
427
+
428
+ # For several amplifiers, different sections of the image are set
429
+ # One for each amplifier, each amplifier is treated separately
430
+ for amplifier in range(n_amplifier):
431
+ # Pick data for current amplifier
432
+ x_left = x_low[amplifier]
433
+ x_right = x_high[amplifier]
434
+ y_bottom = y_low[amplifier]
435
+ y_top = y_high[amplifier]
436
+
437
+ gain_amp = gain[amplifier]
438
+ readnoise_amp = readnoise[amplifier]
439
+
440
+ # Prepare temporary arrays
441
+ buffer = np.zeros((len(files), x_right - x_left))
442
+ probability = np.zeros((len(files), x_right - x_left))
443
+
444
+ # for each row
445
+ for row in tqdm(range(y_bottom, y_top), desc="Rows"):
446
+ if row % DEBUG_NROWS == 0:
447
+ logger.debug(
448
+ "%i rows processed - %i pixels fixed so far", row, n_fixed
449
+ )
450
+
451
+ # load current row
452
+ idx = index(row, x_left, x_right)
453
+ for i in range(len(files)):
454
+ # Cast to float64 before scaling so that int16 data cannot
455
+ # overflow; the receiving buffer is float64 anyway.
456
+ buffer[i, :] = (
457
+ data[i].data[idx].astype("float64") * bscale[i] + bzero[i]
458
+ )
459
+
460
+ # Calculate probabilities
461
+ probability[:, window:-window] = calculate_probability(buffer, window)
462
+
463
+ # extrapolate to edges
464
+ probability[:, :window] = (
465
+ 2 * probability[:, window][:, None]
466
+ - probability[:, 2 * window : window : -1]
467
+ )
468
+ probability[:, -window:] = (
469
+ 2 * probability[:, -window - 1][:, None]
470
+ - probability[:, -window - 1 : -2 * window - 1 : -1]
471
+ )
472
+
473
+ # fix bad pixels
474
+ result[idx], n_bad = fix_bad_pixels(
475
+ probability, buffer, readnoise_amp, gain_amp, threshold
476
+ )
477
+ n_fixed += n_bad
478
+
479
+ logger.debug("total cosmic ray hits identified and removed: %i", n_fixed)
480
+
481
+ result = clipnflip(result, head)
482
+ result = np.ma.masked_array(result, mask=kwargs.get("mask"))
483
+
484
+ for d in data:
485
+ d._file.close() # Close open FITS files
486
+
487
+ # Add info to header.
488
+ head["bzero"] = 0.0
489
+ head["bscale"] = 1.0
490
+ head["exptime"] = total_exposure_time
491
+ head["darktime"] = total_exposure_time
492
+ # Because we do not divide the signal by the number of files the
493
+ # read-out noise goes up by the square root of the number of files
494
+
495
+ for n_amp, rdn in enumerate(readnoise):
496
+ head[f"rdnoise{n_amp + 1:0>1}"] = (
497
+ rdn * np.sqrt(len(files)),
498
+ " noise in combined image, electrons",
499
+ )
500
+
501
+ head["nimages"] = (len(files), " number of images summed")
502
+ head["npixfix"] = (n_fixed, " pixels corrected for cosmic rays")
503
+ head.add_history(
504
+ f"images coadded by combine_frames.py on {datetime.datetime.now()}"
505
+ )
506
+
507
+ if not linear: # pragma: no cover
508
+ # non-linearity was fixed. mark this in the header
509
+ raise NotImplementedError() # TODO Nonlinear
510
+ # i = np.where(head["e_linear"] >= 0)
511
+ # head[i] = np.array((head[0 : i - 1 + 1], head[i + 1 :]))
512
+ # head["e_linear"] = ("t", " image corrected of non-linearity")
513
+
514
+ # ii = np.where(head["e_gain*"] >= 0)
515
+ # if len(ii[0]) > 0:
516
+ # for i in range(len(ii[0])):
517
+ # k = ii[i]
518
+ # head = np.array((head[0 : k - 1 + 1], head[k + 1 :]))
519
+ # head["e_gain"] = (1, " image was converted to e-")
520
+
521
+ return result, head
522
+
523
+
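The edge handling in combine_frames linearly extrapolates the probabilities by point reflection about the first and last valid columns; a small numeric check of that reflection formula (a sketch, not package code):

    import numpy as np

    window = 3
    prob = np.zeros((1, 12))
    prob[:, window:-window] = np.arange(6, dtype=float) + 1.0   # valid interior
    # point reflection about the first valid column, as in the code above
    prob[:, :window] = (2 * prob[:, window][:, None]
                        - prob[:, 2 * window:window:-1])
    # the interior trend (slope 1) continues into the edge zone
    assert np.allclose(prob[0, :window + 1], [-2.0, -1.0, 0.0, 1.0])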
524
+ def combine_calibrate(
525
+ files,
526
+ instrument,
527
+ arm,
528
+ mask=None,
529
+ bias=None,
530
+ bhead=None,
531
+ norm=None,
532
+ bias_scaling="exposure_time",
533
+ norm_scaling="divide",
534
+ plot=False,
535
+ plot_title=None,
536
+ **kwargs,
537
+ ):
538
+ """
539
+ Combine the input files and then calibrate the image with the bias
540
+ and normalized flat field if provided
541
+
542
+ Parameters
543
+ ----------
544
+ files : list
545
+ list of file names to load
546
+ instrument : Instrument
547
+ PyReduce instrument object with load_fits method
548
+ arm : str
549
+ descriptor of the instrument arm
550
+ mask : array
551
+ 2D Bad Pixel Mask to apply to the master image
552
+ bias : tuple(bias, bhead), optional
553
+ bias correction to apply to the combined image, if bias has 3 dimensions
554
+ it is used as polynomial coefficients scaling with the exposure time, by default None
555
+ norm : array, optional
556
+ normalized flat to divide the combined image with after
557
+ the bias subtraction, by default None
558
+ bias_scaling : str, optional
559
+ defines how the bias is subtracted, by default "exposure_time"
+ norm_scaling : str, optional
+ defines how the normalized flat is applied ("divide" or "none"), by default "divide"
560
+ plot : bool, optional
561
+ whether to plot the results, by default False
562
+ plot_title : str, optional
563
+ Name to put on the plot, by default None
564
+
565
+ Returns
566
+ -------
567
+ orig : array
568
+ combined image with calibrations applied
569
+ thead : Header
570
+ header of the combined image
571
+
572
+ Raises
573
+ ------
574
+ ValueError
575
+ Unrecognised bias_scaling option
576
+ """
577
+ # Combine the images and try to remove bad pixels
578
+ orig, thead = combine_frames(files, instrument, arm, mask=mask, **kwargs)
579
+
580
+ # Subtract bias
581
+ if bias is not None and bias_scaling is not None and bias_scaling != "none":
582
+ if bias.ndim == 2:
583
+ if bhead["exptime"] == 0 and bias_scaling == "exposure_time":
584
+ logger.warning(
585
+ "No exposure time set in bias, using number of files instead"
586
+ )
587
+ bias_scaling = "number_of_files"
588
+ if bias_scaling == "exposure_time":
589
+ orig -= bias * thead["exptime"] / bhead["exptime"]
590
+ elif bias_scaling == "number_of_files":
591
+ orig -= bias * len(files)
592
+ elif bias_scaling == "mean":
593
+ orig -= bias * np.ma.mean(orig) / np.ma.mean(bias)
594
+ elif bias_scaling == "median":
595
+ orig -= bias * np.ma.median(orig) / np.ma.median(bias)
596
+ else:
597
+ raise ValueError(
598
+ f"Unexpected value for 'bias_scaling', expected one of ['exposure_time', 'number_of_files', 'mean', 'median', 'none'], but got {bias_scaling}"
599
+ )
600
+ else:
601
+ # bias holds polynomial coefficients, one set per pixel
602
+ if bias_scaling == "exposure_time":
603
+ orig -= np.polyval(bias, thead["exptime"])
604
+ # elif bias_scaling == "number_of_files":
605
+ # flat -= bias * len(files)
606
+ # elif bias_scaling == "mean":
607
+ # flat -= bias * np.ma.mean(flat) / np.ma.mean(bias)
608
+ # elif bias_scaling == "median":
609
+ # flat -= bias * np.ma.median(flat) / np.ma.median(bias)
610
+ else:
611
+ raise ValueError(
612
+ f"Unexpected value for 'bias_scaling', expected one of ['exposure_time'], but got {bias_scaling}"
613
+ )
614
+
615
+ # Remove the Flat
616
+ if norm is not None and norm_scaling != "none":
617
+ if norm_scaling == "divide":
618
+ orig /= norm
619
+ else:
620
+ raise ValueError(
621
+ f"Unexpected value for 'norm_scaling', expected one of ['divide', 'none'], but got {norm_scaling}"
622
+ )
623
+
624
+ if plot: # pragma: no cover
625
+ title = "Master"
626
+ if plot_title is not None:
627
+ title = f"{plot_title}\n{title}"
628
+ plt.title(title)
629
+ plt.xlabel("x [pixel]")
630
+ plt.ylabel("y [pixel]")
631
+ bot, top = np.percentile(orig[orig != 0], (10, 90))
632
+ plt.imshow(orig, vmin=bot, vmax=top, origin="lower")
633
+ util.show_or_save("combine_master")
634
+
635
+ return orig, thead
636
+
637
+
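To make the bias_scaling options concrete (a sketch with assumed header values, not package code): with "exposure_time" the master bias is scaled by the ratio of total exposure times before subtraction.

    import numpy as np

    master = np.full((4, 4), 900.0)   # combined image, 30 s total exposure
    bias = np.full((4, 4), 10.0)      # master bias, 10 s total exposure
    texp_master, texp_bias = 30.0, 10.0

    # bias_scaling == "exposure_time"
    corrected = master - bias * texp_master / texp_bias
    assert np.allclose(corrected, 870.0)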
638
+ def combine_polynomial(
639
+ files, instrument, arm, mask, degree=1, plot=False, plot_title=None
640
+ ):
641
+ """
642
+ Combine the input files by fitting a polynomial of the pixel value versus
643
+ the exposure time of each pixel
644
+
645
+ Parameters
646
+ ----------
647
+ files : list
648
+ list of file names
649
+ instrument : Instrument
650
+ PyReduce instrument object with load_fits method
651
+ arm : str
652
+ arm identifier for this instrument
653
+ mask : array
654
+ bad pixel mask to apply to the coefficients
655
+ degree : int, optional
656
+ polynomial degree of the fit, by default 1
657
+ plot : bool, optional
658
+ whether to plot the results, by default False
659
+ plot_title : str, optional
660
+ Title of the plot, by default None
661
+
662
+ Returns
663
+ -------
664
+ bias : array
665
+ 3d array with the coefficients for each pixel
666
+ bhead : Header
667
+ combined FITS header of the coefficients
668
+ """
669
+ hdus = [instrument.load_fits(f, arm) for f in tqdm(files)]
670
+ data = np.array([h[0] for h in hdus])
671
+ exptimes = np.array([h[1]["EXPTIME"] for h in hdus])
672
+ # Numpy polyfit can fit all polynomials at the same time
673
+ # but we need to flatten the pixels into 1 dimension
674
+ data_flat = data.reshape((len(exptimes), -1))
675
+ with warnings.catch_warnings():
676
+ warnings.simplefilter("ignore", np.exceptions.RankWarning)
677
+ coeffs = np.polyfit(exptimes, data_flat, degree)
678
+ # Afterwards we reshape the coefficients into the image shape
679
+ shape = (degree + 1, data.shape[1], data.shape[2])
680
+ coeffs = coeffs.reshape(shape)
681
+ # And apply the mask to each image of coefficients
682
+ if mask is not None:
683
+ bias = np.ma.masked_array(coeffs, mask=[mask for _ in range(degree + 1)])
684
+ # We arbitrarily pick the first header as the bias header
685
+ # and change the exposure time
686
+ bhead = hdus[0][1]
687
+ bhead["EXPTIME"] = np.sum(exptimes)
688
+
689
+ if plot:
690
+ title = "Master"
691
+ if plot_title is not None:
692
+ title = f"{plot_title}\n{title}"
693
+
694
+ for i in range(degree + 1):
695
+ plt.subplot(1, degree + 1, i + 1)
696
+ plt.title("Coefficient %i" % (degree - i))
697
+ plt.xlabel("x [pixel]")
698
+ plt.ylabel("y [pixel]")
699
+ bot, top = np.percentile(bias[i], (10, 90))
700
+ plt.imshow(bias[i], vmin=bot, vmax=top, origin="lower")
701
+
702
+ plt.suptitle(title)
703
+ util.show_or_save("combine_polynomial")
704
+
705
+ return bias, bhead
706
+
707
+
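The per-pixel fit relies on np.polyfit accepting a 2D y array (one column per pixel); a compact check of the flatten/fit/reshape round trip used above (a sketch, not package code):

    import numpy as np

    exptimes = np.array([1.0, 2.0, 4.0, 8.0])
    ny, nx, degree = 3, 5, 1
    # synthetic stack: value = slope * exptime + offset, per pixel
    slope = np.arange(ny * nx, dtype=float).reshape(ny, nx)
    data = exptimes[:, None, None] * slope[None] + 7.0

    data_flat = data.reshape(len(exptimes), -1)        # (n_frames, n_pixels)
    coeffs = np.polyfit(exptimes, data_flat, degree)   # (degree+1, n_pixels)
    coeffs = coeffs.reshape(degree + 1, ny, nx)
    assert np.allclose(coeffs[0], slope)               # fitted slopes
    assert np.allclose(coeffs[1], 7.0)                 # fitted offsets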
708
+ def combine_bias(
709
+ files,
710
+ instrument,
711
+ arm,
712
+ extension=None,
713
+ plot=False,
714
+ plot_title=None,
715
+ science_observation_time=None,
716
+ **kwargs,
717
+ ):
718
+ """
719
+ Combine bias frames, determine read noise, reject bad pixels.
720
+ Read noise calculation only valid if both lists yield similar noise.
721
+
722
+ Parameters
723
+ ----------
724
+ files : list(str)
725
+ bias files to combine
726
+ instrument : str
727
+ instrument id for arminfo
+ arm : str
+ instrument arm
728
+ extension : {int, str}, optional
729
+ fits extension to use (default: 1)
730
+ xr : 2-tuple(int), optional
731
+ x range to use (default: None, i.e. whole image)
732
+ yr : 2-tuple(int), optional
733
+ y range to use (default: None, i.e. whole image)
734
+ dtype : np.dtype, optional
735
+ datatype of the combined bias frame (default: float32)
736
+ Returns
737
+ -------
738
+ bias, bhead
739
+ bias image and header
740
+ """
741
+
742
+ debug = kwargs.get("debug", False)
743
+
744
+ n = len(files)
745
+ if n == 0:
746
+ raise FileNotFoundError("No bias file(s) given")
747
+ elif n == 1:
748
+ # if there is just one element compare it with itself, not really useful, but it works
749
+ list1 = list2 = files
750
+ n = 2
751
+ else:
752
+ list1, list2 = files[: n // 2], files[n // 2 :]
753
+
754
+ # Lists of images.
755
+ n1 = len(list1)
756
+ n2 = len(list2)
757
+
758
+ # Separately combine the images in two groups.
759
+ bias1, head1 = combine_frames(list1, instrument, arm, extension, **kwargs)
760
+ bias1 /= n1
761
+
762
+ bias2, head = combine_frames(list2, instrument, arm, extension, **kwargs)
763
+ bias2 /= n2
764
+
765
+ # Make sure we know the gain.
766
+ gain = head.get("e_gain*", (1,))[0]
767
+
768
+ # Construct normalized sum.
769
+ bias = (bias1 * n1 + bias2 * n2) / n
770
+ exptime_1 = head1.get("exptime", 0)
771
+ exptime_2 = head.get("exptime", 0)
772
+ head["exptime"] = (exptime_1 + exptime_2) / n
773
+
774
+ # Compute noise in difference image by fitting Gaussian to distribution.
775
+ diff = 0.5 * (bias1 - bias2)
776
+ if np.min(diff) != np.max(diff):
777
+ crude = np.ma.median(np.abs(diff)) # estimate of noise
778
+ hmin = -5.0 * crude
779
+ hmax = +5.0 * crude
780
+ bin_size = np.clip(2 / n, 0.5, None)
781
+ nbins = int((hmax - hmin) / bin_size)
782
+
783
+ h, _ = np.histogram(diff, range=(hmin, hmax), bins=nbins)
784
+ xh = hmin + bin_size * (np.arange(0.0, nbins) + 0.5)
785
+
786
+ hfit, par = gaussfit(xh, h)
787
+ noise = abs(par[2]) # noise in diff, bias
788
+
789
+ # Determine where wings of distribution become significantly non-Gaussian.
790
+ contam = (h - hfit) / np.sqrt(np.clip(hfit, 1, None))
791
+ imid = np.where(abs(xh) < 2 * noise)
792
+ consig = np.std(contam[imid])
793
+
794
+ smcontam = gaussbroad(xh, contam, 0.1 * noise)
795
+ igood = np.where(smcontam < 3 * consig)
796
+ gmin = np.min(xh[igood])
797
+ gmax = np.max(xh[igood])
798
+
799
+ # Find and fix bad pixels.
800
+ ibad = np.where((diff <= gmin) | (diff >= gmax))
801
+ nbad = len(ibad[0])
802
+
803
+ bias[ibad] = np.clip(bias1[ibad], None, bias2[ibad])
804
+
805
+ # Compute read noise.
806
+ biasnoise = gain * noise
807
+ bgnoise = biasnoise * np.sqrt(n)
808
+
809
+ # Print diagnostics.
810
+ logger.debug("change in bias between image sets= %f electrons", gain * par[1])
811
+ logger.debug("measured background noise per image= %f", bgnoise)
812
+ logger.debug("background noise in combined image= %f", biasnoise)
813
+ logger.debug("fixing %i bad pixels", nbad)
814
+
815
+ if debug: # pragma: no cover
816
+ # Plot noise distribution.
817
+ plt.subplot(211)
818
+ plt.plot(xh, h)
819
+ plt.plot(xh, hfit, c="r")
820
+ plt.title("noise distribution")
821
+ plt.axvline(gmin, c="b")
822
+ plt.axvline(gmax, c="b")
823
+
824
+ # Plot contamination estimation.
825
+ plt.subplot(212)
826
+ plt.plot(xh, contam)
827
+ plt.plot(xh, smcontam, c="r")
828
+ plt.axhline(3 * consig, c="b")
829
+ plt.axvline(gmin, c="b")
830
+ plt.axvline(gmax, c="b")
831
+ plt.title("contamination estimation")
832
+ util.show_or_save("bias_contamination")
833
+ else:
834
+ diff = 0
835
+ biasnoise = 1.0
836
+ nbad = 0
837
+
838
+ if plot: # pragma: no cover
839
+ title = "Master Bias"
840
+ if plot_title is not None:
841
+ title = f"{plot_title}\n{title}"
842
+ plt.title(title)
843
+ plt.xlabel("x [pixel]")
844
+ plt.ylabel("y [pixel]")
845
+ # Handle non-finite values for plotting
846
+ plot_data = np.where(np.isfinite(bias), bias, np.nan)
847
+ valid = np.isfinite(plot_data)
848
+ if np.any(valid):
849
+ bot, top = np.percentile(plot_data[valid], (1, 99))
850
+ if bot >= top:
851
+ bot, top = None, None
852
+ else:
853
+ bot, top = None, None
854
+ plt.imshow(plot_data, vmin=bot, vmax=top, origin="lower")
855
+ util.show_or_save("bias_master")
856
+
857
+ head["obslist"] = " ".join([os.path.basename(f) for f in files])
858
+ head["nimages"] = (n, "number of images summed")
859
+ head["npixfix"] = (nbad, "pixels corrected for cosmic rays")
860
+ head["bgnoise"] = (biasnoise, "noise in combined image, electrons")
861
+ return bias, head
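The read-noise estimate in combine_bias comes from fitting a Gaussian to the histogram of half-stack differences; a stripped-down version of the same idea, using scipy.optimize.curve_fit in place of the package's gaussfit helper (a sketch, not package code):

    import numpy as np
    from scipy.optimize import curve_fit

    rng = np.random.default_rng(1)
    sigma_true = 5.0
    # difference of two averaged halves, scaled by 0.5 as in combine_bias
    diff = 0.5 * rng.normal(0, 2 * sigma_true, size=100_000)

    h, edges = np.histogram(diff, bins=101, range=(-25, 25))
    xh = 0.5 * (edges[:-1] + edges[1:])

    def gauss(x, a, mu, sigma):
        return a * np.exp(-0.5 * ((x - mu) / sigma) ** 2)

    par, _ = curve_fit(gauss, xh, h, p0=(h.max(), 0.0, 3.0))
    noise = abs(par[2])   # recovered width of the noise distribution, here ~5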