pyreduce-astro 0.6.0b5 (cp312-cp312-macosx_11_0_arm64.whl)

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (154)
  1. pyreduce/__init__.py +67 -0
  2. pyreduce/__main__.py +106 -0
  3. pyreduce/clib/__init__.py +0 -0
  4. pyreduce/clib/_slitfunc_2d.cpython-311-darwin.so +0 -0
  5. pyreduce/clib/_slitfunc_2d.cpython-312-darwin.so +0 -0
  6. pyreduce/clib/_slitfunc_bd.cpython-311-darwin.so +0 -0
  7. pyreduce/clib/_slitfunc_bd.cpython-312-darwin.so +0 -0
  8. pyreduce/clib/build_extract.py +75 -0
  9. pyreduce/clib/slit_func_2d_xi_zeta_bd.c +1313 -0
  10. pyreduce/clib/slit_func_2d_xi_zeta_bd.h +55 -0
  11. pyreduce/clib/slit_func_bd.c +362 -0
  12. pyreduce/clib/slit_func_bd.h +17 -0
  13. pyreduce/clipnflip.py +147 -0
  14. pyreduce/combine_frames.py +855 -0
  15. pyreduce/configuration.py +186 -0
  16. pyreduce/continuum_normalization.py +329 -0
  17. pyreduce/cwrappers.py +404 -0
  18. pyreduce/datasets.py +231 -0
  19. pyreduce/echelle.py +413 -0
  20. pyreduce/estimate_background_scatter.py +129 -0
  21. pyreduce/extract.py +1361 -0
  22. pyreduce/extraction_width.py +77 -0
  23. pyreduce/instruments/__init__.py +0 -0
  24. pyreduce/instruments/andes.json +61 -0
  25. pyreduce/instruments/andes.py +102 -0
  26. pyreduce/instruments/common.json +46 -0
  27. pyreduce/instruments/common.py +675 -0
  28. pyreduce/instruments/crires_plus.json +63 -0
  29. pyreduce/instruments/crires_plus.py +103 -0
  30. pyreduce/instruments/filters.py +195 -0
  31. pyreduce/instruments/harpn.json +136 -0
  32. pyreduce/instruments/harpn.py +201 -0
  33. pyreduce/instruments/harps.json +155 -0
  34. pyreduce/instruments/harps.py +310 -0
  35. pyreduce/instruments/instrument_info.py +140 -0
  36. pyreduce/instruments/instrument_schema.json +221 -0
  37. pyreduce/instruments/jwst_miri.json +53 -0
  38. pyreduce/instruments/jwst_miri.py +29 -0
  39. pyreduce/instruments/jwst_niriss.json +52 -0
  40. pyreduce/instruments/jwst_niriss.py +98 -0
  41. pyreduce/instruments/lick_apf.json +53 -0
  42. pyreduce/instruments/lick_apf.py +35 -0
  43. pyreduce/instruments/mcdonald.json +59 -0
  44. pyreduce/instruments/mcdonald.py +123 -0
  45. pyreduce/instruments/metis_ifu.json +63 -0
  46. pyreduce/instruments/metis_ifu.py +45 -0
  47. pyreduce/instruments/metis_lss.json +65 -0
  48. pyreduce/instruments/metis_lss.py +45 -0
  49. pyreduce/instruments/micado.json +53 -0
  50. pyreduce/instruments/micado.py +45 -0
  51. pyreduce/instruments/neid.json +51 -0
  52. pyreduce/instruments/neid.py +154 -0
  53. pyreduce/instruments/nirspec.json +56 -0
  54. pyreduce/instruments/nirspec.py +215 -0
  55. pyreduce/instruments/nte.json +47 -0
  56. pyreduce/instruments/nte.py +42 -0
  57. pyreduce/instruments/uves.json +59 -0
  58. pyreduce/instruments/uves.py +46 -0
  59. pyreduce/instruments/xshooter.json +66 -0
  60. pyreduce/instruments/xshooter.py +39 -0
  61. pyreduce/make_shear.py +606 -0
  62. pyreduce/masks/mask_crires_plus_det1.fits.gz +0 -0
  63. pyreduce/masks/mask_crires_plus_det2.fits.gz +0 -0
  64. pyreduce/masks/mask_crires_plus_det3.fits.gz +0 -0
  65. pyreduce/masks/mask_ctio_chiron.fits.gz +0 -0
  66. pyreduce/masks/mask_elodie.fits.gz +0 -0
  67. pyreduce/masks/mask_feros3.fits.gz +0 -0
  68. pyreduce/masks/mask_flames_giraffe.fits.gz +0 -0
  69. pyreduce/masks/mask_harps_blue.fits.gz +0 -0
  70. pyreduce/masks/mask_harps_red.fits.gz +0 -0
  71. pyreduce/masks/mask_hds_blue.fits.gz +0 -0
  72. pyreduce/masks/mask_hds_red.fits.gz +0 -0
  73. pyreduce/masks/mask_het_hrs_2x5.fits.gz +0 -0
  74. pyreduce/masks/mask_jwst_miri_lrs_slitless.fits.gz +0 -0
  75. pyreduce/masks/mask_jwst_niriss_gr700xd.fits.gz +0 -0
  76. pyreduce/masks/mask_lick_apf_.fits.gz +0 -0
  77. pyreduce/masks/mask_mcdonald.fits.gz +0 -0
  78. pyreduce/masks/mask_nes.fits.gz +0 -0
  79. pyreduce/masks/mask_nirspec_nirspec.fits.gz +0 -0
  80. pyreduce/masks/mask_sarg.fits.gz +0 -0
  81. pyreduce/masks/mask_sarg_2x2a.fits.gz +0 -0
  82. pyreduce/masks/mask_sarg_2x2b.fits.gz +0 -0
  83. pyreduce/masks/mask_subaru_hds_red.fits.gz +0 -0
  84. pyreduce/masks/mask_uves_blue.fits.gz +0 -0
  85. pyreduce/masks/mask_uves_blue_binned_2_2.fits.gz +0 -0
  86. pyreduce/masks/mask_uves_middle.fits.gz +0 -0
  87. pyreduce/masks/mask_uves_middle_2x2_split.fits.gz +0 -0
  88. pyreduce/masks/mask_uves_middle_binned_2_2.fits.gz +0 -0
  89. pyreduce/masks/mask_uves_red.fits.gz +0 -0
  90. pyreduce/masks/mask_uves_red_2x2.fits.gz +0 -0
  91. pyreduce/masks/mask_uves_red_2x2_split.fits.gz +0 -0
  92. pyreduce/masks/mask_uves_red_binned_2_2.fits.gz +0 -0
  93. pyreduce/masks/mask_xshooter_nir.fits.gz +0 -0
  94. pyreduce/rectify.py +138 -0
  95. pyreduce/reduce.py +2205 -0
  96. pyreduce/settings/settings_ANDES.json +89 -0
  97. pyreduce/settings/settings_CRIRES_PLUS.json +89 -0
  98. pyreduce/settings/settings_HARPN.json +73 -0
  99. pyreduce/settings/settings_HARPS.json +69 -0
  100. pyreduce/settings/settings_JWST_MIRI.json +55 -0
  101. pyreduce/settings/settings_JWST_NIRISS.json +55 -0
  102. pyreduce/settings/settings_LICK_APF.json +62 -0
  103. pyreduce/settings/settings_MCDONALD.json +58 -0
  104. pyreduce/settings/settings_METIS_IFU.json +77 -0
  105. pyreduce/settings/settings_METIS_LSS.json +77 -0
  106. pyreduce/settings/settings_MICADO.json +78 -0
  107. pyreduce/settings/settings_NEID.json +73 -0
  108. pyreduce/settings/settings_NIRSPEC.json +58 -0
  109. pyreduce/settings/settings_NTE.json +60 -0
  110. pyreduce/settings/settings_UVES.json +54 -0
  111. pyreduce/settings/settings_XSHOOTER.json +78 -0
  112. pyreduce/settings/settings_pyreduce.json +178 -0
  113. pyreduce/settings/settings_schema.json +827 -0
  114. pyreduce/tools/__init__.py +0 -0
  115. pyreduce/tools/combine.py +117 -0
  116. pyreduce/trace_orders.py +645 -0
  117. pyreduce/util.py +1288 -0
  118. pyreduce/wavecal/MICADO_HK_3arcsec_chip5.npz +0 -0
  119. pyreduce/wavecal/atlas/thar.fits +4946 -13
  120. pyreduce/wavecal/atlas/thar_list.txt +4172 -0
  121. pyreduce/wavecal/atlas/une.fits +0 -0
  122. pyreduce/wavecal/convert.py +38 -0
  123. pyreduce/wavecal/crires_plus_J1228_Open_det1.npz +0 -0
  124. pyreduce/wavecal/crires_plus_J1228_Open_det2.npz +0 -0
  125. pyreduce/wavecal/crires_plus_J1228_Open_det3.npz +0 -0
  126. pyreduce/wavecal/harpn_harpn_2D.npz +0 -0
  127. pyreduce/wavecal/harps_blue_2D.npz +0 -0
  128. pyreduce/wavecal/harps_blue_pol_2D.npz +0 -0
  129. pyreduce/wavecal/harps_red_2D.npz +0 -0
  130. pyreduce/wavecal/harps_red_pol_2D.npz +0 -0
  131. pyreduce/wavecal/mcdonald.npz +0 -0
  132. pyreduce/wavecal/metis_lss_l_2D.npz +0 -0
  133. pyreduce/wavecal/metis_lss_m_2D.npz +0 -0
  134. pyreduce/wavecal/nirspec_K2.npz +0 -0
  135. pyreduce/wavecal/uves_blue_360nm_2D.npz +0 -0
  136. pyreduce/wavecal/uves_blue_390nm_2D.npz +0 -0
  137. pyreduce/wavecal/uves_blue_437nm_2D.npz +0 -0
  138. pyreduce/wavecal/uves_middle_2x2_2D.npz +0 -0
  139. pyreduce/wavecal/uves_middle_565nm_2D.npz +0 -0
  140. pyreduce/wavecal/uves_middle_580nm_2D.npz +0 -0
  141. pyreduce/wavecal/uves_middle_600nm_2D.npz +0 -0
  142. pyreduce/wavecal/uves_middle_665nm_2D.npz +0 -0
  143. pyreduce/wavecal/uves_middle_860nm_2D.npz +0 -0
  144. pyreduce/wavecal/uves_red_580nm_2D.npz +0 -0
  145. pyreduce/wavecal/uves_red_600nm_2D.npz +0 -0
  146. pyreduce/wavecal/uves_red_665nm_2D.npz +0 -0
  147. pyreduce/wavecal/uves_red_760nm_2D.npz +0 -0
  148. pyreduce/wavecal/uves_red_860nm_2D.npz +0 -0
  149. pyreduce/wavecal/xshooter_nir.npz +0 -0
  150. pyreduce/wavelength_calibration.py +1873 -0
  151. pyreduce_astro-0.6.0b5.dist-info/METADATA +113 -0
  152. pyreduce_astro-0.6.0b5.dist-info/RECORD +154 -0
  153. pyreduce_astro-0.6.0b5.dist-info/WHEEL +6 -0
  154. pyreduce_astro-0.6.0b5.dist-info/licenses/LICENSE +674 -0
pyreduce/combine_frames.py (new file)
@@ -0,0 +1,855 @@
+ """
+ Combine several FITS files into one master frame
+ 
+ Used to create the master bias and master flat
+ """
+ 
+ import datetime
+ import logging
+ import os
+ 
+ import astropy.io.fits as fits
+ import matplotlib.pyplot as plt
+ import numpy as np
+ from scipy.ndimage import median_filter
+ from tqdm import tqdm
+ 
+ from .clipnflip import clipnflip
+ from .instruments.instrument_info import load_instrument
+ from .util import gaussbroad, gaussfit
+ 
+ logger = logging.getLogger(__name__)
+ 
+ 
+ def running_median(arr, size):
+     """Calculate the running median of a 2D sequence
+ 
+     Parameters
+     ----------
+     arr : 2d array [n, l]
+         n datasets of length l
+     size : int
+         number of elements to consider for each median
+     Returns
+     -------
+     2d array [n, l - 2 * (size // 2)]
+         running median
+     """
+ 
+     ret = np.array([median_filter(s, size=size, mode="constant") for s in arr])
+     m = size // 2
+     return ret[:, m:-m]
+ 
+ 
+ def running_sum(arr, size):
+     """Calculate the running sum over the 2D sequence
+ 
+     Parameters
+     ----------
+     arr : array[n, l]
+         sequence to calculate the running sum over, n datasets of length l
+     size : int
+         number of elements to sum
+     Returns
+     -------
+     2d array [n, l - size + 1]
+         running sum
+     """
+ 
+     ret = np.cumsum(arr, axis=1)
+     ret[:, size:] -= ret[:, :-size]
+     return ret[:, size - 1 :]
+ 
+ 
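For reference, both window helpers return one value per fully covered window, so for an odd size they produce the same number of columns; a minimal doctest-style sketch (synthetic data, assuming the wheel's pyreduce package is importable):

>>> import numpy as np
>>> from pyreduce.combine_frames import running_median, running_sum
>>> arr = np.arange(12, dtype=float).reshape(2, 6)
>>> running_sum(arr, 3).shape     # l - size + 1 columns
(2, 4)
>>> running_median(arr, 3).shape  # l - 2 * (size // 2) columns
(2, 4)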
+ def calculate_probability(buffer, window, method="sum"):
+     """
+     Construct a probability function based on buffer data.
+ 
+     Parameters
+     ----------
+     buffer : array of shape (nfiles, ncolumns)
+         buffer
+     window : int
+         half size of the running window (each window covers 2 * window + 1 elements)
+     method : {"sum", "median"}, optional
+         which method to use to average the probabilities (default: "sum")
+         "sum" is much faster, but "median" is more resistant to outliers
+ 
+     Returns
+     -------
+     weights : array of shape (nfiles, ncolumns - 2 * window)
+         probabilities
+     """
+ 
+     buffer = np.require(buffer, dtype=float)
+ 
+     # Take the median/sum for each file
+     if method == "median":
+         # Running median is slow
+         weights = running_median(buffer, 2 * window + 1)
+         sum_of_weights = np.mean(weights, axis=0)
+     elif method == "sum":
+         # Running sum is fast
+         weights = running_sum(buffer, 2 * window + 1)
+         sum_of_weights = np.sum(weights, axis=0)
+     else:
+         raise ValueError(f"Unexpected probability method: {method}")
+ 
+     # Normalize the probability
+     np.divide(weights, sum_of_weights, where=sum_of_weights > 0, out=weights)
+     return weights
+ 
+ 
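For a stack of identical rows the normalization reduces every weight to 1/nfiles; a minimal doctest-style sketch (synthetic data):

>>> import numpy as np
>>> from pyreduce.combine_frames import calculate_probability
>>> buffer = np.full((4, 20), 100.0)     # 4 files, 20 columns, identical data
>>> weights = calculate_probability(buffer, window=3)
>>> weights.shape                        # (nfiles, ncolumns - 2 * window)
(4, 14)
>>> bool(np.allclose(weights, 0.25))     # each file carries an equal share
True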
+ def fix_bad_pixels(probability, buffer, readnoise, gain, threshold):
+     """
+     Find and fix bad pixels
+ 
+     Parameters
+     ----------
+     probability : array(float)
+         probabilities
+     buffer : array(int)
+         image buffer
+     readnoise : float
+         readnoise of the current amplifier
+     gain : float
+         gain of the current amplifier
+     threshold : float
+         sigma threshold between observation and fit for bad pixels
+ 
+     Returns
+     -------
+     corrected_signal : array
+         input buffer summed over the file axis, with bad pixels replaced by the fit
+     nbad : int
+         number of bad pixels that were fixed
+     """
+     # Fit the signal
+     ratio = np.zeros_like(probability)
+     np.divide(buffer, probability, where=probability > 0, out=ratio)
+     # Mean amplitude of the ratios, after rejecting the extreme values
+     amplitude = (
+         np.sum(ratio, axis=0) - np.min(ratio, axis=0) - np.max(ratio, axis=0)
+     ) / (buffer.shape[0] - 2)
+ 
+     fitted_signal = np.where(probability > 0, amplitude[None, :] * probability, 0)
+     predicted_noise = np.zeros_like(fitted_signal)
+     tmp = readnoise**2 + (fitted_signal / gain)
+     np.sqrt(tmp, where=tmp >= 0, out=predicted_noise)
+ 
+     # Identify outliers
+     badpixels = buffer - fitted_signal > threshold * predicted_noise
+     nbad = np.count_nonzero(badpixels)
+ 
+     # Construct the summed frame, replacing outliers with the fit
+     corrected_signal = np.where(badpixels, fitted_signal, buffer)
+     corrected_signal = np.sum(corrected_signal, axis=0)
+     return corrected_signal, nbad
+ 
+ 
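The replacement step is easiest to see with an injected outlier; a minimal doctest-style sketch (flat synthetic stack, equal weights, hypothetical noise parameters):

>>> import numpy as np
>>> from pyreduce.combine_frames import fix_bad_pixels
>>> buffer = np.full((4, 10), 1000.0)
>>> buffer[2, 5] = 50000.0                       # simulated cosmic ray hit
>>> prob = np.full((4, 10), 0.25)                # equal weights
>>> summed, nbad = fix_bad_pixels(prob, buffer, readnoise=10.0, gain=2.0, threshold=3.5)
>>> nbad
1
>>> float(summed[5])                             # hit replaced by the fit, ~4 * 1000
4000.0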
+ def combine_frames_simple(
+     files, instrument, mode, extension=None, dtype=np.float32, **kwargs
+ ):
+     """
+     Simple addition of similar images.
+ 
+     Parameters
+     ----------
+     files : list(str)
+         list of FITS files to combine
+     instrument : Instrument
+         PyReduce instrument object with a load_fits method
+     mode : str
+         instrument mode
+     extension : int, optional
+         FITS extension to load (default: None, i.e. determined by the instrument)
+     dtype : np.dtype, optional
+         datatype of the combined image (default: np.float32)
+ 
+     Returns
+     -------
+     combined_data, header
+         combined image data, header
+     """
+ 
+     if len(files) == 0:
+         raise ValueError("No files given for combine frames")
+ 
+     # Load the first file to get the shape and header
+     result, head = instrument.load_fits(
+         files[0], mode, dtype=dtype, extension=extension, **kwargs
+     )
+ 
+     # Sum the remaining files
+     for fname in files[1:]:
+         data, _ = instrument.load_fits(
+             fname, mode, dtype=dtype, extension=extension, **kwargs
+         )
+         result += data
+ 
+     # Update the header
+     # DARKTIME is set before EXPTIME is scaled, so that a missing DARKTIME
+     # falls back to the single-frame exposure time and is only scaled once
+     head["NIMAGES"] = (len(files), "number of images summed")
+     head["DARKTIME"] = (
+         head.get("DARKTIME", head["EXPTIME"]) * len(files),
+         "total dark time",
+     )
+     head["EXPTIME"] = (head["EXPTIME"] * len(files), "total exposure time")
+ 
+     # Update the readout noise
+     if "RDNOISE" in head:
+         head["RDNOISE"] = (
+             head["RDNOISE"] * np.sqrt(len(files)),
+             "readout noise in combined image",
+         )
+ 
+     head.add_history(f"Combined {len(files)} images by simple addition")
+ 
+     return result, head
+ 
+ 
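Because the frames are summed rather than averaged, the signal grows by N while the read noise grows by sqrt(N), which is why RDNOISE is rescaled above. A minimal usage sketch (the instrument choice, mode, and file paths are hypothetical):

>>> from pyreduce.combine_frames import combine_frames_simple
>>> from pyreduce.instruments.instrument_info import load_instrument
>>> instrument = load_instrument("UVES")
>>> files = ["bias_1.fits", "bias_2.fits"]       # hypothetical paths
>>> image, header = combine_frames_simple(files, instrument, "middle")
>>> header["NIMAGES"]
2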
+ def combine_frames(
+     files,
+     instrument,
+     mode,
+     extension=None,
+     threshold=3.5,
+     window=50,
+     dtype=np.float32,
+     **kwargs,
+ ):
+     """
+     Subroutine to correct cosmic ray blemishes while co-adding otherwise
+     similar images.
+ 
+     combine_frames co-adds a group of FITS files with 2D images of identical dimensions.
+     In the process it rejects cosmic rays, detector defects, etc. It is capable of
+     handling images that have a stripe pattern (e.g. echelle spectra), using the REDUCE
+     modinfo conventions to figure out the image orientation and useful pixel ranges.
+     It can handle many frames. Special cases: 1 file in the list (the input is
+     returned as output) and 2 files (the straight sum is returned).
+ 
+     If the image orientation is not predominantly vertical, the image is rotated
+     90 degrees (and rotated back afterwards).
+ 
+     Open all FITS files in the list.
+     Loop through the rows. Read the next row from each file into a row buffer
+     mBuff[nCol, nFil]. Optionally correct the data for non-linearity.
+ 
+     calculate_probability::
+ 
+         Go through the row creating a "probability" vector. That is, for column iCol
+         take the median of the part of the row mBuff[iCol-win:iCol+win, iFil] for
+         each file and divide these medians by their mean computed across the stack
+         of files. In other words:
+         >>> filwt[iFil] = median(mBuff[iCol-win:iCol+win, iFil])
+         >>> norm_filwt = mean(filwt)
+         >>> prob[iCol, iFil] = (norm_filwt > 0) ? filwt[iFil]/norm_filwt : filwt[iFil]
+ 
+         This is done for all iCol in the range [win:nCol-win-1]. The result is then
+         linearly extrapolated into the win zones at both ends. E.g. for iCol in the
+         [0:win-1] range:
+         >>> prob[iCol, iFil] = 2*prob[win, iFil] - prob[2*win-iCol, iFil]
+ 
+         For the other end ([nCol-win:nCol-1]) it is similar:
+         >>> prob[iCol, iFil] = 2*prob[nCol-win-1, iFil] - prob[2*(nCol-win-1)-iCol, iFil]
+ 
+     fix_bad_pixels::
+ 
+         Once the probabilities are constructed we can do the fitting, measure the
+         scatter, and detect outliers. We ignore negative or zero probabilities, as
+         they should not happen. For each iCol with (some) positive probabilities we
+         compute the ratios of the original data to the probabilities and get the
+         mean amplitude of these ratios after rejecting the extreme values:
+         >>> ratio = mBuff[iCol, iFil] / prob[iCol, iFil]
+         >>> amp = (total(ratio) - min(ratio) - max(ratio)) / (nFil - 2)
+         >>> mFit[iCol, iFil] = amp * prob[iCol, iFil]
+ 
+         Note that for iFil where prob[iCol, iFil] is zero we simply set mFit to zero.
+         The scatter (noise) consists of the readout noise and the shot noise of the
+         model (fit), added in quadrature:
+         >>> sig = sqrt(rdnoise*rdnoise + abs(mFit[iCol, iFil]/gain))
+ 
+         and the outliers are defined as:
+         >>> iBad = where(mBuff - mFit gt thresh*sig)
+ 
+         Bad values are replaced from the fit:
+         >>> mBuff[iBad] = mFit[iBad]
+ 
+         and mBuff is summed across the file dimension to create an output row.
+ 
+     Parameters
+     ----------
+     files : list(str)
+         list of FITS files to combine
+     instrument : str or Instrument
+         instrument id for modinfo
+     mode : str
+         instrument mode
+     extension : int, optional
+         FITS extension to load (default: None, i.e. determined by the instrument)
+     threshold : float, optional
+         threshold for bad pixels (default: 3.5)
+     window : int, optional
+         horizontal half window size (default: 50)
+     mask : array(bool), optional
+         mask for the FITS image (default: None)
+     xr : int, optional
+         x range (default: None)
+     yr : int, optional
+         y range (default: None)
+     debug : bool, optional
+         show a debug plot of the noise distribution (default: False)
+     dtype : np.dtype, optional
+         datatype of the combined image (default: np.float32)
+ 
+     Returns
+     -------
+     combined_data, header
+         combined image data, header
+     """
+ 
+     DEBUG_NROWS = 100  # log a status update every DEBUG_NROWS rows
+     if instrument is None or isinstance(instrument, str):
+         instrument = load_instrument(instrument)
+ 
+     # Summarize the file info
+     logger.debug("Files:")
+     for i, fname in enumerate(files):
+         logger.debug("%i\t%s", i, fname)
+ 
+     # No images
+     if len(files) == 0:
+         raise ValueError("No files given for combine frames")
+     # Only one image
+     elif len(files) == 1:
+         result, head = instrument.load_fits(
+             files[0], mode, dtype=dtype, extension=extension, **kwargs
+         )
+         readnoise = np.atleast_1d(head.get("e_readn", 0))
+         total_exposure_time = head.get("exptime", 0)
+         n_fixed = 0
+         linear = head.get("e_linear", True)
+ 
+     # Two images
+     elif len(files) == 2:
+         bias1, head1 = instrument.load_fits(
+             files[0], mode, dtype=dtype, extension=extension, **kwargs
+         )
+         exp1 = head1.get("exptime", 0)
+ 
+         bias2, head2 = instrument.load_fits(
+             files[1], mode, dtype=dtype, extension=extension, **kwargs
+         )
+         exp2 = head2.get("exptime", 0)
+         readnoise = head2.get("e_readn", 0)
+ 
+         result = bias2 + bias1
+         head = head2
+ 
+         total_exposure_time = exp1 + exp2
+         readnoise = np.atleast_1d(readnoise)
+         n_fixed = 0
+         linear = head.get("e_linear", True)
+ 
+     else:  # More than two images
+         # Get the information from the headers
+         # TODO: check that the values are the same in all the headers?
+ 
+         heads = [
+             instrument.load_fits(
+                 f, mode, header_only=True, dtype=dtype, extension=extension, **kwargs
+             )
+             for f in files
+         ]
+         head = heads[0]
+ 
+         # If the sizes vary, it will show up during the loading of the data
+         n_columns = head["naxis1"]
+         n_rows = head["naxis2"]
+ 
+         # Check if we deal with multiple amplifiers
+         n_amplifier = head.get("e_ampl", 1)
+         # Check the orientation of the image
+         # orient 0, 2, 5, 7: orders are horizontal
+         # orient 1, 3, 4, 6: orders are vertical
+         orientation = head.get("e_orient", 0)
+         orientation = orientation % 8
+         # Check if a non-linearity correction is needed
+         linear = head.get("e_linear", True)
+ 
+         # Section(s) of the detector to process: x_low, x_high, y_low, y_high
+         # head["e_xlo*"] will find all entries, with * as a wildcard
+         # We also ensure that we will have one dimensional arrays (not just the value)
+         cards = sorted(head["e_xlo*"].cards, key=lambda c: c[0])
+         x_low = [c[1] for c in cards]
+         cards = sorted(head["e_xhi*"].cards, key=lambda c: c[0])
+         x_high = [c[1] for c in cards]
+         cards = sorted(head["e_ylo*"].cards, key=lambda c: c[0])
+         y_low = [c[1] for c in cards]
+         cards = sorted(head["e_yhi*"].cards, key=lambda c: c[0])
+         y_high = [c[1] for c in cards]
+ 
+         cards = sorted(head["e_gain*"].cards, key=lambda c: c[0])
+         gain = [c[1] for c in cards]
+         cards = sorted(head["e_readn*"].cards, key=lambda c: c[0])
+         readnoise = [c[1] for c in cards]
+         total_exposure_time = sum(h.get("exptime", 0) for h in heads)
+ 
+         # Scaling for the image data
+         bscale = [h.get("bscale", 1) for h in heads]
+         bzero = [h.get("bzero", 0) for h in heads]
+ 
+         result = np.zeros((n_rows, n_columns), dtype=dtype)  # the combined image
+         n_fixed = 0  # number of fixed pixels
+ 
+         # Load all image HDUs, but leave the data on disk, using memmap
+         # We need to scale the data ourselves later
+         if extension is None:
+             extension = [instrument.get_extension(h, mode) for h in heads]
+         else:
+             extension = [extension] * len(heads)
+ 
+         data = [
+             fits.open(f, memmap=True, do_not_scale_image_data=True)[e]
+             for f, e in zip(files, extension, strict=False)
+         ]
+ 
+         if window >= n_columns / 2:
+             window = n_columns // 10
+             logger.warning("Reducing the window size to fit the image")
+ 
+         # Depending on the orientation, the indexing changes and the borders of the image are swapped
+         if orientation in [1, 3, 4, 6]:
+             # index gives the index for accessing the data in the image, which is rotated depending on the orientation
+             # We could just rotate the whole image, but that requires reading the whole image at once
+             def index(row, x_left, x_right):
+                 return (slice(x_left, x_right), row)
+ 
+             # Exchange the borders of the image
+             x_low, x_high, y_low, y_high = y_low, y_high, x_low, x_high
+         else:
+ 
+             def index(row, x_left, x_right):
+                 return (row, slice(x_left, x_right))
+ 
+         # For several amplifiers, different sections of the image are set
+         # One for each amplifier; each amplifier is treated separately
+         for amplifier in range(n_amplifier):
+             # Pick the data for the current amplifier
+             x_left = x_low[amplifier]
+             x_right = x_high[amplifier]
+             y_bottom = y_low[amplifier]
+             y_top = y_high[amplifier]
+ 
+             gain_amp = gain[amplifier]
+             readnoise_amp = readnoise[amplifier]
+ 
+             # Prepare the temporary arrays
+             buffer = np.zeros((len(files), x_right - x_left))
+             probability = np.zeros((len(files), x_right - x_left))
+ 
+             # For each row
+             for row in tqdm(range(y_bottom, y_top), desc="Rows"):
+                 if row % DEBUG_NROWS == 0:
+                     logger.debug(
+                         "%i rows processed - %i pixels fixed so far", row, n_fixed
+                     )
+ 
+                 # Load the current row
+                 idx = index(row, x_left, x_right)
+                 for i in range(len(files)):
+                     # The data is read as float64 to avoid int16 overflow;
+                     # the receiving buffer is float64 anyway
+                     buffer[i, :] = (
+                         data[i].data[idx].astype("float64") * bscale[i] + bzero[i]
+                     )
+ 
+                 # Calculate the probabilities
+                 probability[:, window:-window] = calculate_probability(buffer, window)
+ 
+                 # Extrapolate to the edges
+                 probability[:, :window] = (
+                     2 * probability[:, window][:, None]
+                     - probability[:, 2 * window : window : -1]
+                 )
+                 probability[:, -window:] = (
+                     2 * probability[:, -window - 1][:, None]
+                     - probability[:, -window - 1 : -2 * window - 1 : -1]
+                 )
+ 
+                 # Fix the bad pixels
+                 result[idx], n_bad = fix_bad_pixels(
+                     probability, buffer, readnoise_amp, gain_amp, threshold
+                 )
+                 n_fixed += n_bad
+ 
+         logger.debug("total cosmic ray hits identified and removed: %i", n_fixed)
+ 
+         result = clipnflip(result, head)
+         result = np.ma.masked_array(result, mask=kwargs.get("mask"))
+ 
+         for d in data:
+             d._file.close()  # Close the open FITS files
+ 
+     # Add the info to the header
+     head["bzero"] = 0.0
+     head["bscale"] = 1.0
+     head["exptime"] = total_exposure_time
+     head["darktime"] = total_exposure_time
+     # Because we do not divide the signal by the number of files, the
+     # read-out noise goes up by the square root of the number of files
+ 
+     for n_amp, rdn in enumerate(readnoise):
+         head[f"rdnoise{n_amp + 1:0>1}"] = (
+             rdn * np.sqrt(len(files)),
+             " noise in combined image, electrons",
+         )
+ 
+     head["nimages"] = (len(files), " number of images summed")
+     head["npixfix"] = (n_fixed, " pixels corrected for cosmic rays")
+     head.add_history(
+         f"images coadded by combine_frames.py on {datetime.datetime.now()}"
+     )
+ 
+     if not linear:  # pragma: no cover
+         # The non-linearity was fixed; mark this in the header
+         raise NotImplementedError()  # TODO Nonlinear
+         # i = np.where(head["e_linear"] >= 0)
+         # head[i] = np.array((head[0 : i - 1 + 1], head[i + 1 :]))
+         # head["e_linear"] = ("t", " image corrected of non-linearity")
+ 
+         # ii = np.where(head["e_gain*"] >= 0)
+         # if len(ii[0]) > 0:
+         #     for i in range(len(ii[0])):
+         #         k = ii[i]
+         #         head = np.array((head[0 : k - 1 + 1], head[k + 1 :]))
+         #     head["e_gain"] = (1, " image was converted to e-")
+ 
+     return result, head
+ 
+ 
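The edge handling mirrors the probability profile around the first and last computed columns, which continues a linear trend into the window zones; a minimal numeric sketch of the left-edge rule (hypothetical 1D data):

>>> import numpy as np
>>> window = 3
>>> prob = np.zeros(10)
>>> prob[window:-window] = [1.0, 1.2, 1.4, 1.6]   # computed inner columns
>>> prob[:window] = 2 * prob[window] - prob[2 * window : window : -1]
>>> prob[:window]                                 # linear continuation of the trend
array([0.4, 0.6, 0.8])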
+ def combine_calibrate(
+     files,
+     instrument,
+     mode,
+     mask=None,
+     bias=None,
+     bhead=None,
+     norm=None,
+     bias_scaling="exposure_time",
+     norm_scaling="divide",
+     plot=False,
+     plot_title=None,
+     **kwargs,
+ ):
+     """
+     Combine the input files and then calibrate the image with the bias
+     and the normalized flat field, if provided
+ 
+     Parameters
+     ----------
+     files : list
+         list of file names to load
+     instrument : Instrument
+         PyReduce instrument object with a load_fits method
+     mode : str
+         descriptor of the instrument mode
+     mask : array
+         2D bad pixel mask to apply to the master image
+     bias : array, optional
+         bias correction to apply to the combined image; if bias has 3 dimensions,
+         it is used as polynomial coefficients scaling with the exposure time, by default None
+     bhead : Header, optional
+         header of the bias image, by default None
+     norm : array, optional
+         normalized flat to divide the combined image by after
+         the bias subtraction, by default None
+     bias_scaling : str, optional
+         defines how the bias is subtracted, by default "exposure_time"
+     norm_scaling : str, optional
+         defines how the normalized flat is applied, by default "divide"
+     plot : bool, optional
+         whether to plot the results, by default False
+     plot_title : str, optional
+         name to put on the plot, by default None
+ 
+     Returns
+     -------
+     orig : array
+         combined image with the calibrations applied
+     thead : Header
+         header of the combined image
+ 
+     Raises
+     ------
+     ValueError
+         unrecognised bias_scaling option
+     """
+     # Combine the images and try to remove bad pixels
+     orig, thead = combine_frames(files, instrument, mode, mask=mask, **kwargs)
+ 
+     # Subtract the bias
+     if bias is not None and bias_scaling is not None and bias_scaling != "none":
+         if bias.ndim == 2:
+             if bhead["exptime"] == 0 and bias_scaling == "exposure_time":
+                 logger.warning(
+                     "No exposure time set in the bias, using the number of files instead"
+                 )
+                 bias_scaling = "number_of_files"
+             if bias_scaling == "exposure_time":
+                 orig -= bias * thead["exptime"] / bhead["exptime"]
+             elif bias_scaling == "number_of_files":
+                 orig -= bias * len(files)
+             elif bias_scaling == "mean":
+                 orig -= bias * np.ma.mean(orig) / np.ma.mean(bias)
+             elif bias_scaling == "median":
+                 orig -= bias * np.ma.median(orig) / np.ma.median(bias)
+             else:
+                 raise ValueError(
+                     f"Unexpected value for 'bias_scaling', expected one of ['exposure_time', 'number_of_files', 'mean', 'median', 'none'], but got {bias_scaling}"
+                 )
+         else:
+             # 3D bias: polynomial coefficients versus exposure time
+             if bias_scaling == "exposure_time":
+                 orig -= np.polyval(bias, thead["exptime"])
+             else:
+                 raise ValueError(
+                     f"Unexpected value for 'bias_scaling', expected one of ['exposure_time'], but got {bias_scaling}"
+                 )
+ 
+     # Apply the normalized flat
+     if norm is not None and norm_scaling != "none":
+         if norm_scaling == "divide":
+             orig /= norm
+         else:
+             raise ValueError(
+                 f"Unexpected value for 'norm_scaling', expected one of ['divide', 'none'], but got {norm_scaling}"
+             )
+ 
+     if plot:  # pragma: no cover
+         title = "Master"
+         if plot_title is not None:
+             title = f"{plot_title}\n{title}"
+         plt.title(title)
+         plt.xlabel("x [pixel]")
+         plt.ylabel("y [pixel]")
+         bot, top = np.percentile(orig[orig != 0], (10, 90))
+         plt.imshow(orig, vmin=bot, vmax=top, origin="lower")
+         if plot != "png":
+             plt.show()
+         else:
+             plt.savefig("crires_master_flat.png")
+ 
+     return orig, thead
+ 
+ 
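The "exposure_time" scaling rescales the master bias from its own exposure time to that of the combined frame; a minimal sketch of that branch in isolation (hypothetical numbers):

>>> import numpy as np
>>> orig = np.full((4, 4), 1200.0)      # combined frame, exptime 60 s in thead
>>> bias = np.full((4, 4), 100.0)       # master bias, exptime 30 s in bhead
>>> orig -= bias * 60.0 / 30.0          # bias scaled to the science exposure time
>>> float(orig[0, 0])
1000.0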
+ def combine_polynomial(
+     files, instrument, mode, mask, degree=1, plot=False, plot_title=None
+ ):
+     """
+     Combine the input files by fitting a polynomial of the pixel value versus
+     the exposure time for each pixel
+ 
+     Parameters
+     ----------
+     files : list
+         list of file names
+     instrument : Instrument
+         PyReduce instrument object with a load_fits method
+     mode : str
+         mode identifier for this instrument
+     mask : array
+         bad pixel mask to apply to the coefficients
+     degree : int, optional
+         polynomial degree of the fit, by default 1
+     plot : bool, optional
+         whether to plot the results, by default False
+     plot_title : str, optional
+         title of the plot, by default None
+ 
+     Returns
+     -------
+     bias : array
+         3D array with the coefficients for each pixel
+     bhead : Header
+         combined FITS header of the coefficients
+     """
+     hdus = [instrument.load_fits(f, mode) for f in tqdm(files)]
+     data = np.array([h[0] for h in hdus])
+     exptimes = np.array([h[1]["EXPTIME"] for h in hdus])
+     # Numpy polyfit can fit all polynomials at the same time,
+     # but we need to flatten the pixels into 1 dimension
+     data_flat = data.reshape((len(exptimes), -1))
+     coeffs = np.polyfit(exptimes, data_flat, degree)
+     # Afterwards we reshape the coefficients into the image shape
+     shape = (degree + 1, data.shape[1], data.shape[2])
+     coeffs = coeffs.reshape(shape)
+     # And apply the mask to each image of coefficients
+     if mask is not None:
+         bias = np.ma.masked_array(coeffs, mask=[mask for _ in range(degree + 1)])
+     else:
+         bias = coeffs
+     # We arbitrarily pick the first header as the bias header
+     # and change the exposure time
+     bhead = hdus[0][1]
+     bhead["EXPTIME"] = np.sum(exptimes)
+ 
+     if plot:
+         title = "Master"
+         if plot_title is not None:
+             title = f"{plot_title}\n{title}"
+ 
+         for i in range(degree + 1):
+             plt.subplot(1, degree + 1, i + 1)
+             plt.title("Coefficient %i" % (degree - i))
+             plt.xlabel("x [pixel]")
+             plt.ylabel("y [pixel]")
+             bot, top = np.percentile(bias[i], (10, 90))
+             plt.imshow(bias[i], vmin=bot, vmax=top, origin="lower")
+ 
+         plt.suptitle(title)
+         if plot != "png":
+             plt.show()
+         else:
+             plt.savefig("master_bias.png")
+ 
+     return bias, bhead
+ 
+ 
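The flatten-fit-reshape step works because np.polyfit accepts a 2D right-hand side and fits one polynomial per column; a minimal doctest-style sketch (tiny synthetic ramp):

>>> import numpy as np
>>> exptimes = np.array([10.0, 20.0, 30.0])
>>> data = np.array([t * np.ones((2, 2)) + 5.0 for t in exptimes])  # slope 1, offset 5
>>> coeffs = np.polyfit(exptimes, data.reshape(3, -1), 1).reshape(2, 2, 2)
>>> np.round(coeffs[0], 6)            # slope per pixel
array([[1., 1.],
       [1., 1.]])
>>> np.round(coeffs[1], 6)            # offset per pixel
array([[5., 5.],
       [5., 5.]])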
+ def combine_bias(
+     files,
+     instrument,
+     mode,
+     extension=None,
+     plot=False,
+     plot_title=None,
+     science_observation_time=None,
+     **kwargs,
+ ):
+     """
+     Combine bias frames, determine the read noise, and reject bad pixels.
+     The read noise calculation is only valid if both lists yield similar noise.
+ 
+     Parameters
+     ----------
+     files : list(str)
+         bias files to combine
+     instrument : str or Instrument
+         instrument id for modinfo
+     mode : str
+         instrument mode
+     extension : {int, str}, optional
+         FITS extension to use (default: None, i.e. determined by the instrument)
+     plot : bool, optional
+         whether to plot the master bias (default: False)
+     plot_title : str, optional
+         name to put on the plot (default: None)
+     science_observation_time : optional
+         currently unused (default: None)
+     xr : 2-tuple(int), optional
+         x range to use (default: None, i.e. the whole image)
+     yr : 2-tuple(int), optional
+         y range to use (default: None, i.e. the whole image)
+     dtype : np.dtype, optional
+         datatype of the combined bias frame (default: float32)
+     Returns
+     -------
+     bias, bhead
+         bias image and header
+     """
+ 
+     debug = kwargs.get("debug", False)
+ 
+     n = len(files)
+     if n == 0:
+         raise FileNotFoundError("No bias file(s) given")
+     elif n == 1:
+         # If there is just one element, compare it with itself; not really useful, but it works
+         list1 = list2 = files
+         n = 2
+     else:
+         list1, list2 = files[: n // 2], files[n // 2 :]
+ 
+     # Lists of images
+     n1 = len(list1)
+     n2 = len(list2)
+ 
+     # Separately combine the images in the two groups
+     bias1, head1 = combine_frames(list1, instrument, mode, extension, **kwargs)
+     bias1 /= n1
+ 
+     bias2, head = combine_frames(list2, instrument, mode, extension, **kwargs)
+     bias2 /= n2
+ 
+     # Make sure we know the gain
+     gain = head.get("e_gain*", (1,))[0]
+ 
+     # Construct the normalized sum
+     bias = (bias1 * n1 + bias2 * n2) / n
+     exptime_1 = head1.get("exptime", 0)
+     exptime_2 = head.get("exptime", 0)
+     head["exptime"] = (exptime_1 + exptime_2) / n
+ 
+     # Compute the noise in the difference image by fitting a Gaussian to the distribution
+     diff = 0.5 * (bias1 - bias2)
+     if np.min(diff) != np.max(diff):
+         crude = np.ma.median(np.abs(diff))  # estimate of the noise
+         hmin = -5.0 * crude
+         hmax = +5.0 * crude
+         bin_size = np.clip(2 / n, 0.5, None)
+         nbins = int((hmax - hmin) / bin_size)
+ 
+         h, _ = np.histogram(diff, range=(hmin, hmax), bins=nbins)
+         xh = hmin + bin_size * (np.arange(0.0, nbins) + 0.5)
+ 
+         hfit, par = gaussfit(xh, h)
+         noise = abs(par[2])  # noise in diff, bias
+ 
+         # Determine where the wings of the distribution become significantly non-Gaussian
+         contam = (h - hfit) / np.sqrt(np.clip(hfit, 1, None))
+         imid = np.where(abs(xh) < 2 * noise)
+         consig = np.std(contam[imid])
+ 
+         smcontam = gaussbroad(xh, contam, 0.1 * noise)
+         igood = np.where(smcontam < 3 * consig)
+         gmin = np.min(xh[igood])
+         gmax = np.max(xh[igood])
+ 
+         # Find and fix the bad pixels
+         ibad = np.where((diff <= gmin) | (diff >= gmax))
+         nbad = len(ibad[0])
+ 
+         bias[ibad] = np.clip(bias1[ibad], None, bias2[ibad])
+ 
+         # Compute the read noise
+         biasnoise = gain * noise
+         bgnoise = biasnoise * np.sqrt(n)
+ 
+         # Print the diagnostics
+         logger.debug("change in bias between image sets= %f electrons", gain * par[1])
+         logger.debug("measured background noise per image= %f", bgnoise)
+         logger.debug("background noise in combined image= %f", biasnoise)
+         logger.debug("fixing %i bad pixels", nbad)
+ 
+         if debug:  # pragma: no cover
+             # Plot the noise distribution
+             plt.subplot(211)
+             plt.plot(xh, h)
+             plt.plot(xh, hfit, c="r")
+             plt.title("noise distribution")
+             plt.axvline(gmin, c="b")
+             plt.axvline(gmax, c="b")
+ 
+             # Plot the contamination estimation
+             plt.subplot(212)
+             plt.plot(xh, contam)
+             plt.plot(xh, smcontam, c="r")
+             plt.axhline(3 * consig, c="b")
+             plt.axvline(gmin, c="b")
+             plt.axvline(gmax, c="b")
+             plt.title("contamination estimation")
+             plt.show()
+     else:
+         diff = 0
+         biasnoise = 1.0
+         nbad = 0
+ 
+     if plot:  # pragma: no cover
+         title = "Master Bias"
+         if plot_title is not None:
+             title = f"{plot_title}\n{title}"
+         plt.title(title)
+         plt.xlabel("x [pixel]")
+         plt.ylabel("y [pixel]")
+         bot, top = np.percentile(bias, (1, 99))
+         plt.imshow(bias, vmin=bot, vmax=top, origin="lower")
+         plt.show()
+ 
+     head["obslist"] = " ".join([os.path.basename(f) for f in files])
+     head["nimages"] = (n, "number of images summed")
+     head["npixfix"] = (nbad, "pixels corrected for cosmic rays")
+     head["bgnoise"] = (biasnoise, "noise in combined image, electrons")
+     return bias, head
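A minimal end-to-end usage sketch (the instrument id, mode, and file paths are hypothetical):

>>> from pyreduce.combine_frames import combine_bias
>>> from pyreduce.instruments.instrument_info import load_instrument
>>> instrument = load_instrument("UVES")
>>> files = ["bias_%02i.fits" % i for i in range(10)]   # hypothetical paths
>>> bias, bhead = combine_bias(files, instrument, "middle")
>>> noise = bhead["bgnoise"]                            # read noise estimate, electrons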