firepype 0.0.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- firepype/__init__.py +27 -0
- firepype/calibration.py +520 -0
- firepype/cli.py +296 -0
- firepype/coadd.py +105 -0
- firepype/config.py +55 -0
- firepype/detection.py +517 -0
- firepype/extraction.py +198 -0
- firepype/io.py +248 -0
- firepype/pipeline.py +339 -0
- firepype/plotting.py +234 -0
- firepype/telluric.py +1401 -0
- firepype/utils.py +344 -0
- firepype-0.0.1.dist-info/METADATA +153 -0
- firepype-0.0.1.dist-info/RECORD +18 -0
- firepype-0.0.1.dist-info/WHEEL +5 -0
- firepype-0.0.1.dist-info/entry_points.txt +3 -0
- firepype-0.0.1.dist-info/licenses/LICENSE +21 -0
- firepype-0.0.1.dist-info/top_level.txt +1 -0
firepype/telluric.py
ADDED
|
@@ -0,0 +1,1401 @@
|
|
|
1
|
+
# firepype/telluric.py
|
|
2
|
+
from __future__ import annotations
|
|
3
|
+
|
|
4
|
+
from pathlib import Path
|
|
5
|
+
from typing import Tuple
|
|
6
|
+
|
|
7
|
+
import numpy as np
|
|
8
|
+
import numpy.linalg as npl
|
|
9
|
+
import matplotlib.pyplot as plt
|
|
10
|
+
from astropy.io import fits
|
|
11
|
+
from scipy.ndimage import gaussian_filter1d
|
|
12
|
+
from scipy.signal import find_peaks
|
|
13
|
+
from scipy import signal as sps
|
|
14
|
+
|
|
15
|
+
|
|
16
|
+
def _ensure_dir(p: str | Path):
|
|
17
|
+
"""
|
|
18
|
+
Purpose:
|
|
19
|
+
Ensure that directory exists (create parents if needed)
|
|
20
|
+
Inputs:
|
|
21
|
+
p: Directory path to create
|
|
22
|
+
Returns:
|
|
23
|
+
None
|
|
24
|
+
"""
|
|
25
|
+
|
|
26
|
+
Path(p).mkdir(parents=True, exist_ok=True)
|
|
27
|
+
|
|
28
|
+
|
|
29
|
+
def _load_fits_primary(path: str | Path):
    """
    Purpose:
        Load primary HDU data and header from FITS file
    Inputs:
        path: Path to FITS file
    Returns:
        tuple:
            - data (np.ndarray): Primary HDU image data
            - header (fits.Header): FITS header from the primary HDU
    """

    hdul = fits.open(str(path))
    try:
        # Copy the header so it survives after the file handle is closed.
        data = np.asarray(hdul[0].data)
        header = hdul[0].header.copy()
    finally:
        hdul.close()
    return data, header
|
|
43
|
+
|
|
44
|
+
|
|
45
|
+
def _load_table_spectrum(path: str | Path):
    """
    Purpose:
        Load spectrum from FITS table with columns wavelength_um, flux, and optional flux_err
    Inputs:
        path: Path to FITS file with binary table in HDU 1
    Returns:
        tuple:
            - wl (np.ndarray): Wavelengths in microns
            - fx (np.ndarray): Flux array
            - err (np.ndarray | None): Flux error array if present, else None
            - header (fits.Header): Primary header
    """

    with fits.open(str(path)) as hdul:
        header = hdul[0].header.copy()
        table = hdul[1].data
        wl = np.asarray(table["wavelength_um"], float)
        fx = np.asarray(table["flux"], float)
        # flux_err is optional in the on-disk format.
        has_err = "flux_err" in table.names
        err = np.asarray(table["flux_err"], float) if has_err else None

    return wl, fx, err, header
|
|
67
|
+
|
|
68
|
+
|
|
69
|
+
def _write_spectrum_with_err(
    path: str | Path,
    wl_um: np.ndarray,
    flux: np.ndarray,
    err: np.ndarray | None,
    base_header: fits.Header | None,
    history: list[str] | None = None,
):
    """
    Purpose:
        Write spectrum (and optional errors) to 2-HDU FITS file:
        - Primary HDU with (optional) header and history
        - Binary table HDU with wavelength_um, flux, and optional flux_err
    Inputs:
        path: Output FITS path
        wl_um: 1D array of wavelengths in microns
        flux: 1D array of flux values
        err: 1D array of flux errors or None
        base_header: FITS header to attach to the primary HDU (optional)
        history: List of strings appended as HISTORY cards (optional)
    Returns:
        None
    """

    # Wavelengths stored as float64 ('D'); flux values as float32 ('E').
    columns = [
        fits.Column(name="wavelength_um", array=np.asarray(wl_um, float), format="D"),
        fits.Column(name="flux", array=np.asarray(flux, np.float32), format="E"),
    ]
    if err is not None:
        columns.append(
            fits.Column(name="flux_err", array=np.asarray(err, np.float32), format="E")
        )

    table_hdu = fits.BinTableHDU.from_columns(columns)
    primary_hdu = fits.PrimaryHDU(
        header=base_header.copy() if base_header else fits.Header()
    )

    # HISTORY cards must be ASCII; non-ASCII characters are dropped.
    for entry in (history or []):
        primary_hdu.header["HISTORY"] = str(entry).encode("ascii", "ignore").decode("ascii")

    _ensure_dir(Path(path).parent)
    fits.HDUList([primary_hdu, table_hdu]).writeto(str(path), overwrite=True)
|
|
113
|
+
|
|
114
|
+
|
|
115
|
+
def _orient_to_increasing(wl, fx):
|
|
116
|
+
"""
|
|
117
|
+
Purpose:
|
|
118
|
+
Ensure wavelengths increase with index; if decreasing, reverse both arrays
|
|
119
|
+
Inputs:
|
|
120
|
+
wl: 1D array-like wavelengths
|
|
121
|
+
fx: 1D array-like flux aligned with wl
|
|
122
|
+
Returns:
|
|
123
|
+
tuple:
|
|
124
|
+
- wl_out (np.ndarray): Wavelength array in increasing order
|
|
125
|
+
- fx_out (np.ndarray): Flux array reoriented to match wl_out
|
|
126
|
+
"""
|
|
127
|
+
|
|
128
|
+
wl = np.asarray(wl, float)
|
|
129
|
+
fx = np.asarray(fx, float)
|
|
130
|
+
|
|
131
|
+
return (wl[::-1], fx[::-1]) if (wl.size >= 2 and wl[0] > wl[-1]) else (wl, fx)
|
|
132
|
+
|
|
133
|
+
|
|
134
|
+
def _assert_monotonic_and_align(wl, fx):
|
|
135
|
+
"""
|
|
136
|
+
Purpose:
|
|
137
|
+
Clean and align wavelength and flux arrays:
|
|
138
|
+
- Remove non-finite entries
|
|
139
|
+
- Sort by wavelength
|
|
140
|
+
- Enforce strictly increasing wavelengths (drop ties)
|
|
141
|
+
Inputs:
|
|
142
|
+
wl: 1D array-like wavelengths
|
|
143
|
+
fx: 1D array-like flux aligned with wl
|
|
144
|
+
Returns:
|
|
145
|
+
tuple:
|
|
146
|
+
- wl_out (np.ndarray): Strictly increasing wavelengths
|
|
147
|
+
- fx_out (np.ndarray): Flux values aligned to wl_out
|
|
148
|
+
"""
|
|
149
|
+
|
|
150
|
+
wl = np.asarray(wl, float)
|
|
151
|
+
fx = np.asarray(fx, float)
|
|
152
|
+
m = np.isfinite(wl) & np.isfinite(fx)
|
|
153
|
+
wl = wl[m]
|
|
154
|
+
fx = fx[m]
|
|
155
|
+
|
|
156
|
+
if wl.size == 0:
|
|
157
|
+
return wl, fx
|
|
158
|
+
|
|
159
|
+
idx = np.argsort(wl)
|
|
160
|
+
wl = wl[idx]
|
|
161
|
+
fx = fx[idx]
|
|
162
|
+
|
|
163
|
+
if wl.size >= 2:
|
|
164
|
+
good = np.concatenate(([True], np.diff(wl) > 0))
|
|
165
|
+
wl = wl[good]
|
|
166
|
+
fx = fx[good]
|
|
167
|
+
return wl, fx
|
|
168
|
+
|
|
169
|
+
|
|
170
|
+
def _plot_1d(wl, fx, title, path_png, xlabel="Wavelength (um)", ylabel="Flux", show=False):
    """
    Purpose:
        Plot 1D spectrum and save to a PNG file
    Inputs:
        wl: 1D array-like wavelengths
        fx: 1D array-like flux
        title: Plot title
        path_png: Output path
        xlabel: X-axis label (default: 'Wavelength (um)')
        ylabel: Y-axis label (default: 'Flux')
        show: Display plot
    Returns:
        None
    """

    wl = np.asarray(wl, float)
    fx = np.asarray(fx, float)
    finite = np.isfinite(wl) & np.isfinite(fx)

    # Skip plotting entirely when there is almost nothing to draw.
    if finite.sum() < 5:
        return

    plt.figure(figsize=(8, 5))
    plt.plot(wl[finite], fx[finite], lw=1.0, color="C3")
    plt.xlabel(xlabel)
    plt.ylabel(ylabel)
    plt.title(title)
    plt.grid(alpha=0.3)
    plt.tight_layout()
    _ensure_dir(Path(path_png).parent)
    plt.savefig(path_png, dpi=140)

    if show:
        plt.show()
    plt.close()
|
|
210
|
+
|
|
211
|
+
|
|
212
|
+
def _cheb_design_matrix(x, deg):
|
|
213
|
+
"""
|
|
214
|
+
Purpose:
|
|
215
|
+
Build Chebyshev design matrix T_k(x) for k=0..deg on scaled inputs
|
|
216
|
+
Inputs:
|
|
217
|
+
x: 1D array-like input coordinates (not necessarily scaled)
|
|
218
|
+
deg: Non-negative integer polynomial degree
|
|
219
|
+
Returns:
|
|
220
|
+
np.ndarray:
|
|
221
|
+
Matrix of shape (N, deg+1) where column k is T_k(x)
|
|
222
|
+
"""
|
|
223
|
+
|
|
224
|
+
x = np.asarray(x, float)
|
|
225
|
+
X = np.ones((x.size, deg + 1), float)
|
|
226
|
+
|
|
227
|
+
if deg >= 1:
|
|
228
|
+
X[:, 1] = x
|
|
229
|
+
|
|
230
|
+
for k in range(2, deg + 1):
|
|
231
|
+
X[:, k] = 2.0 * x * X[:, k - 1] - X[:, k - 2]
|
|
232
|
+
|
|
233
|
+
return X
|
|
234
|
+
|
|
235
|
+
|
|
236
|
+
def _robust_weights(res, c=4.685):
|
|
237
|
+
"""
|
|
238
|
+
Purpose:
|
|
239
|
+
Compute Tukey's biweight robust regression weights from residuals
|
|
240
|
+
Inputs:
|
|
241
|
+
res: 1D array-like residuals
|
|
242
|
+
c: Tuning constant controlling downweighting (default 4.685)
|
|
243
|
+
Returns:
|
|
244
|
+
np.ndarray:
|
|
245
|
+
Weights in [0,1] of same shape as res
|
|
246
|
+
"""
|
|
247
|
+
|
|
248
|
+
r = np.asarray(res, float)
|
|
249
|
+
s = np.nanmedian(np.abs(r - np.nanmedian(r))) * 1.4826 + 1e-12
|
|
250
|
+
u = r / (c * s)
|
|
251
|
+
w = (1 - u**2)
|
|
252
|
+
w[(np.abs(u) >= 1) | ~np.isfinite(w)] = 0.0
|
|
253
|
+
|
|
254
|
+
return w**2
|
|
255
|
+
|
|
256
|
+
|
|
257
|
+
def _find_arc_peaks_1d(arc1d, min_prom_frac=0.008, sigma_lo=15, sigma_hi=0.8, distance=3):
|
|
258
|
+
"""
|
|
259
|
+
Purpose:
|
|
260
|
+
Detect positive and negative peaks in 1D arc profile via baseline removal
|
|
261
|
+
and adaptive prominence thresholding
|
|
262
|
+
Inputs:
|
|
263
|
+
arc1d: 1D array-like arc signal
|
|
264
|
+
min_prom_frac: Minimum prominence as fraction of robust range (default 0.008)
|
|
265
|
+
sigma_lo: Sigma of low-pass Gaussian for baseline (default 15)
|
|
266
|
+
sigma_hi: Sigma for smoothing residual (default 0.8)
|
|
267
|
+
distance: Minimum peak spacing in samples (default 3)
|
|
268
|
+
Returns:
|
|
269
|
+
tuple:
|
|
270
|
+
- pk (np.ndarray): Indices of candidate peaks (merged pos/neg), edge-trimmed
|
|
271
|
+
- sm (np.ndarray): High-pass–smoothed signal used for detection
|
|
272
|
+
"""
|
|
273
|
+
|
|
274
|
+
y = np.asarray(arc1d, float)
|
|
275
|
+
n = y.size
|
|
276
|
+
base = gaussian_filter1d(y, sigma=sigma_lo, mode="nearest")
|
|
277
|
+
sm = gaussian_filter1d(y - base, sigma=sigma_hi, mode="nearest")
|
|
278
|
+
p1, p99 = np.percentile(sm, [1, 99])
|
|
279
|
+
mad = np.median(np.abs(sm - np.median(sm))) + 1e-12
|
|
280
|
+
noise = 1.4826 * mad
|
|
281
|
+
prom = max(1.5 * noise, min_prom_frac * (p99 - p1))
|
|
282
|
+
pk_pos, _ = find_peaks(sm, prominence=float(max(prom, 1e-6)), distance=distance)
|
|
283
|
+
pk_neg, _ = find_peaks(-sm, prominence=float(max(prom, 1e-6)), distance=distance)
|
|
284
|
+
pk = np.unique(np.r_[pk_pos, pk_neg])
|
|
285
|
+
pk = pk[(pk > 3) & (pk < n - 3)]
|
|
286
|
+
|
|
287
|
+
return pk.astype(int), sm
|
|
288
|
+
|
|
289
|
+
|
|
290
|
+
def _match_peaks_to_refs(px_peaks, ref_lines_um, wl_lo, wl_hi, max_sep=0.012):
    """
    Purpose:
        Match detected pixel peaks to reference wavelengths using seeded linear
        mapping and Hungarian assignment with maximum separation constraint
    Inputs:
        px_peaks: 1D array of peak pixel indices
        ref_lines_um: 1D array of reference line wavelengths in microns
        wl_lo: Lower bound of wavelength range (microns)
        wl_hi: Upper bound of wavelength range (microns)
        max_sep: Maximum allowed match separation in microns (default 0.012)
    Returns:
        tuple:
            - px_m (np.ndarray): Matched pixel indices (sorted, strictly increasing in λ)
            - wl_m (np.ndarray): Corresponding matched reference wavelengths
    """

    px = np.asarray(px_peaks, float)
    refs = np.asarray(ref_lines_um, float)
    # Only reference lines inside the requested wavelength window can match.
    refs = refs[(refs >= wl_lo) & (refs <= wl_hi)]

    # Too few peaks or references -> no reliable assignment is possible.
    if px.size < 8 or refs.size < 8:
        return np.array([], int), np.array([], float)

    # Seed wavelengths with a linear pixel->wavelength map over [wl_lo, wl_hi];
    # the denominator floor of 1.0 guards a degenerate single-pixel span.
    x = (px - px.min()) / max(px.max() - px.min(), 1.0)
    wl_seed = wl_lo + x * (wl_hi - wl_lo)
    rows, cols, costs = [], [], []

    # For each peak, consider only the +/-4 nearest reference lines around its
    # seeded position and record |delta-lambda| as the assignment cost.
    for i, wl_s in enumerate(wl_seed):
        j0 = np.searchsorted(refs, wl_s)

        for j in (j0 - 4, j0 - 3, j0 - 2, j0 - 1, j0, j0 + 1, j0 + 2, j0 + 3, j0 + 4):
            if 0 <= j < refs.size:
                d = abs(refs[j] - wl_s)

                # Candidate pairs up to 3x max_sep are offered to the solver;
                # the tighter max_sep cut is applied after assignment below.
                if d <= 3 * max_sep:
                    rows.append(i)
                    cols.append(j)
                    costs.append(d)
    if not rows:
        return np.array([], int), np.array([], float)

    # Imported lazily so the module loads without scipy.optimize in play.
    from scipy.optimize import linear_sum_assignment

    # Non-candidate pairs get a large (1e3) cost so they are effectively
    # never chosen over real candidates.
    C = np.full((px.size, refs.size), 1e3, float)
    C[rows, cols] = costs
    r_idx, c_idx = linear_sum_assignment(C)
    # Enforce the hard separation limit on the optimal assignment.
    ok = C[r_idx, c_idx] <= max_sep
    r_idx = r_idx[ok]
    c_idx = c_idx[ok]

    # Require a minimum number of surviving matches for a usable solution.
    if r_idx.size < 6:
        return np.array([], int), np.array([], float)

    px_m = px[r_idx].astype(int)
    wl_m = refs[c_idx].astype(float)
    # Sort by pixel and drop any pair that breaks wavelength monotonicity,
    # so the result is usable for a dispersion fit.
    order = np.argsort(px_m)
    px_m = px_m[order]
    wl_m = wl_m[order]
    good = np.concatenate(([True], np.diff(wl_m) > 0))

    return px_m[good], wl_m[good]
|
|
352
|
+
|
|
353
|
+
|
|
354
|
+
def _solve_dispersion_from_arc1d(arc1d, wl_range, ref_lines_um, deg=3):
    """
    Purpose:
        Solve wavelength solution from 1D arc spectrum by detecting peaks,
        matching to reference lines, and fitting Chebyshev polynomial with robust weights
    Inputs:
        arc1d: 1D array-like arc spectrum
        wl_range: (wl_lo, wl_hi) tuple in microns for target wavelength coverage
        ref_lines_um: 1D array of reference lines in microns
        deg: Polynomial degree for Chebyshev fit (default 3)
    Returns:
        np.ndarray:
            Wavelength array (microns) of same length as arc1d, strictly increasing
            and clipped to [wl_lo, wl_hi]
    Raises:
        RuntimeError: If too few peaks or matched lines are found
    """

    y = np.asarray(arc1d, float)
    n = y.size
    wl_lo, wl_hi = float(wl_range[0]), float(wl_range[1])
    pk, _ = _find_arc_peaks_1d(y)

    if pk.size < 8:
        raise RuntimeError("Too few arc peaks")

    px_m, wl_m = _match_peaks_to_refs(pk, ref_lines_um, wl_lo, wl_hi, max_sep=0.012)

    # Need enough matched lines to over-determine a degree-`deg` fit.
    if px_m.size < max(8, deg + 3):
        raise RuntimeError("Insufficient matched lines")

    # Map pixel indices onto [-1, 1], the canonical Chebyshev domain.
    x_full = np.linspace(-1.0, 1.0, n)
    x_m = np.interp(px_m, np.arange(n), x_full)
    X = _cheb_design_matrix(x_m, deg)
    w = np.ones_like(wl_m)

    # Iteratively reweighted least squares: refit, recompute Tukey biweights
    # from residuals, stop when the weights stabilize (max 12 iterations).
    for _ in range(12):
        coef, *_ = npl.lstsq(X * w[:, None], wl_m * w, rcond=None)
        res = wl_m - X.dot(coef)
        w_new = _robust_weights(res)

        if np.allclose(w, w_new, atol=1e-3):
            break

        w = w_new

    # Evaluate the fitted Chebyshev series over the full pixel grid.
    wl = np.polynomial.chebyshev.chebval(x_full, coef)
    span_fit = wl[-1] - wl[0]
    span_tar = wl_hi - wl_lo

    # If the fitted span deviates >0.2% from the target, apply an affine
    # rescale to pin both endpoints; otherwise only shift to pin the start.
    if abs(span_fit - span_tar) / max(span_tar, 1e-12) > 0.002:
        a = span_tar / (span_fit + 1e-12)
        b = wl_lo - a * wl[0]
        wl = a * wl + b

    else:
        wl += (wl_lo - wl[0])

    # Enforce strict monotonicity with a tiny epsilon bump where needed.
    for i in range(1, n):
        if wl[i] <= wl[i - 1]:
            wl[i] = wl[i - 1] + 1e-9

    return np.clip(wl, wl_lo, wl_hi)
|
|
417
|
+
|
|
418
|
+
|
|
419
|
+
def _extract_with_local_bg(img, center_col, ap=7, bg_in=12, bg_out=26):
|
|
420
|
+
"""
|
|
421
|
+
Purpose:
|
|
422
|
+
Extract 1D spectrum by median-collapsing columns around center column,
|
|
423
|
+
subtracting local background estimated from side bands
|
|
424
|
+
Inputs:
|
|
425
|
+
img: 2D array image (rows x cols)
|
|
426
|
+
center_col: Central column index for extraction
|
|
427
|
+
ap: Half-width of extraction aperture in columns (default 7)
|
|
428
|
+
bg_in: Inner offset (columns) from center to begin background windows (default 12)
|
|
429
|
+
bg_out: Outer offset (columns) from center to end background windows (default 26)
|
|
430
|
+
Returns:
|
|
431
|
+
np.ndarray:
|
|
432
|
+
1D extracted spectrum per row after background subtraction
|
|
433
|
+
"""
|
|
434
|
+
|
|
435
|
+
nrows, ncols = img.shape
|
|
436
|
+
lo = max(0, center_col - ap)
|
|
437
|
+
hi = min(ncols, center_col + ap + 1)
|
|
438
|
+
bg_left = img[:, max(0, center_col - bg_out) : max(0, center_col - bg_in)]
|
|
439
|
+
bg_right = img[:, min(ncols, center_col + bg_in) : min(ncols, center_col + bg_out)]
|
|
440
|
+
|
|
441
|
+
if bg_left.size == 0 and bg_right.size == 0:
|
|
442
|
+
bg = np.zeros(nrows, dtype=img.dtype)
|
|
443
|
+
|
|
444
|
+
else:
|
|
445
|
+
bg = np.median(np.concatenate([bg_left, bg_right], axis=1), axis=1)
|
|
446
|
+
|
|
447
|
+
return np.median(img[:, lo:hi], axis=1) - bg
|
|
448
|
+
|
|
449
|
+
|
|
450
|
+
def _extract_cols_median_with_err(img, center_col, half=1, ap=7, bg_in=12, bg_out=26):
    """
    Purpose:
        Extract 1D spectrum by median-combining multiple adjacent columns around
        center_col with local background subtraction, and estimate per-row errors
    Inputs:
        img: 2D array image (rows x cols)
        center_col: Central column index
        half: Include columns [center_col-half, center_col+half] (default 1)
        ap: Half-width of per-column extraction aperture (default 7)
        bg_in: Inner offset of background windows (default 12)
        bg_out: Outer offset of background windows (default 26)
    Returns:
        tuple:
            - flux_1d (np.ndarray): Extracted 1D flux (rows)
            - sigma_1d (np.ndarray): Estimated 1-sigma errors per row
    """

    nrows, ncols = img.shape
    # Neighboring columns to combine, clipped to valid range.
    cols = [c for c in range(center_col - half, center_col + half + 1) if 0 <= c < ncols]
    specs, sigmas = [], []

    for c in cols:
        # Per-column extraction aperture, clipped to the image.
        lo = max(0, c - ap)
        hi = min(ncols, c + ap + 1)
        # Background side bands flanking the aperture.
        bg_left = img[:, max(0, c - bg_out) : max(0, c - bg_in)]
        bg_right = img[:, min(ncols, c + bg_in) : min(ncols, c + bg_out)]

        if bg_left.size == 0 and bg_right.size == 0:
            bg_med = np.zeros(nrows, dtype=float)
            bg_std = np.zeros(nrows, dtype=float)

        else:
            # Use whichever side band(s) exist near the image edges.
            bg_all = (
                np.concatenate([bg_left, bg_right], axis=1)
                if (bg_left.size and bg_right.size)
                else (bg_left if bg_left.size else bg_right)
            )

            bg_med = np.median(bg_all, axis=1).astype(float)
            # Robust per-row scatter: MAD scaled to sigma-equivalent.
            mad = np.median(np.abs(bg_all - bg_med[:, None]), axis=1) + 1e-12
            bg_std = 1.4826 * mad

        sub = img[:, lo:hi] - bg_med[:, None]
        spec = np.median(sub, axis=1).astype(float)
        specs.append(spec)
        n_eff = max(1, hi - lo)
        # sqrt(pi/2) factor: the variance of a median exceeds that of a mean
        # by ~pi/2 for Gaussian noise.
        sigma_row = np.sqrt(np.pi / 2) * bg_std / np.sqrt(n_eff)
        sigmas.append(sigma_row)

    spec_stack = np.vstack(specs)
    sigma_stack = np.vstack(sigmas)
    flux_1d = np.median(spec_stack, axis=0)
    M = max(1, len(cols))
    # Combine per-column sigmas (RMS via median of variances) and apply the
    # median-efficiency factor again for the cross-column median.
    sigma_1d = np.sqrt(np.pi / 2) * np.median(sigma_stack**2, axis=0) ** 0.5 / np.sqrt(M)

    return flux_1d.astype(float), sigma_1d.astype(float)
|
|
507
|
+
|
|
508
|
+
|
|
509
|
+
def _detect_slit_edges(data, x_hint=(900, 1300), hint_expand=150, row_frac=(0.35, 0.85)):
    """
    Purpose:
        Detect left/right slit edges from median spatial profile using gradient peaks
        in restricted column window and row band
    Inputs:
        data: 2D array image (rows x cols)
        x_hint: Tuple of approximate slit bounds (lo, hi) in columns
        hint_expand: Additional search half-width to expand around x_hint (default 150)
        row_frac: Fractional row range (lo, hi) for band extraction (default (0.35, 0.85))
    Returns:
        tuple:
            - left_edge (int): Detected left edge column index
            - right_edge (int): Detected right edge column index
            - sm (np.ndarray): Smoothed high-pass profile used
            - g (np.ndarray): Smoothed gradient used for peak finding
            - (lo, hi) (tuple[int,int]): Effective search column range
            - (r0, r1) (tuple[int,int]): Row band used for the profile
    """

    nrows, ncols = data.shape
    # Restrict to a central row band to avoid detector edges.
    r0 = int(min(row_frac) * nrows)
    r1 = int(max(row_frac) * nrows)
    band = data[r0:r1, :]
    # Exclude the outer 3% of columns, then expand the hint window.
    lo_global, hi_global = int(0.03 * ncols), int(0.97 * ncols)
    xmin, xmax = max(lo_global, min(x_hint)), min(hi_global, max(x_hint))
    lo = max(lo_global, xmin - hint_expand)
    hi = min(hi_global, xmax + hint_expand)
    # High-pass the median column profile, then take a smoothed gradient:
    # slit edges show up as strong +/- gradient peaks.
    prof = np.median(band, axis=0).astype(float)
    base = gaussian_filter1d(prof, 120, mode="nearest")
    hp = prof - base
    sm = gaussian_filter1d(hp, 3, mode="nearest")
    g = gaussian_filter1d(np.gradient(sm), 2.2, mode="nearest")
    prom = np.percentile(np.abs(g[lo:hi]), 70)
    # Rising gradient -> candidate left edges; falling -> right edges.
    L_idx, _ = find_peaks(g[lo:hi], prominence=float(max(prom, 1e-6)), distance=12)
    R_idx, _ = find_peaks(-g[lo:hi], prominence=float(max(prom, 1e-6)), distance=12)

    # NOTE(review): fallback returns the search bounds as "edges"; callers
    # cannot distinguish this from a real detection — confirm intended.
    if L_idx.size == 0 or R_idx.size == 0:
        return lo, hi, sm, g, (lo, hi), (r0, r1)

    L = lo + L_idx
    R = lo + R_idx
    best = (-1e9, lo, hi)

    # Score every left/right pair (right at least 8 columns past left) by
    # the contrast between interior level and flanking background levels.
    for l in L:
        for r in R[R > l + 8]:
            interior = np.median(sm[l:r])
            left_bg = np.median(sm[max(lo, l - 60) : l])
            right_bg = np.median(sm[r : min(hi, r + 60)])
            score = abs(interior - 0.5 * (left_bg + right_bg))

            if score > best[0]:
                best = (score, l, r)

    _, le, re = best

    return int(le), int(re), sm, g, (lo, hi), (r0, r1)
|
|
566
|
+
|
|
567
|
+
|
|
568
|
+
def _detect_objects_in_slit(data, left_edge, right_edge, row_frac=(0.40, 0.80)):
|
|
569
|
+
"""
|
|
570
|
+
Purpose:
|
|
571
|
+
Detect bright and dark object centroids within the slit by analyzing
|
|
572
|
+
median column profile between edges
|
|
573
|
+
Inputs:
|
|
574
|
+
data: 2D array image (rows x cols)
|
|
575
|
+
left_edge: Left slit edge column index
|
|
576
|
+
right_edge: Right slit edge column index
|
|
577
|
+
row_frac: Fractional row limits for band extraction (default (0.40, 0.80))
|
|
578
|
+
Returns:
|
|
579
|
+
tuple:
|
|
580
|
+
- obj_pos_abs (int): Column of brightest object peak within slit
|
|
581
|
+
- obj_neg_abs (int): Column of darkest trough within slit
|
|
582
|
+
- prof (np.ndarray): Smoothed median profile used for detection
|
|
583
|
+
- (r0, r1) (tuple[int,int]): Row band used
|
|
584
|
+
"""
|
|
585
|
+
|
|
586
|
+
nrows, _ = data.shape
|
|
587
|
+
r0 = int(min(row_frac) * nrows)
|
|
588
|
+
r1 = int(max(row_frac) * nrows)
|
|
589
|
+
mid_lo = left_edge + int(0.20 * (right_edge - left_edge))
|
|
590
|
+
mid_hi = right_edge - int(0.20 * (right_edge - left_edge))
|
|
591
|
+
band = data[r0:r1, mid_lo : mid_hi + 1]
|
|
592
|
+
prof = gaussian_filter1d(np.median(band, axis=0).astype(float), 4.0)
|
|
593
|
+
pos_rel = int(np.argmax(prof))
|
|
594
|
+
neg_rel = int(np.argmin(prof))
|
|
595
|
+
obj_pos_abs = mid_lo + pos_rel
|
|
596
|
+
obj_neg_abs = mid_lo + neg_rel
|
|
597
|
+
|
|
598
|
+
return int(obj_pos_abs), int(obj_neg_abs), prof, (r0, r1)
|
|
599
|
+
|
|
600
|
+
|
|
601
|
+
def _find_arc_trace_col_strong(
    arc_img, approx_col=None, search_half=240, x_hint=(900, 1300), row_frac=(0.35, 0.85)
):
    """
    Purpose:
        Find strong arc column (trace position) by peak-searching median
        spatial profile, optionally near approximate column
    Inputs:
        arc_img: 2D arc image array
        approx_col: Optional approximate column index around which to search
        search_half: Half-width of search window around approx_col (default 240)
        x_hint: Tuple giving broader column region for initial profile (default (900,1300))
        row_frac: Fractional row band for profile median (default (0.35,0.85))
    Returns:
        int:
            Selected column index corresponding to strongest arc-like feature
    """

    img = np.asarray(arc_img, float)
    nrows, ncols = img.shape
    # Restrict search to the hinted column region and a central row band.
    lo_x = int(max(0, min(x_hint)))
    hi_x = int(min(ncols, max(x_hint)))
    r0 = int(min(row_frac) * nrows)
    r1 = int(max(row_frac) * nrows)
    band = img[r0:r1, lo_x:hi_x]
    # High-pass the median spatial profile to suppress broad illumination.
    prof = np.median(band, axis=0).astype(float)
    base = gaussian_filter1d(prof, 65, mode="nearest")
    hp = prof - base
    sm = gaussian_filter1d(hp, 3, mode="nearest")
    # Prominence floor of 10.0 assumes counts-scale data — TODO confirm units.
    prom = max(np.nanpercentile(np.abs(sm), 98) * 0.6, 10.0)
    cand_idx, _ = find_peaks(sm, prominence=float(prom), distance=10)
    cand_cols = (lo_x + cand_idx).astype(int)

    # Keep only candidates near the approximate column, when given.
    if approx_col is not None and cand_cols.size:
        cand_cols = cand_cols[np.abs(cand_cols - int(approx_col)) <= int(search_half)]

    if cand_cols.size == 0:
        # No peaks at all: fall back to the global maximum of the profile.
        if approx_col is None:
            return int(lo_x + int(np.argmax(sm)))

        # NOTE(review): if approx_col - search_half falls below lo_x this
        # slice uses a negative start index (wraps) — confirm inputs keep
        # the window inside [lo_x, hi_x].
        lo = max(0, int(approx_col) - int(search_half))
        hi = min(ncols, int(approx_col) + int(search_half) + 1)
        j = int(np.argmax(sm[(lo - lo_x) : (hi - lo_x)]))

        return int(lo + j)

    # Score each candidate by the peak of the median profile within a
    # +/-5 column window, and return the strongest.
    win = 5
    scores = []

    for c in cand_cols:
        j = int(np.clip(c - lo_x, 0, band.shape[1] - 1))
        j0 = max(0, j - win)
        j1 = min(band.shape[1], j + win + 1)
        scores.append(float(np.nanmedian(band[:, j0:j1], axis=0).max()))

    return int(cand_cols[int(np.argmax(scores))])
|
|
657
|
+
|
|
658
|
+
|
|
659
|
+
def _average_wavecal_across_cols(arc_img, center_col, half=1, ref_lines_um=None, wl_range=(0.83, 2.45)):
    """
    Purpose:
        Compute averaged wavelength solution across a few neighboring columns
        around chosen arc column by solving per-column and averaging
    Inputs:
        arc_img: 2D arc image array
        center_col: Central column index to calibrate around
        half: Use columns [center_col-half, center_col+half] (default 1)
        ref_lines_um: 1D array of reference line wavelengths in microns (required)
        wl_range: Tuple (wl_lo, wl_hi) microns for target coverage (default (0.83, 2.45))
    Returns:
        np.ndarray:
            Averaged wavelength solution (microns) on native pixel grid,
            clipped and oriented to increase
    Raises:
        ValueError: If ref_lines_um is None
    """

    if ref_lines_um is None:
        raise ValueError("ref_lines_um required")

    ncols = arc_img.shape[1]
    # Neighboring columns, clipped to the image.
    cols = [c for c in range(center_col - half, center_col + half + 1) if 0 <= c < ncols]
    wl_list = []

    for c in cols:
        # Solve an independent dispersion per column, then average below.
        a1d = _extract_with_local_bg(arc_img, c, ap=5, bg_in=8, bg_out=18)
        wl_c = _solve_dispersion_from_arc1d(
            a1d, wl_range=wl_range, ref_lines_um=np.asarray(ref_lines_um, float), deg=3
        )

        # Orient each per-column solution to increase before stacking.
        if wl_c[0] > wl_c[-1]:
            wl_c = wl_c[::-1]

        wl_list.append(wl_c)

    wl_avg = np.nanmean(np.vstack(wl_list), axis=0)

    if wl_avg[0] > wl_avg[-1]:
        wl_avg = wl_avg[::-1]

    wl_min, wl_max = wl_range
    span_fit = wl_avg[-1] - wl_avg[0]
    span_tar = wl_max - wl_min

    # If the averaged span deviates >0.2% from the target, apply an affine
    # rescale to pin both endpoints; otherwise only shift to pin the start.
    if abs(span_fit - span_tar) / max(span_tar, 1e-12) > 0.002:
        a = span_tar / (span_fit + 1e-12)
        b = wl_min - a * wl_avg[0]
        wl_avg = a * wl_avg + b

    else:
        wl_avg = wl_avg + (wl_min - wl_avg[0])

    return np.clip(wl_avg, wl_min, wl_max)
|
|
714
|
+
|
|
715
|
+
|
|
716
|
+
def _load_vega_model(path: str | Path):
|
|
717
|
+
"""
|
|
718
|
+
Purpose:
|
|
719
|
+
Load Vega model spectrum from a two-column text file (wavelength, flux)
|
|
720
|
+
Auto-convert wavelengths to microns if in Å (divide by 1e4)
|
|
721
|
+
Inputs:
|
|
722
|
+
path: Path-like to text file
|
|
723
|
+
Returns:
|
|
724
|
+
tuple:
|
|
725
|
+
- wl (np.ndarray): Wavelengths in microns
|
|
726
|
+
- fx (np.ndarray): Flux values, sorted by wavelength and finite
|
|
727
|
+
"""
|
|
728
|
+
|
|
729
|
+
arr = np.genfromtxt(path, dtype=float)
|
|
730
|
+
wl = np.asarray(arr[:, 0], float)
|
|
731
|
+
fx = np.asarray(arr[:, 1], float)
|
|
732
|
+
|
|
733
|
+
if np.nanmedian(wl) > 50:
|
|
734
|
+
wl = wl / 1e4 # Å -> µm
|
|
735
|
+
|
|
736
|
+
m = np.isfinite(wl) & np.isfinite(fx)
|
|
737
|
+
wl = wl[m]
|
|
738
|
+
fx = fx[m]
|
|
739
|
+
idx = np.argsort(wl)
|
|
740
|
+
|
|
741
|
+
return wl[idx], fx[idx]
|
|
742
|
+
|
|
743
|
+
|
|
744
|
+
def _gaussian_broaden_to_R(wl_um, fx, R_target=2000.0, oversample=2):
    """
    Purpose:
        Convolve spectrum with Gaussian in log-wavelength space to reach target
        resolving power R
    Inputs:
        wl_um: 1D wavelengths in microns
        fx: 1D flux array aligned with wl_um
        R_target: Target resolving power (default 2000)
        oversample: Oversampling factor for log grid (default 2)
    Returns:
        tuple:
            - wl_out (np.ndarray): Output wavelengths (microns) on log grid back-converted
            - fx_out (np.ndarray): Broadened flux array
    """

    wl = np.asarray(wl_um, float)
    fx = np.asarray(fx, float)

    # Too few samples to resample/convolve meaningfully.
    if wl.size < 10:
        return wl, fx

    # Uniform grid in log-wavelength: constant R corresponds to a constant
    # kernel width in log space.
    logw = np.log(wl)
    step = np.median(np.diff(logw)) / max(1, oversample)
    grid = np.arange(logw.min(), logw.max() + step / 2, step)
    f = np.interp(np.exp(grid), wl, fx, left=np.nan, right=np.nan)
    m = np.isfinite(f)

    if m.sum() < 10:
        return wl, fx

    # Fill any NaN grid points by index-space interpolation between
    # surrounding finite samples (so the convolution sees no NaNs).
    if (~m).any():
        f[~m] = np.interp(np.where(~m)[0], np.where(m)[0], f[m])

    # FWHM in log-lambda is 1/R; convert to a Gaussian sigma in grid pixels.
    sigma_pix = (1.0 / (2.355 * R_target)) / step

    # Kernel narrower than ~1/3 pixel would be a no-op; skip broadening.
    if sigma_pix < 0.3:
        return wl, fx

    # "| 1" forces an odd kernel length so the kernel has a central sample.
    g = sps.windows.gaussian(M=int(8 * sigma_pix) | 1, std=sigma_pix)
    g /= g.sum()
    fb = np.convolve(f, g, mode="same")

    return np.exp(grid), fb
|
|
788
|
+
|
|
789
|
+
|
|
790
|
+
def _mask_a0v_intrinsic_lines(wl_um):
|
|
791
|
+
"""
|
|
792
|
+
Purpose:
|
|
793
|
+
Mask regions around intrinsic A0V stellar lines where ratios are unreliable
|
|
794
|
+
Inputs:
|
|
795
|
+
wl_um: 1D array of wavelengths in microns
|
|
796
|
+
Returns:
|
|
797
|
+
np.ndarray:
|
|
798
|
+
Boolean mask array where True indicates keep (not masked)
|
|
799
|
+
"""
|
|
800
|
+
|
|
801
|
+
lines = [0.9546, 1.0049, 1.0941, 1.2821, 1.513, 1.641, 1.681, 1.736, 2.1661, 2.281, 2.625, 2.874]
|
|
802
|
+
wl = np.asarray(wl_um, float)
|
|
803
|
+
m = np.ones_like(wl, dtype=bool)
|
|
804
|
+
|
|
805
|
+
for w0 in lines:
|
|
806
|
+
m &= ~(np.abs(wl - w0) < 0.006)
|
|
807
|
+
|
|
808
|
+
return m
|
|
809
|
+
|
|
810
|
+
|
|
811
|
+
def _mask_deep_telluric_regions(wl_um):
|
|
812
|
+
"""
|
|
813
|
+
Purpose:
|
|
814
|
+
Mask very deep telluric absorption regions where correction is unreliable
|
|
815
|
+
Inputs:
|
|
816
|
+
wl_um: 1D array of wavelengths in microns
|
|
817
|
+
Returns:
|
|
818
|
+
np.ndarray:
|
|
819
|
+
Boolean mask array where True indicates keep (not masked)
|
|
820
|
+
"""
|
|
821
|
+
|
|
822
|
+
wl = np.asarray(wl_um, float)
|
|
823
|
+
m = np.ones_like(wl, dtype=bool)
|
|
824
|
+
|
|
825
|
+
for a, b in [(1.35, 1.42), (1.80, 1.95), (0.90, 0.94)]:
|
|
826
|
+
m &= ~((wl >= a) & (wl <= b))
|
|
827
|
+
|
|
828
|
+
return m
|
|
829
|
+
|
|
830
|
+
|
|
831
|
+
def _band_segments(wl):
|
|
832
|
+
"""
|
|
833
|
+
Purpose:
|
|
834
|
+
Define J, H, K band boolean masks on provided wavelength grid
|
|
835
|
+
Inputs:
|
|
836
|
+
wl: 1D array of wavelengths in microns
|
|
837
|
+
Returns:
|
|
838
|
+
list[np.ndarray]:
|
|
839
|
+
List of boolean masks [J, H, K] selecting each band's wavelength range
|
|
840
|
+
"""
|
|
841
|
+
|
|
842
|
+
wl = np.asarray(wl, float)
|
|
843
|
+
J = (wl >= 0.98) & (wl <= 1.31)
|
|
844
|
+
H = (wl >= 1.52) & (wl <= 1.74)
|
|
845
|
+
K = (wl >= 2.06) & (wl <= 2.33)
|
|
846
|
+
|
|
847
|
+
return [J, H, K]
|
|
848
|
+
|
|
849
|
+
|
|
850
|
+
def _build_T_per_band_dense(
    wl_std,
    fx_std,
    wl_vega,
    fx_vega,
    R_fire=2000.0,
    deg_cont=2,
    min_run=60,
    smooth_sigma=0.7,
    clip_lo=0.06,
    clip_hi=1.12,
):
    """
    Purpose:
        Build dense telluric transmission on standard star wavelength grid
        by ratioing to broadened Vega model, fitting smooth continuum per band,
        and smoothing only within contiguous supported segments
    Inputs:
        wl_std: 1D wavelengths (microns) of standard star spectrum
        fx_std: 1D flux of standard star aligned with wl_std
        wl_vega: 1D Vega model wavelengths (microns)
        fx_vega: 1D Vega model flux
        R_fire: Target resolution for broadening Vega (default 2000)
        deg_cont: Degree of Chebyshev continuum fit per band (default 2)
        min_run: Minimum contiguous run length to accept for smoothing (default 60)
        smooth_sigma: Sigma for Gaussian smoothing in ln T space (default 0.7)
        clip_lo: Lower clip for T values (default 0.06)
        clip_hi: Upper clip for T values (default 1.12)
    Returns:
        np.ndarray:
            Array on wl_std grid; NaN outside supported segments,
            clipped to [clip_lo, clip_hi]
    """

    wl_std = np.asarray(wl_std, float)
    fx_std = np.asarray(fx_std, float)

    # Broaden the Vega model to the instrument resolution and resample it
    # onto the standard-star wavelength grid.
    wvb, fvb = _gaussian_broaden_to_R(wl_vega, fx_vega, R_target=R_fire, oversample=2)
    fvb_i = np.interp(wl_std, wvb, fvb, left=np.nan, right=np.nan)

    def cheb_fit(x, y, deg):
        """
        Purpose:
            Fit Chebyshev polynomial to (x, y) using robust reweighting
        Inputs:
            x: 1D array of x-values
            y: 1D array of y-values
            deg: Polynomial degree
        Returns:
            Callable:
                Function P(z) that evaluates fitted polynomial at z
        """

        x = np.asarray(x, float)
        y = np.asarray(y, float)

        # Degenerate input: fall back to a constant (median) "fit".
        if x.size < deg + 3:
            m = np.nanmedian(y) if y.size else 1.0
            return lambda z: np.full_like(np.asarray(z, float), m)

        # Map x onto [-1, 1] for Chebyshev stability.
        # Fix: use np.ptp(x) — the ndarray.ptp() method was removed in NumPy 2.0.
        span = max(np.ptp(x), 1e-12)
        t = (x - x.min()) / span * 2 - 1
        X = _cheb_design_matrix(t, deg)
        w = np.ones_like(y)

        # Iteratively reweighted least squares; stop once weights converge.
        for _ in range(10):
            coef, *_ = npl.lstsq(X * w[:, None], y * w, rcond=None)
            res = y - X.dot(coef)
            w_new = _robust_weights(res)

            if np.allclose(w, w_new, atol=1e-3):
                break

            w = w_new

        def P(z):
            z = np.asarray(z, float)
            tz = (z - x.min()) / span * 2 - 1

            return _cheb_design_matrix(tz, deg).dot(coef)

        return P

    T = np.full_like(wl_std, np.nan, dtype=float)

    for mband in _band_segments(wl_std):
        # Pixels usable at all: in-band, finite on both sides, positive Vega.
        base = mband & np.isfinite(fx_std) & np.isfinite(fvb_i) & (fvb_i > 0)

        if base.sum() < min_run:
            continue

        # Fit the continuum only where neither deep telluric bands nor
        # intrinsic A0V lines contaminate the ratio; relax if too sparse.
        mfit = base & _mask_deep_telluric_regions(wl_std) & _mask_a0v_intrinsic_lines(wl_std)

        if mfit.sum() < min_run:
            mfit = base & _mask_deep_telluric_regions(wl_std)

        if mfit.sum() < max(30, deg_cont + 3):
            mfit = base

        ratio = fx_std[mfit] / np.maximum(fvb_i[mfit], 1e-20)
        keep = np.isfinite(ratio) & (ratio > 0)

        if keep.sum() < max(30, deg_cont + 3):
            continue

        w_fit = wl_std[mfit][keep]
        r_fit = ratio[keep]
        P = cheb_fit(w_fit, r_fit, deg_cont)

        idx_band = np.where(mband)[0]
        idx_base = idx_band[base[idx_band]]

        if idx_base.size == 0:
            continue

        # Raw transmission = observed / (Vega * continuum).
        cont = P(wl_std[idx_base])
        rawT = fx_std[idx_base] / np.maximum(fvb_i[idx_base] * cont, 1e-20)

        # Collect contiguous pixel runs of length >= min_run; shorter
        # fragments are dropped rather than smoothed across gaps.
        runs = []
        i = 0

        while i < idx_base.size:
            j = i

            while j + 1 < idx_base.size and (idx_base[j + 1] == idx_base[j] + 1):
                j += 1

            if (j - i + 1) >= min_run:
                runs.append((i, j))
            i = j + 1

        # Smooth ln(T) within each run only, so gaps never bleed across.
        for a, b in runs:
            seg_idx = idx_base[a : b + 1]
            vals = np.clip(rawT[a : b + 1], 1e-4, 5.0)
            v = np.log(np.clip(vals, clip_lo, clip_hi))
            v = gaussian_filter1d(v, sigma=smooth_sigma, mode="nearest")
            T[seg_idx] = np.exp(v)

    T = np.where(np.isfinite(T), np.clip(T, clip_lo, clip_hi), np.nan)

    return T
|
|
990
|
+
|
|
991
|
+
|
|
992
|
+
def apply_telluric_correction(
    science_fits: str | Path,
    raw_dir: str | Path,
    std_a_id: int,
    std_b_id: int,
    arc_path: str | Path,
    ref_list_path: str | Path,
    vega_model_path: str | Path,
    out_dir: str | Path,
    *,
    qa_dir: str | Path | None = None,
    wl_range: Tuple[float, float] = (0.83, 2.45),
    slit_x_hint: Tuple[int, int] = (900, 1300),
    row_fraction: Tuple[float, float] = (0.35, 0.85),
    std_ap: int = 9,
    std_bg_in: int = 16,
    std_bg_out: int = 32,
    R_fire: float = 2000.0,
    T_min: float = 0.2,
    T_max: float = 1.2,
    show_plots: bool = False,
) -> Path:
    """
    Purpose:
        Apply telluric correction to science spectrum using A0V standard and Vega model:
        - Load science spectrum and standard frames (A/B) and arc
        - Detect slit edges and object position in standard, choose extraction column
        - Solve wavelength solution from arc and average across nearby columns
        - Extract standard spectrum (POS-only) with tuned aperture/background
        - Build telluric transmission on standard grid (per band)
        - Interpolate T to science wavelength grid and apply within valid bounds
        - Save corrected spectrum and QA plots
    Inputs:
        science_fits: Path to input science spectrum FITS with table
            (wavelength_um, flux[, flux_err])
        raw_dir: Directory containing raw standard frames (e.g., fire_####.fits)
        std_a_id: Integer ID for the A frame (e.g., 1234 for fire_1234.fits)
        std_b_id: Integer ID for the B frame (e.g., 1235 for fire_1235.fits)
        arc_path: Path to arc FITS (2D)
        ref_list_path: Path to reference line list (text; units parsed to microns)
        vega_model_path: Path to Vega model text file; wavelengths auto-converted to microns
        out_dir: Output directory for corrected FITS and QA
        qa_dir: Optional directory for QA plots (default: out_dir/qa)
        wl_range: Wavelength range (microns) for wavecal solution (default (0.83, 2.45))
        slit_x_hint: Approximate slit column bounds for edge detection (default (900, 1300))
        row_fraction: Fractional row range for profiles (default (0.35, 0.85))
        std_ap: Base half-aperture (columns) for standard extraction (default 9)
        std_bg_in: Base inner background offset (columns) (default 16)
        std_bg_out: Base outer background offset (columns) (default 32)
        R_fire: Resolving power used to broaden Vega (default 2000)
        T_min: Minimum allowed T when applying correction (default 0.2)
        T_max: Maximum allowed T when applying correction (default 1.2)
        show_plots: Show QA plots
    Returns:
        Path:
            Path to the output telluric-corrected FITS table saved
    """

    out_dir = Path(out_dir)
    qa_dir = Path(qa_dir) if qa_dir is not None else (out_dir / "qa")
    _ensure_dir(out_dir)
    _ensure_dir(qa_dir)

    # Load science spectrum and force a strictly increasing wavelength axis.
    sci_wl, sci_fx, sci_err, sci_hdr = _load_table_spectrum(science_fits)
    sci_wl, sci_fx = _orient_to_increasing(sci_wl, sci_fx)
    sci_wl, sci_fx = _assert_monotonic_and_align(sci_wl, sci_fx)
    if sci_err is not None:
        sci_err = np.asarray(sci_err, float)

        if sci_err.size == sci_wl.size:
            # Errors line up with wavelengths: reorder them the same way.
            _, sci_err = _orient_to_increasing(sci_wl, sci_err)
            _, sci_err = _assert_monotonic_and_align(sci_wl, sci_err)

        else:
            # Size mismatch: stretch errors onto the wavelength grid by
            # fractional index interpolation (best effort).
            idx_err = np.linspace(0, 1, sci_err.size)
            idx_wl = np.linspace(0, 1, sci_wl.size)
            sci_err = np.interp(idx_wl, idx_err, sci_err).astype(float)

    _plot_1d(sci_wl, sci_fx, "Science coadd (input)", qa_dir / "science_input.png", show=show_plots)

    # Load arc frame, reference line list, and the two standard-star frames.
    arc_data, _ = _load_fits_primary(arc_path)
    ref_lines = _load_line_list_to_microns(ref_list_path)

    def build_path(num): return Path(raw_dir) / f"fire_{num:04d}.fits"

    stdA, _ = _load_fits_primary(build_path(std_a_id))
    stdB, _ = _load_fits_primary(build_path(std_b_id))

    # Locate slit edges on the A-B difference image; fall back to the hint
    # if the detected slit is implausibly narrow (< 25 columns).
    std_sub = stdA - stdB
    le, re, *_ = _detect_slit_edges(std_sub, x_hint=slit_x_hint, hint_expand=250, row_frac=row_fraction)

    if (re - le) < 25:
        le, re = slit_x_hint

    obj_pos_a, obj_neg_a, _, _ = _detect_objects_in_slit(stdA, le, re, row_frac=(0.40, 0.80))

    def column_brightness(img, col, ap=5):
        """
        Purpose:
            Compute median brightness metric in local vertical band around column
        Inputs:
            img: 2D image array
            col: Column index
            ap: Half-width of local column window (default 5)
        Returns:
            float:
                Median brightness within row band used
        """

        # Middle 45%-75% of rows; avoids detector edges.
        nrows = img.shape[0]
        r0 = int(0.45 * nrows)
        r1 = int(0.75 * nrows)
        c0 = max(0, col - ap)
        c1 = min(img.shape[1], col + ap + 1)

        return float(np.nanmedian(img[r0:r1, c0:c1]))

    # Score both detected object candidates; -inf if outside the slit.
    bp = column_brightness(stdA, obj_pos_a, ap=5) if le <= obj_pos_a <= re else -np.inf
    bn = column_brightness(stdA, obj_neg_a, ap=5) if le <= obj_neg_a <= re else -np.inf

    if (not np.isfinite(bp) and not np.isfinite(bn)) or abs(obj_pos_a - obj_neg_a) < 8:
        # No usable candidate (or the two are suspiciously close): pick the
        # brightest column of the smoothed full-slit profile instead.
        band_rows = stdA[int(row_fraction[0] * stdA.shape[0]) : int(row_fraction[1] * stdA.shape[0]), le:re]
        prof = np.median(band_rows, axis=0).astype(float)
        arc_col_std = le + int(np.argmax(gaussian_filter1d(prof, 3.0)))

    else:
        # Take the brighter candidate, then refine it to the local profile
        # peak within +-8 columns.
        arc_col_std = obj_pos_a if bp >= bn else obj_neg_a

        band_rows = stdA[
            int(row_fraction[0] * stdA.shape[0]) : int(row_fraction[1] * stdA.shape[0]),
            max(le, arc_col_std - 8) : min(re, arc_col_std + 9),
        ]

        prof_local = np.median(band_rows, axis=0).astype(float)
        arc_col_std = max(le, arc_col_std - 8) + int(np.argmax(gaussian_filter1d(prof_local, 2.0)))

    # Wavelength solution from the arc at the chosen column (averaged over
    # +-1 neighboring columns).
    wl_std_pix = _average_wavecal_across_cols(arc_data, arc_col_std, half=1, ref_lines_um=ref_lines, wl_range=wl_range)

    def extract_std_tuned(img, col, ap0, bgi0, bgo0):
        """
        Purpose:
            Extract robust standard star spectrum by trying multiple aperture
            and background settings and selecting a viable result
        Inputs:
            img: 2D image array
            col: Central column index
            ap0: Base half-aperture in columns
            bgi0: Base inner background offset
            bgo0: Base outer background offset
        Returns:
            tuple:
                - flux (np.ndarray): Extracted 1D flux
                - err (np.ndarray): Estimated per-row 1-sigma errors
        """

        # Progressively wider apertures and farther background annuli.
        ap_list = [ap0, ap0 + 2, ap0 + 4, ap0 + 6]

        bg_list = [
            (bgi0, bgo0),
            (max(bgi0 + 4, 16), max(bgo0 + 8, 32)),
            (max(bgi0 + 8, 22), max(bgo0 + 14, 42)),
        ]

        best = (-1e30, None, None, (ap0, bgi0, bgo0))

        for ap in ap_list:
            for bgi, bgo in bg_list:
                fx, er = _extract_cols_median_with_err(img, col, half=1, ap=ap, bg_in=bgi, bg_out=bgo)
                med = float(np.nanmedian(fx))

                # First combination with positive median flux wins.
                if med > 0:
                    return fx, er

                if med > best[0]:
                    best = (med, fx, er, (ap, bgi, bgo))

        # NOTE(review): if every candidate had NaN median, best keeps its
        # initial (None, None) payload and this returns (None, None) —
        # downstream nanmedian would then raise; confirm against callers.
        _, fx, er, _ = best

        return fx, er

    std_flux, std_err = extract_std_tuned(stdA, arc_col_std, std_ap, std_bg_in, std_bg_out)

    # Fallback: if the chosen column yielded non-positive flux, retry at the
    # other object candidate and keep whichever extraction is brighter.
    if float(np.nanmedian(std_flux)) <= 0 and le <= obj_neg_a <= re and obj_neg_a != arc_col_std:
        alt_fx, alt_er = extract_std_tuned(stdA, obj_neg_a, std_ap, std_bg_in, std_bg_out)
        if float(np.nanmedian(alt_fx)) > float(np.nanmedian(std_flux)):
            std_flux, std_err = alt_fx, alt_er
            arc_col_std = obj_neg_a

    # Light smoothing of the standard spectrum to suppress pixel noise.
    if std_flux is not None and std_flux.size > 11:
        std_flux = gaussian_filter1d(std_flux, sigma=0.5, mode="nearest")

    wl_std_pix, std_flux = _assert_monotonic_and_align(*_orient_to_increasing(wl_std_pix, std_flux))

    if std_err is not None:
        _, std_err = _assert_monotonic_and_align(*_orient_to_increasing(wl_std_pix, std_err))

    # Load Vega model; force positivity if the median flux is non-positive.
    v_wl, v_fx = _load_vega_model(vega_model_path)

    if np.nanmedian(v_fx) <= 0:
        v_fx = np.abs(v_fx) + 1e-6

    # Trim Kstd pixels off both ends of the standard, where the wavelength
    # solution and extraction are least reliable.
    Kstd = 6

    if wl_std_pix.size > 2 * Kstd:
        wl_std = wl_std_pix[Kstd:-Kstd]
        std_fx = std_flux[Kstd:-Kstd]

    else:
        wl_std, std_fx = wl_std_pix, std_flux

    # Telluric transmission on the standard's native wavelength grid.
    T_std = _build_T_per_band_dense(
        wl_std,
        std_fx,
        v_wl,
        v_fx,
        R_fire=R_fire,
        deg_cont=2,
        min_run=60,
        smooth_sigma=0.7,
        clip_lo=0.06,
        clip_hi=1.12,
    )

    def dense_support_mask(w, y, min_run=60):
        """
        Purpose:
            Identify indices where y is finite/positive in contiguous runs of at least min_run
        Inputs:
            w: 1D x-values (unused except for length consistency)
            y: 1D values to test for support
            min_run: Minimum length of contiguous run to keep (default 60)
        Returns:
            np.ndarray:
                Boolean mask of same shape as y indicating supported indices
        """

        w = np.asarray(w, float)
        y = np.asarray(y, float)
        m = np.isfinite(w) & np.isfinite(y) & (y > 0)
        keep = np.zeros_like(m, bool)
        i = 0
        n = m.size

        # Scan for maximal runs of True; keep only runs of length >= min_run.
        while i < n:
            if not m[i]:
                i += 1
                continue
            j = i

            while j < n and m[j]:
                j += 1

            if (j - i) >= min_run:
                keep[i:j] = True

            i = j

        return keep

    m_dense = dense_support_mask(wl_std, T_std, min_run=60)
    T_on_grid = np.full_like(sci_wl, np.nan, float)

    # Interpolate T onto the science grid only inside the supported overlap.
    if m_dense.sum() >= 60:
        wmin, wmax = wl_std[m_dense].min(), wl_std[m_dense].max()
        inside = (sci_wl >= wmin) & (sci_wl <= wmax)

        if inside.any():
            T_on_grid[inside] = np.interp(sci_wl[inside], wl_std[m_dense], T_std[m_dense])

    # Apply the correction only where T is within [T_min, T_max]; elsewhere
    # the corrected flux stays NaN rather than amplifying noise.
    T_safe = np.where(
        np.isfinite(T_on_grid) & (T_on_grid >= T_min) & (T_on_grid <= T_max),
        T_on_grid,
        np.nan,
    )

    fx_corr = np.divide(
        sci_fx,
        T_safe,
        out=np.full_like(sci_fx, np.nan, dtype=float),
        where=np.isfinite(T_safe),
    )

    err_corr = None

    if sci_err is not None:
        # Errors scale by the same 1/T factor as the flux.
        err_corr = np.divide(
            sci_err,
            T_safe,
            out=np.full_like(sci_err, np.nan, dtype=float),
            where=np.isfinite(T_safe),
        )

    # QA plots (the input-spectrum plot is re-emitted here; same file name).
    _plot_1d(sci_wl, sci_fx, "Science coadd (input)", qa_dir / "science_input.png", show=show_plots)
    mT = np.isfinite(T_on_grid)

    if mT.sum() >= 10:
        _plot_1d(
            sci_wl[mT],
            T_on_grid[mT],
            "Telluric T(λ) — science grid",
            qa_dir / "T_on_science_grid.png",
            xlabel="Wavelength (um)",
            ylabel="Transmission",
            show=False,
        )
    m_corr = np.isfinite(fx_corr)

    if m_corr.sum() >= 10:
        _plot_1d(
            sci_wl[m_corr],
            fx_corr[m_corr],
            "Science (telluric-corrected, masked)",
            qa_dir / "science_telluric_corrected.png",
            show=show_plots,
        )

    # Save the corrected spectrum with a processing-history trail.
    out_fits = out_dir / f"telluric_corrected_{Path(science_fits).stem}.fits"

    hist = [
        "Telluric: POS-only standard, band-wise scaling, deep-gap masking",
        f"Vega broadened to R~{R_fire}, smoothing within contiguous segments only",
        f"Applied only where {T_min}<=T<={T_max} and within overlap",
    ]

    _write_spectrum_with_err(out_fits, sci_wl, fx_corr, err_corr, base_header=sci_hdr, history=hist)

    return out_fits
|
|
1330
|
+
|
|
1331
|
+
|
|
1332
|
+
def _load_line_list_to_microns(path: str | Path) -> np.ndarray:
|
|
1333
|
+
"""
|
|
1334
|
+
Purpose:
|
|
1335
|
+
Load reference line list from text, parsing units to microns (um)
|
|
1336
|
+
Accepts tokens with explicit units (um, µm, nm, A/Ang/angstroms) or bare numbers
|
|
1337
|
+
with heuristic unit inference
|
|
1338
|
+
Inputs:
|
|
1339
|
+
path: Path-like to text file containing line wavelengths
|
|
1340
|
+
Returns:
|
|
1341
|
+
np.ndarray:
|
|
1342
|
+
Sorted array of wavelengths in microns (float), finite values only
|
|
1343
|
+
"""
|
|
1344
|
+
|
|
1345
|
+
waves = []
|
|
1346
|
+
|
|
1347
|
+
with open(path, "r") as f:
|
|
1348
|
+
for raw in f:
|
|
1349
|
+
s = raw.strip()
|
|
1350
|
+
|
|
1351
|
+
if not s or s.startswith("#"):
|
|
1352
|
+
continue
|
|
1353
|
+
|
|
1354
|
+
parts = s.replace(",", " ").split()
|
|
1355
|
+
val = None
|
|
1356
|
+
unit = None
|
|
1357
|
+
|
|
1358
|
+
for tok in parts:
|
|
1359
|
+
tl = tok.lower()
|
|
1360
|
+
try:
|
|
1361
|
+
if tl.endswith(("um", "µm", "micron", "microns")):
|
|
1362
|
+
unit = "um"
|
|
1363
|
+
num = "".join(ch for ch in tok if (ch.isdigit() or ch in ".-+eE"))
|
|
1364
|
+
val = float(num)
|
|
1365
|
+
break
|
|
1366
|
+
|
|
1367
|
+
if tl.endswith("nm"):
|
|
1368
|
+
unit = "nm"
|
|
1369
|
+
val = float(tok[:-2])
|
|
1370
|
+
break
|
|
1371
|
+
|
|
1372
|
+
if tl.endswith(("a", "ang", "angstrom", "angstroms")):
|
|
1373
|
+
unit = "A"
|
|
1374
|
+
num = "".join(ch for ch in tok if (ch.isdigit() or ch in ".-+eE"))
|
|
1375
|
+
val = float(num)
|
|
1376
|
+
break
|
|
1377
|
+
|
|
1378
|
+
val = float(tok)
|
|
1379
|
+
unit = None
|
|
1380
|
+
break
|
|
1381
|
+
|
|
1382
|
+
except Exception:
|
|
1383
|
+
continue
|
|
1384
|
+
|
|
1385
|
+
if val is None:
|
|
1386
|
+
continue
|
|
1387
|
+
|
|
1388
|
+
if unit is None:
|
|
1389
|
+
if val > 1000:
|
|
1390
|
+
unit = "A"
|
|
1391
|
+
elif 400 <= val <= 5000:
|
|
1392
|
+
unit = "nm"
|
|
1393
|
+
else:
|
|
1394
|
+
unit = "um"
|
|
1395
|
+
|
|
1396
|
+
waves.append(val if unit == "um" else val / 1000.0 if unit == "nm" else val / 1e4)
|
|
1397
|
+
|
|
1398
|
+
arr = np.array(waves, dtype=float)
|
|
1399
|
+
arr = arr[np.isfinite(arr)]
|
|
1400
|
+
|
|
1401
|
+
return np.sort(arr)
|