pyreduce-astro 0.7a4__cp314-cp314-win_amd64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pyreduce/__init__.py +67 -0
- pyreduce/__main__.py +322 -0
- pyreduce/cli.py +342 -0
- pyreduce/clib/Release/_slitfunc_2d.cp311-win_amd64.exp +0 -0
- pyreduce/clib/Release/_slitfunc_2d.cp311-win_amd64.lib +0 -0
- pyreduce/clib/Release/_slitfunc_2d.cp312-win_amd64.exp +0 -0
- pyreduce/clib/Release/_slitfunc_2d.cp312-win_amd64.lib +0 -0
- pyreduce/clib/Release/_slitfunc_2d.cp313-win_amd64.exp +0 -0
- pyreduce/clib/Release/_slitfunc_2d.cp313-win_amd64.lib +0 -0
- pyreduce/clib/Release/_slitfunc_2d.cp314-win_amd64.exp +0 -0
- pyreduce/clib/Release/_slitfunc_2d.cp314-win_amd64.lib +0 -0
- pyreduce/clib/Release/_slitfunc_2d.obj +0 -0
- pyreduce/clib/Release/_slitfunc_bd.cp311-win_amd64.exp +0 -0
- pyreduce/clib/Release/_slitfunc_bd.cp311-win_amd64.lib +0 -0
- pyreduce/clib/Release/_slitfunc_bd.cp312-win_amd64.exp +0 -0
- pyreduce/clib/Release/_slitfunc_bd.cp312-win_amd64.lib +0 -0
- pyreduce/clib/Release/_slitfunc_bd.cp313-win_amd64.exp +0 -0
- pyreduce/clib/Release/_slitfunc_bd.cp313-win_amd64.lib +0 -0
- pyreduce/clib/Release/_slitfunc_bd.cp314-win_amd64.exp +0 -0
- pyreduce/clib/Release/_slitfunc_bd.cp314-win_amd64.lib +0 -0
- pyreduce/clib/Release/_slitfunc_bd.obj +0 -0
- pyreduce/clib/__init__.py +0 -0
- pyreduce/clib/_slitfunc_2d.cp311-win_amd64.pyd +0 -0
- pyreduce/clib/_slitfunc_2d.cp312-win_amd64.pyd +0 -0
- pyreduce/clib/_slitfunc_2d.cp313-win_amd64.pyd +0 -0
- pyreduce/clib/_slitfunc_2d.cp314-win_amd64.pyd +0 -0
- pyreduce/clib/_slitfunc_bd.cp311-win_amd64.pyd +0 -0
- pyreduce/clib/_slitfunc_bd.cp312-win_amd64.pyd +0 -0
- pyreduce/clib/_slitfunc_bd.cp313-win_amd64.pyd +0 -0
- pyreduce/clib/_slitfunc_bd.cp314-win_amd64.pyd +0 -0
- pyreduce/clib/build_extract.py +75 -0
- pyreduce/clib/slit_func_2d_xi_zeta_bd.c +1313 -0
- pyreduce/clib/slit_func_2d_xi_zeta_bd.h +55 -0
- pyreduce/clib/slit_func_bd.c +362 -0
- pyreduce/clib/slit_func_bd.h +17 -0
- pyreduce/clipnflip.py +147 -0
- pyreduce/combine_frames.py +861 -0
- pyreduce/configuration.py +191 -0
- pyreduce/continuum_normalization.py +329 -0
- pyreduce/cwrappers.py +404 -0
- pyreduce/datasets.py +238 -0
- pyreduce/echelle.py +413 -0
- pyreduce/estimate_background_scatter.py +130 -0
- pyreduce/extract.py +1362 -0
- pyreduce/extraction_width.py +77 -0
- pyreduce/instruments/__init__.py +0 -0
- pyreduce/instruments/aj.py +9 -0
- pyreduce/instruments/aj.yaml +51 -0
- pyreduce/instruments/andes.py +102 -0
- pyreduce/instruments/andes.yaml +72 -0
- pyreduce/instruments/common.py +711 -0
- pyreduce/instruments/common.yaml +57 -0
- pyreduce/instruments/crires_plus.py +103 -0
- pyreduce/instruments/crires_plus.yaml +101 -0
- pyreduce/instruments/filters.py +195 -0
- pyreduce/instruments/harpn.py +203 -0
- pyreduce/instruments/harpn.yaml +140 -0
- pyreduce/instruments/harps.py +312 -0
- pyreduce/instruments/harps.yaml +144 -0
- pyreduce/instruments/instrument_info.py +140 -0
- pyreduce/instruments/jwst_miri.py +29 -0
- pyreduce/instruments/jwst_miri.yaml +53 -0
- pyreduce/instruments/jwst_niriss.py +98 -0
- pyreduce/instruments/jwst_niriss.yaml +60 -0
- pyreduce/instruments/lick_apf.py +35 -0
- pyreduce/instruments/lick_apf.yaml +60 -0
- pyreduce/instruments/mcdonald.py +123 -0
- pyreduce/instruments/mcdonald.yaml +56 -0
- pyreduce/instruments/metis_ifu.py +45 -0
- pyreduce/instruments/metis_ifu.yaml +62 -0
- pyreduce/instruments/metis_lss.py +45 -0
- pyreduce/instruments/metis_lss.yaml +62 -0
- pyreduce/instruments/micado.py +45 -0
- pyreduce/instruments/micado.yaml +62 -0
- pyreduce/instruments/models.py +257 -0
- pyreduce/instruments/neid.py +156 -0
- pyreduce/instruments/neid.yaml +61 -0
- pyreduce/instruments/nirspec.py +215 -0
- pyreduce/instruments/nirspec.yaml +63 -0
- pyreduce/instruments/nte.py +42 -0
- pyreduce/instruments/nte.yaml +55 -0
- pyreduce/instruments/uves.py +46 -0
- pyreduce/instruments/uves.yaml +65 -0
- pyreduce/instruments/xshooter.py +39 -0
- pyreduce/instruments/xshooter.yaml +63 -0
- pyreduce/make_shear.py +607 -0
- pyreduce/masks/mask_crires_plus_det1.fits.gz +0 -0
- pyreduce/masks/mask_crires_plus_det2.fits.gz +0 -0
- pyreduce/masks/mask_crires_plus_det3.fits.gz +0 -0
- pyreduce/masks/mask_ctio_chiron.fits.gz +0 -0
- pyreduce/masks/mask_elodie.fits.gz +0 -0
- pyreduce/masks/mask_feros3.fits.gz +0 -0
- pyreduce/masks/mask_flames_giraffe.fits.gz +0 -0
- pyreduce/masks/mask_harps_blue.fits.gz +0 -0
- pyreduce/masks/mask_harps_red.fits.gz +0 -0
- pyreduce/masks/mask_hds_blue.fits.gz +0 -0
- pyreduce/masks/mask_hds_red.fits.gz +0 -0
- pyreduce/masks/mask_het_hrs_2x5.fits.gz +0 -0
- pyreduce/masks/mask_jwst_miri_lrs_slitless.fits.gz +0 -0
- pyreduce/masks/mask_jwst_niriss_gr700xd.fits.gz +0 -0
- pyreduce/masks/mask_lick_apf_.fits.gz +0 -0
- pyreduce/masks/mask_mcdonald.fits.gz +0 -0
- pyreduce/masks/mask_nes.fits.gz +0 -0
- pyreduce/masks/mask_nirspec_nirspec.fits.gz +0 -0
- pyreduce/masks/mask_sarg.fits.gz +0 -0
- pyreduce/masks/mask_sarg_2x2a.fits.gz +0 -0
- pyreduce/masks/mask_sarg_2x2b.fits.gz +0 -0
- pyreduce/masks/mask_subaru_hds_red.fits.gz +0 -0
- pyreduce/masks/mask_uves_blue.fits.gz +0 -0
- pyreduce/masks/mask_uves_blue_binned_2_2.fits.gz +0 -0
- pyreduce/masks/mask_uves_middle.fits.gz +0 -0
- pyreduce/masks/mask_uves_middle_2x2_split.fits.gz +0 -0
- pyreduce/masks/mask_uves_middle_binned_2_2.fits.gz +0 -0
- pyreduce/masks/mask_uves_red.fits.gz +0 -0
- pyreduce/masks/mask_uves_red_2x2.fits.gz +0 -0
- pyreduce/masks/mask_uves_red_2x2_split.fits.gz +0 -0
- pyreduce/masks/mask_uves_red_binned_2_2.fits.gz +0 -0
- pyreduce/masks/mask_xshooter_nir.fits.gz +0 -0
- pyreduce/pipeline.py +619 -0
- pyreduce/rectify.py +138 -0
- pyreduce/reduce.py +2065 -0
- pyreduce/settings/settings_AJ.json +19 -0
- pyreduce/settings/settings_ANDES.json +89 -0
- pyreduce/settings/settings_CRIRES_PLUS.json +89 -0
- pyreduce/settings/settings_HARPN.json +73 -0
- pyreduce/settings/settings_HARPS.json +69 -0
- pyreduce/settings/settings_JWST_MIRI.json +55 -0
- pyreduce/settings/settings_JWST_NIRISS.json +55 -0
- pyreduce/settings/settings_LICK_APF.json +62 -0
- pyreduce/settings/settings_MCDONALD.json +58 -0
- pyreduce/settings/settings_METIS_IFU.json +77 -0
- pyreduce/settings/settings_METIS_LSS.json +77 -0
- pyreduce/settings/settings_MICADO.json +78 -0
- pyreduce/settings/settings_NEID.json +73 -0
- pyreduce/settings/settings_NIRSPEC.json +58 -0
- pyreduce/settings/settings_NTE.json +60 -0
- pyreduce/settings/settings_UVES.json +54 -0
- pyreduce/settings/settings_XSHOOTER.json +78 -0
- pyreduce/settings/settings_pyreduce.json +184 -0
- pyreduce/settings/settings_schema.json +850 -0
- pyreduce/tools/__init__.py +0 -0
- pyreduce/tools/combine.py +117 -0
- pyreduce/trace.py +979 -0
- pyreduce/util.py +1366 -0
- pyreduce/wavecal/MICADO_HK_3arcsec_chip5.npz +0 -0
- pyreduce/wavecal/atlas/thar.fits +4946 -13
- pyreduce/wavecal/atlas/thar_list.txt +4172 -0
- pyreduce/wavecal/atlas/une.fits +0 -0
- pyreduce/wavecal/convert.py +38 -0
- pyreduce/wavecal/crires_plus_J1228_Open_det1.npz +0 -0
- pyreduce/wavecal/crires_plus_J1228_Open_det2.npz +0 -0
- pyreduce/wavecal/crires_plus_J1228_Open_det3.npz +0 -0
- pyreduce/wavecal/harpn_harpn_2D.npz +0 -0
- pyreduce/wavecal/harps_blue_2D.npz +0 -0
- pyreduce/wavecal/harps_blue_pol_2D.npz +0 -0
- pyreduce/wavecal/harps_red_2D.npz +0 -0
- pyreduce/wavecal/harps_red_pol_2D.npz +0 -0
- pyreduce/wavecal/mcdonald.npz +0 -0
- pyreduce/wavecal/metis_lss_l_2D.npz +0 -0
- pyreduce/wavecal/metis_lss_m_2D.npz +0 -0
- pyreduce/wavecal/nirspec_K2.npz +0 -0
- pyreduce/wavecal/uves_blue_360nm_2D.npz +0 -0
- pyreduce/wavecal/uves_blue_390nm_2D.npz +0 -0
- pyreduce/wavecal/uves_blue_437nm_2D.npz +0 -0
- pyreduce/wavecal/uves_middle_2x2_2D.npz +0 -0
- pyreduce/wavecal/uves_middle_565nm_2D.npz +0 -0
- pyreduce/wavecal/uves_middle_580nm_2D.npz +0 -0
- pyreduce/wavecal/uves_middle_600nm_2D.npz +0 -0
- pyreduce/wavecal/uves_middle_665nm_2D.npz +0 -0
- pyreduce/wavecal/uves_middle_860nm_2D.npz +0 -0
- pyreduce/wavecal/uves_red_580nm_2D.npz +0 -0
- pyreduce/wavecal/uves_red_600nm_2D.npz +0 -0
- pyreduce/wavecal/uves_red_665nm_2D.npz +0 -0
- pyreduce/wavecal/uves_red_760nm_2D.npz +0 -0
- pyreduce/wavecal/uves_red_860nm_2D.npz +0 -0
- pyreduce/wavecal/xshooter_nir.npz +0 -0
- pyreduce/wavelength_calibration.py +1871 -0
- pyreduce_astro-0.7a4.dist-info/METADATA +106 -0
- pyreduce_astro-0.7a4.dist-info/RECORD +182 -0
- pyreduce_astro-0.7a4.dist-info/WHEEL +4 -0
- pyreduce_astro-0.7a4.dist-info/entry_points.txt +2 -0
- pyreduce_astro-0.7a4.dist-info/licenses/LICENSE +674 -0
pyreduce/util.py
ADDED
|
@@ -0,0 +1,1366 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Collection of various useful and/or reoccuring functions across PyReduce
|
|
3
|
+
"""
|
|
4
|
+
|
|
5
|
+
import logging
|
|
6
|
+
import os
|
|
7
|
+
import warnings
|
|
8
|
+
|
|
9
|
+
import matplotlib.pyplot as plt
|
|
10
|
+
import numpy as np
|
|
11
|
+
import scipy.constants
|
|
12
|
+
import scipy.interpolate
|
|
13
|
+
from astropy import coordinates as coord
|
|
14
|
+
from astropy import time
|
|
15
|
+
from astropy import units as u
|
|
16
|
+
from scipy.linalg import lstsq, solve_banded
|
|
17
|
+
from scipy.ndimage.filters import median_filter
|
|
18
|
+
from scipy.optimize import curve_fit, least_squares
|
|
19
|
+
from scipy.special import binom
|
|
20
|
+
|
|
21
|
+
from . import __version__
|
|
22
|
+
|
|
23
|
+
logger = logging.getLogger(__name__)
|
|
24
|
+
|
|
25
|
+
# Global plot directory - set by Pipeline/main() to save plots instead of showing
# Read once at import time from the environment; set_plot_dir() can override later.
# When set, show_or_save() writes PNG files here instead of calling plt.show().
_plot_dir = os.environ.get("PYREDUCE_PLOT_DIR")
if _plot_dir:
    # Create the directory eagerly so later savefig calls cannot fail on a missing path
    os.makedirs(_plot_dir, exist_ok=True)
|
|
29
|
+
|
|
30
|
+
|
|
31
|
+
def set_plot_dir(path):
    """Configure where plots are written.

    Parameters
    ----------
    path : str or None
        Target directory for plot files; it is created if missing.
        If None, plots are shown interactively instead of being saved.
    """
    global _plot_dir
    _plot_dir = path
    if not path:
        return
    os.makedirs(path, exist_ok=True)
|
|
37
|
+
|
|
38
|
+
|
|
39
|
+
def show_or_save(name="plot"):
    """Display the current figure, or write it to the plot directory if one is set.

    Parameters
    ----------
    name : str
        Base name for the saved file (without extension)
    """
    if not _plot_dir:
        plt.show()
        return
    fname = os.path.join(_plot_dir, f"{name}.png")
    plt.savefig(fname, dpi=150, bbox_inches="tight")
    logger.debug("Saved plot to %s", fname)
    plt.close()
|
|
54
|
+
|
|
55
|
+
|
|
56
|
+
def plot_traces(im, traces, ax=None, imshow_kwargs=None, **line_kwargs):
    """Display an image and overlay polynomial order traces.

    Parameters
    ----------
    im : array[nrow, ncol]
        2D image to display
    traces : array or list
        Polynomial coefficients for the traces; a 2D array of shape
        (n_traces, degree+1) or a list of 1D coefficient arrays, in
        numpy polyval order (highest degree first).
    ax : matplotlib.axes.Axes, optional
        Axes to draw on; a new figure is created when None.
    imshow_kwargs : dict, optional
        Extra keyword arguments forwarded to imshow (vmin, vmax, cmap, ...)
    **line_kwargs
        Extra keyword arguments forwarded to plot for the trace lines
        (color, linewidth, alpha, ...)

    Returns
    -------
    ax : matplotlib.axes.Axes
        The axes containing the plot
    """
    if ax is None:
        _, ax = plt.subplots()

    image_opts = {"origin": "lower", "aspect": "auto", **(imshow_kwargs or {})}
    ax.imshow(im, **image_opts)

    columns = np.arange(im.shape[1])
    line_opts = {"color": "red", "linewidth": 0.5, **line_kwargs}

    for poly in np.atleast_2d(traces):
        ax.plot(columns, np.polyval(poly, columns), **line_opts)

    ax.set_xlim(0, im.shape[1] - 1)
    ax.set_ylim(0, im.shape[0] - 1)

    return ax
|
|
102
|
+
|
|
103
|
+
|
|
104
|
+
def resample(array, new_size):
    """Linearly interpolate *array* onto a grid of ``new_size`` points."""
    new_positions = np.arange(new_size)
    old_positions = np.linspace(0, new_size, len(array))
    return np.interp(new_positions, old_positions, array)
|
|
108
|
+
|
|
109
|
+
|
|
110
|
+
def remove_bias(img, ihead, bias, bhead, nfiles=1):
    """Subtract an exposure-time-scaled bias frame from an image.

    When either exposure time is zero, the bias is scaled by the
    number of combined files instead. If no bias (or bias header)
    is given the image is returned unchanged.
    """
    if bias is None or bhead is None:
        return img

    bias_exptime = bhead["EXPTIME"]
    img_exptime = ihead["EXPTIME"]
    if bias_exptime == 0 or img_exptime == 0:
        # Fall back to per-file scaling when exposure times are unusable
        bias_exptime = 1
        img_exptime = nfiles
    return img - bias * img_exptime / bias_exptime
|
|
119
|
+
|
|
120
|
+
|
|
121
|
+
def in_ipynb():
    """Return True when running inside an IPython notebook kernel.

    Outside IPython, ``get_ipython`` is undefined and the NameError
    is taken to mean "not a notebook".
    """
    try:
        cfg = get_ipython().config  # noqa: F821 - only defined under IPython
        return cfg["IPKernelApp"]["parent_appname"] == "ipython-notebook"
    except NameError:
        return False
|
|
130
|
+
|
|
131
|
+
|
|
132
|
+
def log_version():
    """Log the PyReduce version at debug level (for debug purposes)."""
    # Separator line makes the version easy to spot in long log files
    logger.debug("----------------------")
    logger.debug("PyReduce version: %s", __version__)
|
|
136
|
+
|
|
137
|
+
|
|
138
|
+
def start_logging(log_file="log.log"):
    """Start logging to log file and command line

    Parameters
    ----------
    log_file : str, optional
        name of the logging file (default: "log.log")
    """
    # Only create a directory when the path actually contains one;
    # os.makedirs("") raises FileNotFoundError for a bare filename
    # such as the default "log.log".
    log_dir = os.path.dirname(log_file)
    if log_dir:
        os.makedirs(log_dir, exist_ok=True)

    logging.basicConfig(
        filename=log_file,
        level=logging.DEBUG,
        format="%(asctime)-15s - %(levelname)s - %(name)-8s - %(message)s",
    )
    # Route warnings.warn() output through the logging system as well
    logging.captureWarnings(True)
    log_version()
|
|
156
|
+
|
|
157
|
+
|
|
158
|
+
def vac2air(wl_vac):
    """
    Convert vacuum wavelengths to wavelengths in air

    The refraction correction is applied only above 2000 Angstrom;
    shorter wavelengths are returned unchanged. The input array is
    not modified.

    Author: Nikolai Piskunov

    Parameters
    ----------
    wl_vac : array(float)
        vacuum wavelengths

    Returns
    -------
    wl_air : array(float)
        air wavelengths
    """
    # Work on a copy: the previous implementation aliased the input and
    # mutated the caller's array in place (unlike air2vac, which copies).
    wl_air = np.copy(wl_vac)
    ii = np.where(wl_vac > 2e3)

    sigma2 = (1e4 / wl_vac[ii]) ** 2  # Compute wavenumbers squared
    fact = (
        1e0
        + 8.34254e-5
        + 2.406147e-2 / (130e0 - sigma2)
        + 1.5998e-4 / (38.9e0 - sigma2)
    )
    wl_air[ii] = wl_vac[ii] / fact  # Convert to air wavelength
    return wl_air
|
|
175
|
+
|
|
176
|
+
|
|
177
|
+
def air2vac(wl_air):
    """Convert wavelengths in air to vacuum wavelengths.

    The refraction correction is applied only above 1999.352 A;
    shorter wavelengths pass through unchanged. The input array is
    not modified. Author: Nikolai Piskunov
    """
    wl_vac = np.copy(wl_air)
    # Indices where the conversion formula is valid
    mask = np.where(wl_air > 1999.352)

    wavenum2 = (1e4 / wl_air[mask]) ** 2  # wavenumbers squared
    refraction = (
        1e0
        + 8.336624212083e-5
        + 2.408926869968e-2 / (1.301065924522e2 - wavenum2)
        + 1.599740894897e-4 / (3.892568793293e1 - wavenum2)
    )
    # Scale up by the refractive index to get the vacuum wavelength
    wl_vac[mask] = wl_air[mask] * refraction
    return wl_vac
|
|
194
|
+
|
|
195
|
+
|
|
196
|
+
def swap_extension(fname, ext, path=None):
    """Replace the extension of *fname* with *ext*.

    A trailing ".gz" is stripped first, so "a.fits.gz" becomes
    "a" + ext. When *path* is None the file keeps its directory.
    """
    if path is None:
        path = os.path.dirname(fname)
    base = os.path.basename(fname)
    if base.endswith(".gz"):
        base = base[:-3]
    stem = base.rsplit(".", 1)[0]
    return os.path.join(path, stem + ext)
|
|
206
|
+
|
|
207
|
+
|
|
208
|
+
def find_first_index(arr, value):
    """Return the index of the first element of *arr* equal to *value*.

    Raises
    ------
    ValueError
        If *value* does not occur in *arr*.
    """
    try:
        return next(i for i, v in enumerate(arr) if v == value)
    except StopIteration as e:
        # ValueError is more specific than the bare Exception raised before,
        # and remains catchable by any existing `except Exception` handler.
        raise ValueError(f"Value {value} not found") from e
|
|
214
|
+
|
|
215
|
+
|
|
216
|
+
def interpolate_masked(masked):
    """Fill masked values by linear interpolation from the unmasked ones.

    Parameters
    ----------
    masked : masked_array
        1D masked array to interpolate on

    Returns
    -------
    interpolated : array
        plain array with the masked entries replaced by interpolated values
    """
    mask = np.ma.getmaskarray(masked)
    good = np.flatnonzero(~mask)
    positions = np.arange(len(masked))
    return np.interp(positions, good, masked[good])
|
|
234
|
+
|
|
235
|
+
|
|
236
|
+
def cutout_image(img, ymin, ymax, xmin, xmax):
    """Extract a curved, fixed-height band from an image.

    Parameters
    ----------
    img : array
        image
    ymin : array[ncol](int)
        lower y value per column (inclusive)
    ymax : array[ncol](int)
        upper y value per column (inclusive)
    xmin : int
        leftmost column (inclusive)
    xmax : int
        rightmost column (exclusive)

    Returns
    -------
    cutout : array[height, xmax - xmin]
        selection of the image; the caller must ensure ymax - ymin
        is the same for every column
    """
    height = ymax[0] - ymin[0] + 1
    cutout = np.zeros((height, xmax - xmin), dtype=img.dtype)
    for offset, col in enumerate(range(xmin, xmax)):
        cutout[:, offset] = img[ymin[col] : ymax[col] + 1, col]
    return cutout
|
|
262
|
+
|
|
263
|
+
|
|
264
|
+
def make_index(ymin, ymax, xmin, xmax, zero=0):
    """Create an index (numpy style) that will select part of an image with changing position but fixed height

    The user is responsible for making sure the height is constant, otherwise it will still work, but the subsection will not have the desired format

    Parameters
    ----------
    ymin : array[ncol](int)
        lower y border (inclusive)
    ymax : array[ncol](int)
        upper y border (inclusive)
    xmin : int
        leftmost column
    xmax : int
        rightmost colum (exclusive)
    zero : bool, optional
        if True count y array from 0 instead of xmin (default: False)

    Returns
    -------
    index : tuple(array[height, width], array[height, width])
        numpy index for the selection of a subsection of an image
    """

    # TODO
    # Define the indices for the pixels between two y arrays, e.g. pixels in an order
    # in x: the rows between ymin and ymax
    # in y: the column, but n times to match the x index
    ymin = np.asarray(ymin, dtype=int)
    ymax = np.asarray(ymax, dtype=int)
    xmin = int(xmin)
    xmax = int(xmax)

    if zero:
        # When zero is truthy, ymin/ymax are indexed starting at 0,
        # i.e. entry 0 corresponds to column xmin
        zero = xmin

    # Row indices: for each column, the run of rows ymin..ymax (inclusive)
    index_x = np.array(
        [np.arange(ymin[col], ymax[col] + 1) for col in range(xmin - zero, xmax - zero)]
    )
    # Column indices: each column number repeated once per selected row
    index_y = np.array(
        [
            np.full(ymax[col] - ymin[col] + 1, col)
            for col in range(xmin - zero, xmax - zero)
        ]
    )
    # Transpose to (height, width) and shift columns back to absolute coordinates
    index = index_x.T, index_y.T + zero

    return index
|
|
312
|
+
|
|
313
|
+
|
|
314
|
+
def gridsearch(func, grid, args=(), kwargs=None):
    """Evaluate *func* at every point of a parameter grid.

    Parameters
    ----------
    func : callable
        Called as ``func(grid[idx], *args, **kwargs)`` for each grid point.
    grid : array of shape (..., ndim)
        Parameter grid; the last axis holds one parameter vector.
    args : tuple, optional
        Extra positional arguments for *func*.
    kwargs : dict, optional
        Extra keyword arguments for *func*.

    Returns
    -------
    matrix : array of shape grid.shape[:-1]
        Result of *func* at each grid point; np.nan where *func* raised.
    """
    if kwargs is None:
        kwargs = {}
    matrix = np.zeros(grid.shape[:-1])

    for idx in np.ndindex(grid.shape[:-1]):
        value = grid[idx]
        print(f"Value: {value}")
        try:
            result = func(value, *args, **kwargs)
            print(f"Success: {result}")
        except Exception as e:
            # Deliberately broad: any failed grid point becomes NaN
            result = np.nan
            print(f"Failed: {e}")
        # Assign after the try block: the previous `finally` assignment
        # raised NameError when func raised a BaseException (e.g.
        # KeyboardInterrupt) before `result` was bound.
        matrix[idx] = result

    return matrix
|
|
332
|
+
|
|
333
|
+
|
|
334
|
+
def gaussfit(x, y):
    """
    Fit a simple offset-free gaussian to data

    gauss(x, a, mu, sigma) = a * exp(-z**2/2)
    with z = (x - mu) / sigma

    Parameters
    ----------
    x : array(float)
        x values
    y : array(float)
        y values
    Returns
    -------
    gauss(x), parameters
        fitted values for x, fit paramters (a, mu, sigma)
    """

    def model(t, amplitude, center, width):
        z = (t - center) / width
        return amplitude * np.exp(-(z**2) / 2)

    params, _ = curve_fit(model, x, y, p0=[max(y), 0, 1])
    return model(x, *params), params
|
|
358
|
+
|
|
359
|
+
|
|
360
|
+
def gaussfit2(x, y):
    """Fit a gaussian(normal) curve to data x, y

    gauss = A * exp(-(x-mu)**2/(2*sig**2)) + offset

    Parameters
    ----------
    x : array[n]
        x values
    y : array[n]
        y values

    Returns
    -------
    popt : array[4]
        coefficients of the gaussian: A, mu, sigma**2, offset

    Raises
    ------
    ValueError
        if all values are masked, or x and y have different masks
    """

    gauss = gaussval2

    # Drop masked entries; both arrays must lose the same number of elements
    x = np.ma.compressed(x)
    y = np.ma.compressed(y)

    if len(x) == 0 or len(y) == 0:
        raise ValueError("All values masked")

    if len(x) != len(y):
        raise ValueError("The masks of x and y are different")

    # Find the peak in the center of the image
    # Triangular weights bias the argmax toward the middle of the data,
    # so edge spikes do not hijack the initial guess
    weights = np.ones(len(y), dtype=y.dtype)
    midpoint = len(y) // 2
    weights[:midpoint] = np.linspace(0, 1, midpoint, dtype=weights.dtype)
    weights[midpoint:] = np.linspace(1, 0, len(y) - midpoint, dtype=weights.dtype)

    i = np.argmax(y * weights)
    # Initial guess: peak height, peak position, unit width
    p0 = [y[i], x[i], 1]
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        # The offset is held fixed at min(y); only A, mu, sig are free.
        # soft_l1 loss makes the fit robust against outliers.
        res = least_squares(
            lambda c: gauss(x, *c, np.ma.min(y)) - y,
            p0,
            loss="soft_l1",
            bounds=(
                # Amplitude at least the mean (or the guessed peak, if lower),
                # position within the data range, non-negative width
                [min(np.ma.mean(y), y[i]), np.ma.min(x), 0],
                [np.ma.max(y) * 1.5, np.ma.max(x), len(x) / 2],
            ),
        )
    # Append the fixed offset so the return matches gaussval2's parameters
    popt = list(res.x) + [np.min(y)]
    return popt
|
|
410
|
+
|
|
411
|
+
|
|
412
|
+
def gaussfit3(x, y):
    """A very simple (and relatively fast) gaussian fit
    gauss = A * exp(-(x-mu)**2/(2*sig**2)) + offset

    Parameters
    ----------
    x : array of shape (n,)
        x data
    y : array of shape (n,)
        y data

    Returns
    -------
    popt : list of shape (4,)
        Parameters A, mu, sigma**2, offset
    """
    bad = np.ma.getmaskarray(x) | np.ma.getmaskarray(y)
    x, y = x[~bad], y[~bad]

    # Restrict the peak search to the central half of the data
    quarter = len(y) // 4
    peak = np.argmax(y[quarter : len(y) * 3 // 4]) + quarter
    initial = [y[peak], x[peak], 1, np.min(y)]

    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        popt, _ = curve_fit(gaussval2, x, y, p0=initial)

    return popt
|
|
440
|
+
|
|
441
|
+
|
|
442
|
+
def gaussfit4(x, y):
    """A very simple (and relatively fast) gaussian fit
    gauss = A * exp(-(x-mu)**2/(2*sig**2)) + offset

    Assumes x is sorted

    Parameters
    ----------
    x : array of shape (n,)
        x data
    y : array of shape (n,)
        y data

    Returns
    -------
    popt : list of shape (4,)
        Parameters A, mu, sigma**2, offset
    """
    x = np.ma.compressed(x)
    y = np.ma.compressed(y)

    # Initial guess straight from the data maximum
    peak = np.argmax(y)
    initial = [y[peak], x[peak], 1, np.min(y)]

    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        popt, _ = curve_fit(gaussval2, x, y, p0=initial)

    return popt
|
|
471
|
+
|
|
472
|
+
|
|
473
|
+
def gaussfit_linear(x, y):
    """Transform the gaussian fit into a linear least squares problem, and solve that instead of the non-linear curve fit
    For efficiency reasons. (roughly 10 times faster than the curve fit)

    Fits log(y) with a quadratic, weighted by y**2 so the noisy
    low-signal tail does not dominate the fit.

    Parameters
    ----------
    x : array of shape (n,)
        x data
    y : array of shape (n,)
        y data

    Returns
    -------
    coef : tuple
        a, mu, sig, offset
    """
    # Only positive values survive the logarithm below
    x = x[y > 0]
    y = y[y > 0]

    offset = np.min(y)
    # Small epsilon keeps log() finite at the minimum point
    y = y - offset + 1e-12

    # Weight by y so high-signal points dominate the log-space fit
    weights = y

    d = np.log(y)
    # np.float was removed in NumPy 1.20+ (error since 1.24); use builtin float
    G = np.ones((x.size, 3), dtype=float)
    G[:, 0] = x**2
    G[:, 1] = x

    beta, _, _, _ = np.linalg.lstsq((G.T * weights**2).T, d * weights**2, rcond=None)

    # Convert the quadratic log-fit coefficients back to gaussian parameters
    a = np.exp(beta[2] - beta[1] ** 2 / (4 * beta[0]))
    sig = -1 / (2 * beta[0])
    mu = -beta[1] / (2 * beta[0])

    return a, mu, sig, offset
|
|
509
|
+
|
|
510
|
+
|
|
511
|
+
def gaussval2(x, a, mu, sig, const):
    """Evaluate a gaussian with variance ``sig`` (i.e. sigma squared) plus a constant offset."""
    z = ((x - mu) ** 2) / (2 * sig)
    return a * np.exp(-z) + const
|
|
513
|
+
|
|
514
|
+
|
|
515
|
+
def gaussbroad(x, y, hwhm):
    """
    Apply gaussian broadening to x, y data with half width half maximum hwhm

    Assumes x is uniformly spaced.

    Parameters
    ----------
    x : array(float)
        x values
    y : array(float)
        y values
    hwhm : float > 0
        half width half maximum
    Returns
    -------
    array(float)
        broadened y values
    """

    # alternatively use:
    # from scipy.ndimage.filters import gaussian_filter1d as gaussbroad
    # but that doesn't have an x coordinate

    nw = len(x)
    # Step size of the (assumed uniform) x grid
    dw = (x[-1] - x[0]) / (len(x) - 1)

    # Kernel much wider than the whole data range: result is just the mean
    if hwhm > 5 * (x[-1] - x[0]):
        return np.full(len(x), sum(y) / len(x))

    # Kernel half size in pixels; 3.3972872 truncates the gaussian where it
    # has decayed to a negligible level — presumably ~exp(-8); confirm if reused
    nhalf = int(3.3972872 * hwhm / dw)
    ng = 2 * nhalf + 1  # points in gaussian (odd!)
    # wavelength scale of gaussian
    wg = dw * (np.arange(0, ng, 1, dtype=float) - (ng - 1) / 2)
    # 0.83255461 ~= sqrt(ln 2): exp(-xg^2) reaches 1/2 exactly at wg = hwhm
    xg = (0.83255461 / hwhm) * wg  # convenient absisca
    gpro = (0.46974832 * dw / hwhm) * np.exp(-xg * xg)  # unit area gaussian w/ FWHM
    gpro = gpro / np.sum(gpro)  # renormalize after truncation

    # Pad spectrum ends to minimize impact of Fourier ringing.
    npad = nhalf + 2  # pad pixels on each end
    spad = np.concatenate((np.full(npad, y[0]), y, np.full(npad, y[-1])))

    # Convolve and trim.
    sout = np.convolve(spad, gpro)  # convolve with gaussian
    sout = sout[npad : npad + nw]  # trim to original data / length
    return sout  # return broadened spectrum.
|
|
559
|
+
|
|
560
|
+
|
|
561
|
+
def polyfit1d(x, y, degree=1, regularization=0):
    """Fit a 1D polynomial with optional ridge (Tikhonov) regularization.

    Parameters
    ----------
    x : array
        x coordinates
    y : array
        data values
    degree : int, optional
        polynomial degree (default: 1)
    regularization : float, optional
        regularization weight, scaled by the squared coefficient
        index so higher orders are damped more (default: 0)

    Returns
    -------
    coeff : array[degree+1]
        coefficients in np.polyval order (highest degree first)
    """
    powers = np.arange(degree + 1)

    # Vandermonde-style design matrix; column i is x**i
    A = np.array([np.power(x, i) for i in powers], dtype=float).T
    b = np.asarray(y).ravel()

    # Ridge penalty grows quadratically with the coefficient index
    L = regularization * powers**2
    # Solve the regularized normal equations directly; better
    # conditioned than forming the explicit matrix inverse
    coeff = np.linalg.solve(A.T @ A + np.diag(L), A.T @ b)

    # Flip to np.polyval convention (highest degree first)
    return coeff[::-1]
|
|
575
|
+
|
|
576
|
+
|
|
577
|
+
def _get_coeff_idx(coeff):
|
|
578
|
+
idx = np.indices(coeff.shape)
|
|
579
|
+
idx = idx.T.swapaxes(0, 1).reshape((-1, 2))
|
|
580
|
+
# degree = coeff.shape
|
|
581
|
+
# idx = [[i, j] for i, j in product(range(degree[0]), range(degree[1]))]
|
|
582
|
+
# idx = np.asarray(idx)
|
|
583
|
+
return idx
|
|
584
|
+
|
|
585
|
+
|
|
586
|
+
def _scale(x, y):
|
|
587
|
+
# Normalize x and y to avoid huge numbers
|
|
588
|
+
# Mean 0, Variation 1
|
|
589
|
+
offset_x, offset_y = np.mean(x), np.mean(y)
|
|
590
|
+
norm_x, norm_y = np.std(x), np.std(y)
|
|
591
|
+
if norm_x == 0:
|
|
592
|
+
norm_x = 1
|
|
593
|
+
if norm_y == 0:
|
|
594
|
+
norm_y = 1
|
|
595
|
+
x = (x - offset_x) / norm_x
|
|
596
|
+
y = (y - offset_y) / norm_y
|
|
597
|
+
return x, y, (norm_x, norm_y), (offset_x, offset_y)
|
|
598
|
+
|
|
599
|
+
|
|
600
|
+
def _unscale(x, y, norm, offset):
|
|
601
|
+
x = x * norm[0] + offset[0]
|
|
602
|
+
y = y * norm[1] + offset[1]
|
|
603
|
+
return x, y
|
|
604
|
+
|
|
605
|
+
|
|
606
|
+
def polyvander2d(x, y, degree):
    """Pseudo-Vandermonde matrix for a 2D polynomial; thin wrapper around numpy."""
    return np.polynomial.polynomial.polyvander2d(x, y, degree)
|
|
610
|
+
|
|
611
|
+
|
|
612
|
+
def polyscale2d(coeff, scale_x, scale_y, copy=True):
    """Rescale 2D polynomial coefficients by per-axis scale factors.

    Divides coeff[i, j] by scale_x**i * scale_y**j; with copy=False
    the input array is modified in place and returned.
    """
    if copy:
        coeff = np.copy(coeff)
    for i, j in np.ndindex(coeff.shape):
        coeff[i, j] /= scale_x**i * scale_y**j
    return coeff
|
|
619
|
+
|
|
620
|
+
|
|
621
|
+
def polyshift2d(coeff, offset_x, offset_y, copy=True):
    """Shift the origin of a 2D polynomial using the binomial theorem.

    NOTE(review): from the (-1)**(...) factor this appears to compute the
    coefficients of p(x - offset_x, y - offset_y) — confirm the sign
    convention against the caller (polyfit2d's scale/unscale) before reuse.

    Parameters
    ----------
    coeff : array[deg_x+1, deg_y+1]
        2D polynomial coefficients, coeff[i, j] for x**i * y**j
    offset_x, offset_y : float
        shift applied along x and y
    copy : bool, optional
        operate on a copy instead of in place (default: True)

    Returns
    -------
    coeff : array[deg_x+1, deg_y+1]
        coefficients of the shifted polynomial
    """
    if copy:
        coeff = np.copy(coeff)
    idx = _get_coeff_idx(coeff)
    # Copy coeff because it changes during the loop
    coeff2 = np.copy(coeff)
    for k, m in idx:
        # Each higher-order term (i, j) >= (k, m) contributes to (k, m)
        # via binomial expansion of (x + dx)**i * (y + dy)**j
        not_the_same = ~((idx[:, 0] == k) & (idx[:, 1] == m))
        above = (idx[:, 0] >= k) & (idx[:, 1] >= m) & not_the_same
        for i, j in idx[above]:
            b = binom(i, k) * binom(j, m)
            sign = (-1) ** ((i - k) + (j - m))
            offset = offset_x ** (i - k) * offset_y ** (j - m)
            coeff[k, m] += sign * b * coeff2[i, j] * offset
    return coeff
|
|
636
|
+
|
|
637
|
+
|
|
638
|
+
def plot2d(x, y, z, coeff, title=None):
    """Plot a fitted 2D polynomial surface together with the data points.

    Parameters
    ----------
    x, y, z : array
        data coordinates and values; subsampled to 500 random points
        when larger, to keep the 3D scatter responsive
    coeff : array[deg_x+1, deg_y+1]
        2D polynomial coefficients in numpy polyval2d format
    title : str, optional
        plot title
    """
    # regular grid covering the domain of the data
    if x.size > 500:
        # Random subsample for plotting speed only; the fit is unaffected
        choice = np.random.choice(x.size, size=500, replace=False)
    else:
        choice = slice(None, None, None)
    x, y, z = x[choice], y[choice], z[choice]
    X, Y = np.meshgrid(
        np.linspace(np.min(x), np.max(x), 20), np.linspace(np.min(y), np.max(y), 20)
    )
    # Evaluate the fitted surface on the regular grid
    Z = np.polynomial.polynomial.polyval2d(X, Y, coeff)
    fig = plt.figure()
    ax = fig.add_subplot(111, projection="3d")
    ax.plot_surface(X, Y, Z, rstride=1, cstride=1, alpha=0.2)
    ax.scatter(x, y, z, c="r", s=50)
    plt.xlabel("X")
    plt.ylabel("Y")
    ax.set_zlabel("Z")
    if title is not None:
        plt.title(title)
    # ax.axis("equal")
    # ax.axis("tight")
    show_or_save("polyfit2d")
|
|
661
|
+
|
|
662
|
+
|
|
663
|
+
def polyfit2d(
    x, y, z, degree=1, max_degree=None, scale=True, plot=False, plot_title=None
):
    """Fit a simple 2D polynomial to the data x, y, z.

    The result can be evaluated with numpy.polynomial.polynomial.polyval2d.

    Parameters
    ----------
    x : array[n]
        x coordinates
    y : array[n]
        y coordinates
    z : array[n]
        data values
    degree : int or (int, int), optional
        degree of the polynomial fit, per axis if a pair is given (default: 1)
    max_degree : {int, None}, optional
        if given, terms whose combined x+y power exceeds this value are dropped
    scale : bool, optional
        whether to normalize x and y (mean 0, variance 1) before fitting to
        avoid numerical overflow; especially useful at higher degrees
        (default: True)
    plot : bool, optional
        whether to plot the fitted surface and data (slow) (default: False)

    Returns
    -------
    coeff : array[degree+1, degree+1]
        polynomial coefficients in numpy 2d format, i.e. coeff[i, j]
        multiplies x**i * y**j
    """
    # Work on flat 1d views of the input
    x = np.asarray(x).ravel()
    y = np.asarray(y).ravel()
    z = np.asarray(z).ravel()

    # Drop entries that are masked in any of the three inputs
    keep = ~(np.ma.getmask(z) | np.ma.getmask(x) | np.ma.getmask(y))
    x, y, z = x[keep].ravel(), y[keep].ravel(), z[keep].ravel()

    if scale:
        x, y, norm, offset = _scale(x, y)

    # Normalize the degree specification to a pair of ints
    if np.isscalar(degree):
        degree = (int(degree), int(degree))
    assert len(degree) == 2, "Only 2D polynomials can be fitted"
    degree = [int(degree[0]), int(degree[1])]

    coeff = np.zeros((degree[0] + 1, degree[1] + 1))
    # All (i, j) power combinations: [(0, 0), (1, 0), (0, 1), (1, 1), ...]
    idx = _get_coeff_idx(coeff)

    # Design matrix with columns 1, x, y, x*y, x**2, y**2, ...
    A = polyvander2d(x, y, degree)

    # Optionally keep only terms with small enough COMBINED power
    if max_degree is not None:
        within = idx[:, 0] + idx[:, 1] <= int(max_degree)
        idx = idx[within]
        A = A[:, within]

    # Linear least squares solution
    C, *_ = lstsq(A, z)

    # Scatter the solution vector back into the 2d coefficient grid
    for k, (i, j) in enumerate(idx):
        coeff[i, j] = C[k]

    if scale:
        # Undo the normalization so the coefficients apply to the raw x, y
        coeff = polyscale2d(coeff, *norm, copy=False)
        coeff = polyshift2d(coeff, *offset, copy=False)

    if plot:  # pragma: no cover
        if scale:
            x, y = _unscale(x, y, norm, offset)
        plot2d(x, y, z, coeff, title=plot_title)

    return coeff
|
|
741
|
+
|
|
742
|
+
|
|
743
|
+
def polyfit2d_2(x, y, z, degree=1, x0=None, loss="arctan", method="trf", plot=False):
    """Robust 2d polynomial fit via scipy.optimize.least_squares.

    Unlike polyfit2d this minimizes a robust loss (default "arctan") over the
    residuals, which makes the fit less sensitive to outliers.

    Parameters
    ----------
    x, y, z : array
        coordinates and data values (flattened internally)
    degree : int or (int, int), optional
        polynomial degree, per axis if a pair is given (default: 1)
    x0 : array, optional
        initial guess for the coefficients (default: zeros)
    loss : str, optional
        loss function passed to least_squares (default: "arctan")
    method : str, optional
        optimizer passed to least_squares (default: "trf")
    plot : bool, optional
        whether to plot the fitted surface and data (default: False)

    Returns
    -------
    coef : array[degree+1, degree+1]
        coefficients in numpy 2d format for polyval2d
    """
    x = x.ravel()
    y = y.ravel()
    z = z.ravel()

    # Coefficient grid shape, one more than the degree along each axis
    if np.isscalar(degree):
        shape = (degree + 1, degree + 1)
    else:
        shape = (degree[0] + 1, degree[1] + 1)

    polyval2d = np.polynomial.polynomial.polyval2d

    def residuals(c):
        # least_squares works on a flat parameter vector; restore the grid
        return polyval2d(x, y, c.reshape(shape)) - z

    guess = np.zeros(shape[0] * shape[1]) if x0 is None else x0.ravel()

    res = least_squares(residuals, guess, loss=loss, method=method)
    coef = res.x.reshape(shape)

    if plot:  # pragma: no cover
        # Plot at most 500 points to keep the 3d scatter responsive
        if x.size > 500:
            pick = np.random.choice(x.size, size=500, replace=False)
        else:
            pick = slice(None)
        x, y, z = x[pick], y[pick], z[pick]
        X, Y = np.meshgrid(
            np.linspace(np.min(x), np.max(x), 20), np.linspace(np.min(y), np.max(y), 20)
        )
        Z = polyval2d(X, Y, coef)
        fig = plt.figure()
        ax = fig.add_subplot(111, projection="3d")
        ax.plot_surface(X, Y, Z, rstride=1, cstride=1, alpha=0.2)
        ax.scatter(x, y, z, c="r", s=50)
        plt.xlabel("X")
        plt.ylabel("Y")
        ax.set_zlabel("Z")
        ax.axis("equal")
        ax.axis("tight")
        show_or_save("polyfit2d_2")

    return coef
|
|
792
|
+
|
|
793
|
+
|
|
794
|
+
def bezier_interp(x_old, y_old, x_new):
    """
    Bezier interpolation, based on the scipy spline methods.

    This mostly sanitizes the input: masked entries are removed and
    duplicate x values are collapsed. With duplicate x_old entries the
    result is not well defined, since only one of each pair of duplicates
    is kept and the others are discarded.

    Parameters
    ----------
    x_old : array[n]
        old x values
    y_old : array[n]
        old y values
    x_new : array[m]
        new x values

    Returns
    -------
    y_new : array[m]
        new y values
    """
    # Masked arrays: keep only the unmasked entries
    if np.ma.is_masked(x_old):
        x_old = np.ma.compressed(x_old)
        y_old = np.ma.compressed(y_old)

    assert x_old.size == y_old.size
    # np.unique sorts and removes duplicate x values in one pass
    x_old, keep = np.unique(x_old, return_index=True)
    y_old = y_old[keep]

    # Interpolating spline (s=0), evaluated at the new abscissae
    tck = scipy.interpolate.splrep(x_old, y_old, s=0)
    spline = scipy.interpolate.BSpline(*tck)
    return spline(x_new)
|
|
829
|
+
|
|
830
|
+
|
|
831
|
+
def safe_interpolation(x_old, y_old, x_new=None, fill_value=0):
    """
    'Safe' interpolation method that avoids the common pitfalls of spline
    interpolation:

    - masked arrays are compressed, i.e. only unmasked entries are used
    - NaN entries in x_old and y_old are removed
    - only unique x values are used; which of several duplicate y values
      survives is effectively 'random'
    - if the cubic spline cannot be constructed, falls back to linear

    Parameters
    ----------
    x_old : array of size (n,)
        x values of the data
    y_old : array of size (n,)
        y values of the data
    x_new : array of size (m,) or None, optional
        x values to interpolate at; if None the interpolator object itself
        is returned (default: None)

    Returns
    -------
    y_new : array of size (m,) or interpolator
        interpolated values if x_new was given, otherwise the interpolator
    """
    # Masked arrays: keep only the unmasked entries
    if np.ma.is_masked(x_old):
        x_old = np.ma.compressed(x_old)
        y_old = np.ma.compressed(y_old)

    # Drop non-finite points in either coordinate
    finite = np.isfinite(x_old) & np.isfinite(y_old)
    x_old, y_old = x_old[finite], y_old[finite]

    # np.unique sorts and deduplicates x, which makes assume_sorted safe below
    x_old, keep = np.unique(x_old, return_index=True)
    y_old = y_old[keep]

    common = dict(fill_value=fill_value, bounds_error=False, assume_sorted=True)
    try:
        interpolator = scipy.interpolate.interp1d(x_old, y_old, kind="cubic", **common)
    except ValueError:
        # e.g. fewer points than the cubic spline needs
        logging.warning(
            "Could not instantiate cubic spline interpolation, using linear instead"
        )
        interpolator = scipy.interpolate.interp1d(x_old, y_old, kind="linear", **common)

    return interpolator(x_new) if x_new is not None else interpolator
|
|
899
|
+
|
|
900
|
+
|
|
901
|
+
def bottom(f, order=1, iterations=40, eps=0.001, poly=False, weight=1, **kwargs):
    """
    bottom tries to fit a smooth curve to the lower envelope
    of 1D data array f. The filter parameter (order)
    together with the total number of iterations determine
    the smoothness and the quality of the fit. The total
    number of iterations can be controlled by limiting the
    maximum number of iterations (iterations) and/or by setting
    the convergence criterion for the fit (eps)
    04-Nov-2000 N.Piskunov wrote.
    09-Nov-2011 NP added weights and 2nd derivative constraint as LAM2

    Parameters
    ----------
    f : array
        data to fit
    order : int
        smoothing parameter of the optimal filter (or polynomial degree
        if poly is True)
    iterations : int
        maximum number of iterations [def: 40]
    eps : float
        convergence level [def: 0.001]
    poly : bool
        fit a polynomial instead of using the optimal filter
    weight : array(float)
        vector of weights
    **kwargs
        min / max : clipping range for the data (poly mode only)
        lambda2 : constraint on the 2nd derivative
    """

    mn = kwargs.get("min", np.min(f))
    mx = kwargs.get("max", np.max(f))
    lambda2 = kwargs.get("lambda2", -1)

    if poly:
        j = np.where((f >= mn) & (f <= mx))
        xx = np.linspace(-1, 1, num=len(f))
        fmin = np.min(f[j]) - 1
        fmax = np.max(f[j]) + 1
        # Normalize the (clipped) data to roughly (0, 1)
        ff = (f[j] - fmin) / (fmax - fmin)
        # NOTE(review): xx has len(f) points but ff only covers f[j]; if the
        # min/max cut excludes any point, np.polyfit(xx, ff, ...) below will
        # fail on mismatched lengths -- confirm callers never exclude points
        ff_old = np.copy(ff)
    else:
        # Use the "middle" fit as a baseline and work on the ratio to it
        fff = middle(
            f, order, iterations=iterations, eps=eps, weight=weight, lambda2=lambda2
        )
        fmin = min(f) - 1
        fmax = max(f) + 1
        fff = (fff - fmin) / (fmax - fmin)
        ff = (f - fmin) / (fmax - fmin) / fff
        ff_old = np.copy(ff)

    for _ in range(iterations):
        if poly:
            if order > 0:  # this is a bug in rsi poly routine
                t = median_filter(np.polyval(np.polyfit(xx, ff, order), xx), 3)
                # NOTE(review): t is reused here for the squared positive
                # deviations, so the clip below compares ff against the
                # deviations rather than the smooth fit -- looks suspicious,
                # kept as-is to preserve existing behavior
                t = np.clip(t - ff, 0, None) ** 2
                tmp = np.polyval(np.polyfit(xx, t, order), xx)
                dev = np.sqrt(np.nan_to_num(tmp))
            else:
                t = np.tile(np.polyfit(xx, ff, order), len(f))
                t = np.polyfit(xx, np.clip(t - ff, 0, None) ** 2, order)
                t = np.tile(t, len(f))
                # BUG FIX: previously `dev = np.nan_to_num(t)` was dead code,
                # immediately overwritten by `dev = np.sqrt(t)`, so NaNs
                # survived into the sqrt; sanitize first, matching the
                # order > 0 branch above
                dev = np.sqrt(np.nan_to_num(t))
        else:
            t = median_filter(opt_filter(ff, order, weight=weight, lambda2=lambda2), 3)
            dev = np.sqrt(
                opt_filter(
                    np.clip(weight * (t - ff), 0, None),
                    order,
                    weight=weight,
                    lambda2=lambda2,
                )
            )
        # Pull ff up towards the fit, but never above it
        ff = np.clip(
            np.clip(t - dev, ff, None), None, t
        )  # the order matters, t dominates
        dev2 = np.max(weight * np.abs(ff - ff_old))
        ff_old = ff
        if dev2 <= eps:
            break

    # Final smooth fit through the converged lower envelope, undoing the
    # normalization applied above
    if poly:
        if order > 0:  # this is a bug in rsi poly routine
            t = median_filter(np.polyval(np.polyfit(xx, ff, order), xx), 3)
        else:
            t = np.tile(np.polyfit(xx, ff, order), len(f))
        return t * (fmax - fmin) + fmin
    else:
        return t * fff * (fmax - fmin) + fmin
|
|
993
|
+
|
|
994
|
+
|
|
995
|
+
def middle(
    f,
    param,
    x=None,
    iterations=40,
    eps=0.001,
    poly=False,
    weight=1,
    lambda2=-1,
    mn=None,
    mx=None,
):
    """
    middle tries to fit a smooth curve that is located
    along the "middle" of 1D data array f. The filter parameter (param)
    together with the total number of iterations determine
    the smoothness and the quality of the fit. The total
    number of iterations can be controlled by limiting the
    maximum number of iterations (iterations) and/or by setting
    the convergence criterion for the fit (eps)
    04-Nov-2000 N.Piskunov wrote.
    09-Nov-2011 NP added weights and 2nd derivative constraint as LAM2

    Parameters
    ----------
    f : array
        data to fit
    param : int
        smoothing parameter of the optimal filter (or polynomial degree
        if poly is True)
    x : array, optional
        abscissae of the data; default is an equidistant grid on [-1, 1]
    iterations : int
        maximum number of iterations [def: 40]
    eps : float
        convergence level [def: 0.001]
    poly : bool
        fit a polynomial instead of using the optimal filter
    weight : array(float)
        vector of weights
    lambda2 : float
        constraint on 2nd derivative
    mn : float
        minimum function values to be considered [def: min(f)]
    mx : float
        maximum function values to be considered [def: max(f)]
    """
    mn = mn if mn is not None else np.min(f)
    mx = mx if mx is not None else np.max(f)

    f = np.asarray(f)

    if x is None:
        xx = np.linspace(-1, 1, num=f.size)
    else:
        xx = np.asarray(x)

    if poly:
        # Only points inside the [mn, mx] window take part in the fit
        j = (f >= mn) & (f <= mx)
        n = np.count_nonzero(j)
        # Not enough points for the requested degree: nothing to fit
        if n <= round(param):
            return f

        # Normalize the selected data to roughly (0, 1)
        fmin = np.min(f[j]) - 1
        fmax = np.max(f[j]) + 1
        ff = (f[j] - fmin) / (fmax - fmin)
        # NOTE(review): xx has f.size points but ff only covers f[j]; if the
        # mn/mx window excludes any point, np.polyfit(xx, ff, ...) below will
        # fail on mismatched lengths -- confirm callers never exclude points
        ff_old = ff
    else:
        fmin = np.min(f) - 1
        fmax = np.max(f) + 1
        ff = (f - fmin) / (fmax - fmin)
        ff_old = ff
        n = len(f)

    # Iteratively pull ff towards the smooth fit until the largest change
    # between iterations drops below eps
    for _ in range(iterations):
        if poly:
            param = round(param)
            if param > 0:
                # Smooth polynomial fit, lightly median-filtered
                t = median_filter(np.polyval(np.polyfit(xx, ff, param), xx), 3)
                # Local variance of the residuals, fitted by the same degree
                tmp = np.polyval(np.polyfit(xx, (t - ff) ** 2, param), xx)
            else:
                # NOTE(review): np.polyfit with degree 0 returns a single
                # coefficient; tiling it len(f) times yields a vector of
                # length len(f) only because the fit is scalar -- fragile,
                # verify before touching
                t = np.tile(np.polyfit(xx, ff, param), len(f))
                tmp = np.tile(np.polyfit(xx, (t - ff) ** 2, param), len(f))
        else:
            t = median_filter(opt_filter(ff, param, weight=weight, lambda2=lambda2), 3)
            tmp = opt_filter(
                weight * (t - ff) ** 2, param, weight=weight, lambda2=lambda2
            )

        # Local standard deviation of the data around the fit
        dev = np.sqrt(np.clip(tmp, 0, None))
        # Clamp ff into a +-1 sigma band around the fit
        ff = np.clip(t - dev, ff, t + dev)
        dev2 = np.max(weight * np.abs(ff - ff_old))
        ff_old = ff

        if dev2 <= eps:
            break

    # Final fit through the converged band center; note that for poly mode
    # the abscissae are rebuilt on the full grid regardless of x
    if poly:
        xx = np.linspace(-1, 1, len(f))
        if param > 0:
            t = median_filter(np.polyval(np.polyfit(xx, ff, param), xx), 3)
        else:
            t = np.tile(np.polyfit(xx, ff, param), len(f))

    # Undo the (0, 1) normalization
    return t * (fmax - fmin) + fmin
|
|
1096
|
+
|
|
1097
|
+
|
|
1098
|
+
def top(
    f,
    order=1,
    iterations=40,
    eps=0.001,
    poly=False,
    weight=1,
    lambda2=-1,
    mn=None,
    mx=None,
):
    """
    top tries to fit a smooth curve to the upper envelope
    of 1D data array f. The filter parameter (order)
    together with the total number of iterations determine
    the smoothness and the quality of the fit. The total
    number of iterations can be controlled by limiting the
    maximum number of iterations (iterations) and/or by setting
    the convergence criterion for the fit (eps)
    04-Nov-2000 N.Piskunov wrote.
    09-Nov-2011 NP added weights and 2nd derivative constraint as LAM2

    Parameters
    ----------
    f : array
        data to fit
    order : int
        smoothing parameter of the optimal filter (or polynomial degree
        if poly is True)
    iterations : int
        maximum number of iterations [def: 40]
    eps : float
        convergence level [def: 0.001]
    poly : bool
        fit a polynomial instead of using the optimal filter
    weight : array(float)
        vector of weights
    lambda2 : float
        constraint on 2nd derivative
    mn : float
        minimum function values to be considered [def: min(f)]
    mx : float
        maximum function values to be considered [def: max(f)]

    Raises
    ------
    ValueError
        if poly is True and fewer points than the polynomial degree
        survive the mn/mx cut (note: middle() returns f unchanged in the
        same situation -- inconsistent by design or oversight, unclear)
    """
    mn = mn if mn is not None else np.min(f)
    mx = mx if mx is not None else np.max(f)

    f = np.asarray(f)
    xx = np.linspace(-1, 1, num=f.size)

    if poly:
        j = (f >= mn) & (f <= mx)
        if np.count_nonzero(j) <= round(order):
            raise ValueError("Not enough points")
        # Normalization range from the clipped data, but note ff below is
        # built from the FULL array f (unlike middle, which uses f[j])
        fmin = np.min(f[j]) - 1
        fmax = np.max(f[j]) + 1
        ff = (f - fmin) / (fmax - fmin)
        ff_old = ff
    else:
        # Baseline "middle" fit; the envelope is fitted on the ratio to it
        fff = middle(
            f, order, iterations=iterations, eps=eps, weight=weight, lambda2=lambda2
        )
        fmin = np.min(f) - 1
        fmax = np.max(f) + 1
        fff = (fff - fmin) / (fmax - fmin)
        ff = (f - fmin) / (fmax - fmin) / fff
        ff_old = ff

    # Iteratively push ff up towards the upper envelope
    for _ in range(iterations):
        order = round(order)
        if poly:
            t = median_filter(np.polyval(np.polyfit(xx, ff, order), xx), 3)
            # Variance of the POSITIVE residuals only (points above the fit)
            tmp = np.polyval(np.polyfit(xx, np.clip(ff - t, 0, None) ** 2, order), xx)
            dev = np.sqrt(np.clip(tmp, 0, None))
        else:
            t = median_filter(opt_filter(ff, order, weight=weight, lambda2=lambda2), 3)
            tmp = opt_filter(
                np.clip(weight * (ff - t), 0, None),
                order,
                weight=weight,
                lambda2=lambda2,
            )
            dev = np.sqrt(np.clip(tmp, 0, None))

        # NOTE(review): the lower bound uses eps (the convergence tolerance),
        # while bottom() uses dev in the analogous place -- possibly a typo
        # inherited from the IDL original; kept as-is
        ff = np.clip(t - eps, ff, t + dev * 3)
        dev2 = np.max(weight * np.abs(ff - ff_old))
        ff_old = ff
        if dev2 <= eps:
            break

    # Final smooth fit through the converged envelope, undoing normalization
    if poly:
        t = median_filter(np.polyval(np.polyfit(xx, ff, order), xx), 3)
        return t * (fmax - fmin) + fmin
    else:
        return t * fff * (fmax - fmin) + fmin
|
|
1190
|
+
|
|
1191
|
+
|
|
1192
|
+
def opt_filter(y, par, par1=None, weight=None, lambda2=-1, maxiter=100):
    """
    Optimal filtering of 1D and 2D arrays.
    Solves a banded linear system (scipy.linalg.solve_banded) in both cases.
    Written by N.Piskunov 8-May-2000

    Parameters
    ----------
    y : array
        1d or 2d array to filter
    par : int
        filter width (for a 2d array, the width along the fast x axis)
    par1 : int, optional
        (2d only) filter width along the y axis; defaults to par
    weight : array(float), optional
        array of the same size as y with values between 0 and 1
        (default: all ones)
    lambda2 : float
        regularization parameter for a 2nd-derivative constraint
        (1d case only); ignored unless > 0
    maxiter : int
        unused; kept for backwards compatibility

    Returns
    -------
    f : array
        the filtered array (same length as y in 1d; shape (ny, nx) in 2d)
    """

    y = np.asarray(y)

    if y.ndim not in [1, 2]:
        raise ValueError("Input y must have 1 or 2 dimensions")

    if par < 1:
        par = 1

    # 1D case (degenerate 2d row/column vectors are treated as 1d)
    if y.ndim == 1 or (y.ndim == 2 and (y.shape[0] == 1 or y.shape[1] == 1)):
        y = y.ravel()
        n = y.size

        if weight is None:
            weight = np.ones(n)
        elif np.isscalar(weight):
            weight = np.full(n, weight)
        else:
            weight = weight[:n]

        if lambda2 > 0:
            # Pentadiagonal system: 1st-derivative penalty (par) plus a
            # 2nd-derivative penalty (lambda2)
            aij = np.zeros((5, n))
            # 2nd superdiagonal
            aij[0, 2:] = lambda2
            # superdiagonal
            aij[1, 1] = -par - 2 * lambda2
            aij[1, 2:-1] = -par - 4 * lambda2
            aij[1, -1] = -par - 2 * lambda2
            # Main diagonal
            aij[2, 0] = weight[0] + par + lambda2
            aij[2, 1] = weight[1] + 2 * par + 5 * lambda2
            aij[2, 2:-2] = weight[2:-2] + 2 * par + 6 * lambda2
            aij[2, -2] = weight[-2] + 2 * par + 5 * lambda2
            aij[2, -1] = weight[-1] + par + lambda2
            # subdiagonal
            aij[3, 0] = -par - 2 * lambda2
            aij[3, 1:-2] = -par - 4 * lambda2
            aij[3, -2] = -par - 2 * lambda2
            # 2nd subdiagonal
            aij[4, 0:-2] = lambda2
            # RHS
            b = weight * y

            f = solve_banded((2, 2), aij, b)
        else:
            # Tridiagonal system with only the 1st-derivative penalty
            a = np.full(n, -abs(par))
            b = np.copy(weight) + abs(par)
            b[1:-1] += abs(par)
            aba = np.array([a, b, a])

            f = solve_banded((1, 1), aba, weight * y)

        return f
    else:
        # 2D case
        # NOTE(review): this branch was previously non-functional (np.arrange,
        # an undefined name `f` in the RHS, a transposed band matrix and IDL
        # flat indices applied to a 2d array). It has been repaired to the
        # apparent intent; verify against a reference before relying on it.
        if par1 is None:
            par1 = par
        if par == 0 and par1 == 0:
            raise ValueError("xwidth and ywidth can't both be 0")
        # BUG FIX: the axes were unpacked in the wrong order; all the
        # indexing below treats y (and weight) as (ny, nx), with x as the
        # fast (last) axis
        ny, nx = y.shape
        n = nx * ny

        lam_x = abs(par)
        lam_y = abs(par1)

        if weight is None:
            weight = np.ones((ny, nx))
        elif np.isscalar(weight):
            weight = np.full((ny, nx), weight)

        ndiag = 2 * nx + 1
        # BUG FIX: solve_banded expects the band matrix as (ndiag, n),
        # not (n, ndiag)
        aij = np.zeros((ndiag, n))
        # Main diagonal: data weight plus one smoothing term per neighbour
        aij[nx, 0] = weight[0, 0] + lam_x + lam_y
        aij[nx, 1:nx] = weight[0, 1:nx] + 2 * lam_x + lam_y
        aij[nx, nx : n - nx] = weight[1 : ny - 1].ravel() + 2 * (lam_x + lam_y)
        aij[nx, n - nx : n - 1] = weight[ny - 1, 0 : nx - 1] + 2 * lam_x + lam_y
        aij[nx, n - 1] = weight[ny - 1, nx - 1] + lam_x + lam_y

        # x-neighbour coupling (1st sub/super diagonal)
        aij[nx - 1, 1:n] = -lam_x
        aij[nx + 1, 0 : n - 1] = -lam_x

        # Pixels at the seam between image rows have only one x-neighbour:
        # remove one lam_x from the main diagonal there ...
        # (BUG FIX: np.arrange -> np.arange, flat indices -> band indices)
        ind = np.arange(1, ny) * nx
        aij[nx, ind - 1] -= lam_x
        aij[nx, ind] -= lam_x
        # ... and cut the x-coupling across the seam
        aij[nx + 1, ind - 1] = 0
        aij[nx - 1, ind] = 0

        # y-neighbour coupling (nx-th sub/super diagonal)
        aij[0, nx:n] = -lam_y
        aij[ndiag - 1, 0 : n - nx] = -lam_y

        # BUG FIX: the RHS referenced an undefined name `f`; it is the
        # weighted data, flattened to match the banded system
        rhs = (y * weight).ravel()

        model = solve_banded((nx, nx), aij, rhs)
        model = np.reshape(model, (ny, nx))
        return model
|
|
1308
|
+
|
|
1309
|
+
|
|
1310
|
+
def helcorr(obs_long, obs_lat, obs_alt, ra2000, dec2000, jd, system="barycentric"):
    """
    calculates heliocentric Julian date, barycentric and heliocentric radial
    velocity corrections, using astropy functions

    Parameters
    ---------
    obs_long : float
        Longitude of observatory (degrees, western direction is positive)
    obs_lat : float
        Latitude of observatory (degrees)
    obs_alt : float
        Altitude of observatory (meters)
    ra2000 : float
        Right ascension of object for epoch 2000.0 (hours)
    dec2000 : float
        Declination of object for epoch 2000.0 (degrees)
    jd : float
        Julian date for the middle of exposure in MJD
    system : {"barycentric", "heliocentric"}, optional
        reference system of the result, barycentric: around earth-sun gravity center,
        heliocentric: around sun, usually barycentric is preferred (default: "barycentric)

    Returns
    -------
    correction : float
        radial velocity correction due to barycentre offset
    hjd : float
        Heliocentric Julian date for middle of exposure

    Raises
    ------
    AttributeError
        if system is neither "barycentric" nor "heliocentric"
    """

    # jd = 2400000.5 + jd
    # Input is interpreted as MJD
    jd = time.Time(jd, format="mjd")

    ra = coord.Longitude(ra2000, unit=u.hour)
    dec = coord.Latitude(dec2000, unit=u.degree)

    # NOTE(review): astropy's from_geodetic takes east-positive longitude,
    # while the docstring above says western direction is positive --
    # confirm the sign convention used by callers
    observatory = coord.EarthLocation.from_geodetic(obs_long, obs_lat, height=obs_alt)
    sky_location = coord.SkyCoord(ra, dec, obstime=jd, location=observatory)
    times = time.Time(jd, location=observatory)

    if system == "barycentric":
        # Radial velocity correction in km/s (barycentric by default)
        correction = sky_location.radial_velocity_correction().to(u.km / u.s).value
        # Light travel time to the barycentre
        ltt = times.light_travel_time(sky_location)
    elif system == "heliocentric":
        correction = (
            sky_location.radial_velocity_correction("heliocentric").to(u.km / u.s).value
        )
        ltt = times.light_travel_time(sky_location, "heliocentric")
    else:
        raise AttributeError(
            "Could not parse system, values are: ('barycentric', 'heliocentric')"
        )

    # Barycentric/heliocentric JD, shifted back towards an MJD-like scale.
    # NOTE(review): MJD = JD - 2400000.5, but 2400000 (without the .5) is
    # subtracted here, so the result is offset by half a day from true MJD
    # -- confirm this is what downstream code expects
    times = (times.utc + ltt).value - 2400000

    # Sign flip: astropy's correction has the opposite sign convention
    # from what the caller expects here
    return -correction, times
|