pyreduce-astro 0.6.0b5__cp313-cp313-macosx_11_0_arm64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (156) hide show
  1. pyreduce/__init__.py +67 -0
  2. pyreduce/__main__.py +106 -0
  3. pyreduce/clib/__init__.py +0 -0
  4. pyreduce/clib/_slitfunc_2d.cpython-311-darwin.so +0 -0
  5. pyreduce/clib/_slitfunc_2d.cpython-312-darwin.so +0 -0
  6. pyreduce/clib/_slitfunc_2d.cpython-313-darwin.so +0 -0
  7. pyreduce/clib/_slitfunc_bd.cpython-311-darwin.so +0 -0
  8. pyreduce/clib/_slitfunc_bd.cpython-312-darwin.so +0 -0
  9. pyreduce/clib/_slitfunc_bd.cpython-313-darwin.so +0 -0
  10. pyreduce/clib/build_extract.py +75 -0
  11. pyreduce/clib/slit_func_2d_xi_zeta_bd.c +1313 -0
  12. pyreduce/clib/slit_func_2d_xi_zeta_bd.h +55 -0
  13. pyreduce/clib/slit_func_bd.c +362 -0
  14. pyreduce/clib/slit_func_bd.h +17 -0
  15. pyreduce/clipnflip.py +147 -0
  16. pyreduce/combine_frames.py +855 -0
  17. pyreduce/configuration.py +186 -0
  18. pyreduce/continuum_normalization.py +329 -0
  19. pyreduce/cwrappers.py +404 -0
  20. pyreduce/datasets.py +231 -0
  21. pyreduce/echelle.py +413 -0
  22. pyreduce/estimate_background_scatter.py +129 -0
  23. pyreduce/extract.py +1361 -0
  24. pyreduce/extraction_width.py +77 -0
  25. pyreduce/instruments/__init__.py +0 -0
  26. pyreduce/instruments/andes.json +61 -0
  27. pyreduce/instruments/andes.py +102 -0
  28. pyreduce/instruments/common.json +46 -0
  29. pyreduce/instruments/common.py +675 -0
  30. pyreduce/instruments/crires_plus.json +63 -0
  31. pyreduce/instruments/crires_plus.py +103 -0
  32. pyreduce/instruments/filters.py +195 -0
  33. pyreduce/instruments/harpn.json +136 -0
  34. pyreduce/instruments/harpn.py +201 -0
  35. pyreduce/instruments/harps.json +155 -0
  36. pyreduce/instruments/harps.py +310 -0
  37. pyreduce/instruments/instrument_info.py +140 -0
  38. pyreduce/instruments/instrument_schema.json +221 -0
  39. pyreduce/instruments/jwst_miri.json +53 -0
  40. pyreduce/instruments/jwst_miri.py +29 -0
  41. pyreduce/instruments/jwst_niriss.json +52 -0
  42. pyreduce/instruments/jwst_niriss.py +98 -0
  43. pyreduce/instruments/lick_apf.json +53 -0
  44. pyreduce/instruments/lick_apf.py +35 -0
  45. pyreduce/instruments/mcdonald.json +59 -0
  46. pyreduce/instruments/mcdonald.py +123 -0
  47. pyreduce/instruments/metis_ifu.json +63 -0
  48. pyreduce/instruments/metis_ifu.py +45 -0
  49. pyreduce/instruments/metis_lss.json +65 -0
  50. pyreduce/instruments/metis_lss.py +45 -0
  51. pyreduce/instruments/micado.json +53 -0
  52. pyreduce/instruments/micado.py +45 -0
  53. pyreduce/instruments/neid.json +51 -0
  54. pyreduce/instruments/neid.py +154 -0
  55. pyreduce/instruments/nirspec.json +56 -0
  56. pyreduce/instruments/nirspec.py +215 -0
  57. pyreduce/instruments/nte.json +47 -0
  58. pyreduce/instruments/nte.py +42 -0
  59. pyreduce/instruments/uves.json +59 -0
  60. pyreduce/instruments/uves.py +46 -0
  61. pyreduce/instruments/xshooter.json +66 -0
  62. pyreduce/instruments/xshooter.py +39 -0
  63. pyreduce/make_shear.py +606 -0
  64. pyreduce/masks/mask_crires_plus_det1.fits.gz +0 -0
  65. pyreduce/masks/mask_crires_plus_det2.fits.gz +0 -0
  66. pyreduce/masks/mask_crires_plus_det3.fits.gz +0 -0
  67. pyreduce/masks/mask_ctio_chiron.fits.gz +0 -0
  68. pyreduce/masks/mask_elodie.fits.gz +0 -0
  69. pyreduce/masks/mask_feros3.fits.gz +0 -0
  70. pyreduce/masks/mask_flames_giraffe.fits.gz +0 -0
  71. pyreduce/masks/mask_harps_blue.fits.gz +0 -0
  72. pyreduce/masks/mask_harps_red.fits.gz +0 -0
  73. pyreduce/masks/mask_hds_blue.fits.gz +0 -0
  74. pyreduce/masks/mask_hds_red.fits.gz +0 -0
  75. pyreduce/masks/mask_het_hrs_2x5.fits.gz +0 -0
  76. pyreduce/masks/mask_jwst_miri_lrs_slitless.fits.gz +0 -0
  77. pyreduce/masks/mask_jwst_niriss_gr700xd.fits.gz +0 -0
  78. pyreduce/masks/mask_lick_apf_.fits.gz +0 -0
  79. pyreduce/masks/mask_mcdonald.fits.gz +0 -0
  80. pyreduce/masks/mask_nes.fits.gz +0 -0
  81. pyreduce/masks/mask_nirspec_nirspec.fits.gz +0 -0
  82. pyreduce/masks/mask_sarg.fits.gz +0 -0
  83. pyreduce/masks/mask_sarg_2x2a.fits.gz +0 -0
  84. pyreduce/masks/mask_sarg_2x2b.fits.gz +0 -0
  85. pyreduce/masks/mask_subaru_hds_red.fits.gz +0 -0
  86. pyreduce/masks/mask_uves_blue.fits.gz +0 -0
  87. pyreduce/masks/mask_uves_blue_binned_2_2.fits.gz +0 -0
  88. pyreduce/masks/mask_uves_middle.fits.gz +0 -0
  89. pyreduce/masks/mask_uves_middle_2x2_split.fits.gz +0 -0
  90. pyreduce/masks/mask_uves_middle_binned_2_2.fits.gz +0 -0
  91. pyreduce/masks/mask_uves_red.fits.gz +0 -0
  92. pyreduce/masks/mask_uves_red_2x2.fits.gz +0 -0
  93. pyreduce/masks/mask_uves_red_2x2_split.fits.gz +0 -0
  94. pyreduce/masks/mask_uves_red_binned_2_2.fits.gz +0 -0
  95. pyreduce/masks/mask_xshooter_nir.fits.gz +0 -0
  96. pyreduce/rectify.py +138 -0
  97. pyreduce/reduce.py +2205 -0
  98. pyreduce/settings/settings_ANDES.json +89 -0
  99. pyreduce/settings/settings_CRIRES_PLUS.json +89 -0
  100. pyreduce/settings/settings_HARPN.json +73 -0
  101. pyreduce/settings/settings_HARPS.json +69 -0
  102. pyreduce/settings/settings_JWST_MIRI.json +55 -0
  103. pyreduce/settings/settings_JWST_NIRISS.json +55 -0
  104. pyreduce/settings/settings_LICK_APF.json +62 -0
  105. pyreduce/settings/settings_MCDONALD.json +58 -0
  106. pyreduce/settings/settings_METIS_IFU.json +77 -0
  107. pyreduce/settings/settings_METIS_LSS.json +77 -0
  108. pyreduce/settings/settings_MICADO.json +78 -0
  109. pyreduce/settings/settings_NEID.json +73 -0
  110. pyreduce/settings/settings_NIRSPEC.json +58 -0
  111. pyreduce/settings/settings_NTE.json +60 -0
  112. pyreduce/settings/settings_UVES.json +54 -0
  113. pyreduce/settings/settings_XSHOOTER.json +78 -0
  114. pyreduce/settings/settings_pyreduce.json +178 -0
  115. pyreduce/settings/settings_schema.json +827 -0
  116. pyreduce/tools/__init__.py +0 -0
  117. pyreduce/tools/combine.py +117 -0
  118. pyreduce/trace_orders.py +645 -0
  119. pyreduce/util.py +1288 -0
  120. pyreduce/wavecal/MICADO_HK_3arcsec_chip5.npz +0 -0
  121. pyreduce/wavecal/atlas/thar.fits +4946 -13
  122. pyreduce/wavecal/atlas/thar_list.txt +4172 -0
  123. pyreduce/wavecal/atlas/une.fits +0 -0
  124. pyreduce/wavecal/convert.py +38 -0
  125. pyreduce/wavecal/crires_plus_J1228_Open_det1.npz +0 -0
  126. pyreduce/wavecal/crires_plus_J1228_Open_det2.npz +0 -0
  127. pyreduce/wavecal/crires_plus_J1228_Open_det3.npz +0 -0
  128. pyreduce/wavecal/harpn_harpn_2D.npz +0 -0
  129. pyreduce/wavecal/harps_blue_2D.npz +0 -0
  130. pyreduce/wavecal/harps_blue_pol_2D.npz +0 -0
  131. pyreduce/wavecal/harps_red_2D.npz +0 -0
  132. pyreduce/wavecal/harps_red_pol_2D.npz +0 -0
  133. pyreduce/wavecal/mcdonald.npz +0 -0
  134. pyreduce/wavecal/metis_lss_l_2D.npz +0 -0
  135. pyreduce/wavecal/metis_lss_m_2D.npz +0 -0
  136. pyreduce/wavecal/nirspec_K2.npz +0 -0
  137. pyreduce/wavecal/uves_blue_360nm_2D.npz +0 -0
  138. pyreduce/wavecal/uves_blue_390nm_2D.npz +0 -0
  139. pyreduce/wavecal/uves_blue_437nm_2D.npz +0 -0
  140. pyreduce/wavecal/uves_middle_2x2_2D.npz +0 -0
  141. pyreduce/wavecal/uves_middle_565nm_2D.npz +0 -0
  142. pyreduce/wavecal/uves_middle_580nm_2D.npz +0 -0
  143. pyreduce/wavecal/uves_middle_600nm_2D.npz +0 -0
  144. pyreduce/wavecal/uves_middle_665nm_2D.npz +0 -0
  145. pyreduce/wavecal/uves_middle_860nm_2D.npz +0 -0
  146. pyreduce/wavecal/uves_red_580nm_2D.npz +0 -0
  147. pyreduce/wavecal/uves_red_600nm_2D.npz +0 -0
  148. pyreduce/wavecal/uves_red_665nm_2D.npz +0 -0
  149. pyreduce/wavecal/uves_red_760nm_2D.npz +0 -0
  150. pyreduce/wavecal/uves_red_860nm_2D.npz +0 -0
  151. pyreduce/wavecal/xshooter_nir.npz +0 -0
  152. pyreduce/wavelength_calibration.py +1873 -0
  153. pyreduce_astro-0.6.0b5.dist-info/METADATA +113 -0
  154. pyreduce_astro-0.6.0b5.dist-info/RECORD +156 -0
  155. pyreduce_astro-0.6.0b5.dist-info/WHEEL +6 -0
  156. pyreduce_astro-0.6.0b5.dist-info/licenses/LICENSE +674 -0
pyreduce/util.py ADDED
@@ -0,0 +1,1288 @@
1
+ """
2
+ Collection of various useful and/or reoccuring functions across PyReduce
3
+ """
4
+
5
+ import logging
6
+ import os
7
+ import warnings
8
+
9
+ import matplotlib.pyplot as plt
10
+ import numpy as np
11
+ import scipy.constants
12
+ import scipy.interpolate
13
+ from astropy import coordinates as coord
14
+ from astropy import time
15
+ from astropy import units as u
16
+ from scipy.linalg import lstsq, solve_banded
17
+ from scipy.ndimage.filters import median_filter
18
+ from scipy.optimize import curve_fit, least_squares
19
+ from scipy.special import binom
20
+
21
+ from . import __version__
22
+
23
+ logger = logging.getLogger(__name__)
24
+
25
+
26
def resample(array, new_size):
    """Linearly resample a 1D array onto ``new_size`` points.

    The original samples are placed evenly on [0, new_size] and the
    result is evaluated at the integer positions 0 .. new_size - 1.

    Parameters
    ----------
    array : array-like
        input samples
    new_size : int
        number of output samples

    Returns
    -------
    array of shape (new_size,)
        linearly interpolated values
    """
    targets = np.arange(new_size)
    positions = np.linspace(0, new_size, len(array))
    return np.interp(targets, positions, array)
30
+
31
+
32
def remove_bias(img, ihead, bias, bhead, nfiles=1):
    """Subtract the exposure-time-scaled bias frame from an image.

    If either exposure time is zero the bias is instead scaled by the
    number of combined files. Without a bias frame (or bias header) the
    image is returned unchanged.

    Parameters
    ----------
    img : array
        image to correct
    ihead : dict-like
        header of the image, must provide "EXPTIME"
    bias : array or None
        bias frame
    bhead : dict-like or None
        header of the bias frame, must provide "EXPTIME"
    nfiles : int, optional
        number of files combined into img (default: 1)

    Returns
    -------
    array
        bias-corrected image
    """
    if bias is None or bhead is None:
        return img

    bias_exptime = bhead["EXPTIME"]
    img_exptime = ihead["EXPTIME"]
    # Zero exposure times (e.g. bias frames themselves) fall back to
    # scaling by the number of combined files
    if 0 in (bias_exptime, img_exptime):
        bias_exptime, img_exptime = 1, nfiles

    return img - bias * img_exptime / bias_exptime
41
+
42
+
43
def in_ipynb():
    """Return True when running inside an IPython notebook kernel."""
    try:
        # get_ipython only exists inside an IPython session
        app_name = get_ipython().config["IPKernelApp"]["parent_appname"]
    except NameError:
        return False
    return app_name == "ipython-notebook"
52
+
53
+
54
def log_version():
    """Emit the current PyReduce version to the debug log (for debugging)."""
    separator = "----------------------"
    logger.debug(separator)
    logger.debug("PyReduce version: %s", __version__)
58
+
59
+
60
def start_logging(log_file="log.log"):
    """Start logging to log file and command line

    Parameters
    ----------
    log_file : str, optional
        name of the logging file (default: "log.log")
    """

    # Only create the parent directory when the path actually contains one;
    # os.makedirs("") raises FileNotFoundError for a bare filename such as
    # the default "log.log".
    log_dir = os.path.dirname(log_file)
    if log_dir:
        os.makedirs(log_dir, exist_ok=True)

    logging.basicConfig(
        filename=log_file,
        level=logging.DEBUG,
        format="%(asctime)-15s - %(levelname)s - %(name)-8s - %(message)s",
    )
    # Route warnings.warn() output through the logging system as well
    logging.captureWarnings(True)
    log_version()
78
+
79
+
80
def vac2air(wl_vac):
    """
    Convert vacuum wavelengths to wavelengths in air
    Author: Nikolai Piskunov

    Only entries above 2000 (wavelength units of the input, presumably
    Angstrom -- confirm with callers) are converted; smaller values are
    returned unchanged.

    Parameters
    ----------
    wl_vac : array(float)
        vacuum wavelengths

    Returns
    -------
    wl_air : array(float)
        wavelengths in air; a new array, the input is left untouched
    """
    # Copy instead of aliasing: the previous implementation mutated the
    # caller's array in place (air2vac already used np.copy).
    wl_air = np.copy(wl_vac)
    ii = np.where(wl_vac > 2e3)

    sigma2 = (1e4 / wl_vac[ii]) ** 2  # Compute wavenumbers squared
    fact = (
        1e0
        + 8.34254e-5
        + 2.406147e-2 / (130e0 - sigma2)
        + 1.5998e-4 / (38.9e0 - sigma2)
    )
    wl_air[ii] = wl_vac[ii] / fact  # Convert to air wavelength
    return wl_air
97
+
98
+
99
def air2vac(wl_air):
    """
    Convert wavelengths in air to vacuum wavelength
    Author: Nikolai Piskunov

    Values at or below 1999.352 are passed through unchanged; the
    refraction correction is applied only above that threshold.
    Operates on a copy, the input array is not modified.

    Parameters
    ----------
    wl_air : array(float)
        wavelengths in air

    Returns
    -------
    wl_vac : array(float)
        vacuum wavelengths
    """
    wl_vac = np.copy(wl_air)
    convert = np.where(wl_air > 1999.352)

    # squared wavenumbers (assuming Angstrom input -- confirm with callers)
    sigma2 = (1e4 / wl_air[convert]) ** 2
    refraction = (
        1e0
        + 8.336624212083e-5
        + 2.408926869968e-2 / (1.301065924522e2 - sigma2)
        + 1.599740894897e-4 / (3.892568793293e1 - sigma2)
    )
    wl_vac[convert] = wl_air[convert] * refraction
    return wl_vac
116
+
117
+
118
def swap_extension(fname, ext, path=None):
    """Replace the extension of ``fname`` with ``ext``.

    A trailing ".gz" is stripped first, so "x.fits.gz" becomes "x" + ext.
    If ``path`` is given the result is placed there, otherwise in the
    directory of the input file.
    """
    if path is None:
        path = os.path.dirname(fname)
    base = os.path.basename(fname)
    if base.endswith(".gz"):
        base = base[:-3]
    stem = base.rsplit(".", 1)[0]
    return os.path.join(path, stem + ext)
128
+
129
+
130
def find_first_index(arr, value):
    """Return the index of the first element of ``arr`` equal to ``value``.

    Raises
    ------
    Exception
        if ``value`` does not occur in ``arr``
    """
    for index, element in enumerate(arr):
        if element == value:
            return index
    raise Exception(f"Value {value} not found")
136
+
137
+
138
def interpolate_masked(masked):
    """Fill masked entries by linear interpolation from the unmasked ones.

    Parameters
    ----------
    masked : masked_array
        masked array to interpolate on

    Returns
    -------
    interpolated : array
        plain array with masked values replaced by interpolates
    """
    mask = np.ma.getmaskarray(masked)
    good = np.flatnonzero(~mask)
    return np.interp(np.arange(len(masked)), good, masked[good])
156
+
157
+
158
def cutout_image(img, ymin, ymax, xmin, xmax):
    """Cut a section out of an image, following per-column row limits.

    Parameters
    ----------
    img : array
        image
    ymin : array[ncol](int)
        lower y value for each column of the full image
    ymax : array[ncol](int)
        upper y value (inclusive) for each column
    xmin : int
        lower x value
    xmax : int
        upper x value (exclusive)

    Returns
    -------
    cutout : array[height, ncol]
        selection of the image; the height is taken from the limits of
        column 0, so the caller must keep ymax - ymin constant
    """
    height = ymax[0] - ymin[0] + 1
    cutout = np.zeros((height, xmax - xmin), dtype=img.dtype)
    for offset, column in enumerate(range(xmin, xmax)):
        cutout[:, offset] = img[ymin[column] : ymax[column] + 1, column]
    return cutout
184
+
185
+
186
def make_index(ymin, ymax, xmin, xmax, zero=0):
    """Create an index (numpy style) that will select part of an image with changing position but fixed height

    The user is responsible for making sure the height is constant, otherwise it will still work, but the subsection will not have the desired format

    Parameters
    ----------
    ymin : array[ncol](int)
        lower y border
    ymax : array[ncol](int)
        upper y border (inclusive)
    xmin : int
        leftmost column
    xmax : int
        rightmost colum (exclusive)
    zero : bool, optional
        if True count y array from 0 instead of xmin (default: False)

    Returns
    -------
    index : tuple(array[height, width], array[height, width])
        numpy index for the selection of a subsection of an image
    """
    ymin = np.asarray(ymin, dtype=int)
    ymax = np.asarray(ymax, dtype=int)
    xmin, xmax = int(xmin), int(xmax)

    # When zero is truthy, ymin/ymax are indexed starting at xmin
    if zero:
        zero = xmin

    columns = range(xmin - zero, xmax - zero)
    # row indices spanning ymin..ymax for each column, and the matching
    # (constant) column index repeated to the same height
    row_index = np.array([np.arange(ymin[col], ymax[col] + 1) for col in columns])
    col_index = np.array(
        [np.full(ymax[col] - ymin[col] + 1, col) for col in columns]
    )

    # transpose so the result has shape (height, width)
    return row_index.T, col_index.T + zero
234
+
235
+
236
def gridsearch(func, grid, args=(), kwargs=None):
    """Evaluate ``func`` at every point of a parameter grid.

    ``grid`` has shape (..., nparam); the trailing axis holds the
    parameter vector passed as first argument to ``func``. A failing
    evaluation is recorded as NaN instead of aborting the search.

    Parameters
    ----------
    func : callable
        function to evaluate; called as func(point, *args, **kwargs)
    grid : array
        parameter grid, last axis is the parameter vector
    args : tuple, optional
        extra positional arguments for func
    kwargs : dict, optional
        extra keyword arguments for func

    Returns
    -------
    matrix : array of shape grid.shape[:-1]
        results (NaN where func raised)
    """
    kwargs = {} if kwargs is None else kwargs
    results = np.zeros(grid.shape[:-1])

    for position in np.ndindex(grid.shape[:-1]):
        point = grid[position]
        print(f"Value: {point}")
        try:
            outcome = func(point, *args, **kwargs)
            print(f"Success: {outcome}")
        except Exception as exc:
            outcome = np.nan
            print(f"Failed: {exc}")
        results[position] = outcome

    return results
254
+
255
+
256
def gaussfit(x, y):
    """
    Fit a simple gaussian to data

    gauss(x, a, mu, sigma) = a * exp(-z**2/2)
    with z = (x - mu) / sigma

    Parameters
    ----------
    x : array(float)
        x values
    y : array(float)
        y values

    Returns
    -------
    gauss(x), parameters
        fitted values for x, fit paramters (a, mu, sigma)
    """

    def model(t, amplitude, center, width):
        z = (t - center) / width
        return amplitude * np.exp(-(z**2) / 2)

    # start from the data maximum, centered at 0 with unit width
    initial = [max(y), 0, 1]
    params, _ = curve_fit(model, x, y, p0=initial)
    return model(x, *params), params
280
+
281
+
282
def gaussfit2(x, y):
    """Fit a gaussian(normal) curve to data x, y

    gauss = A * exp(-(x-mu)**2/(2*sig**2)) + offset

    The fit is robust (soft_l1 loss), bounded, and biased towards a peak
    near the center of the data. The offset is fixed to min(y) rather
    than fitted.

    Parameters
    ----------
    x : array[n]
        x values (may be masked)
    y : array[n]
        y values (may be masked)

    Returns
    -------
    popt : list[4]
        coefficients of the gaussian: A, mu, sigma**2, offset

    Raises
    ------
    ValueError
        if all values are masked, or x and y have different masks
    """

    gauss = gaussval2

    # Drop masked entries; both arrays must shrink identically
    x = np.ma.compressed(x)
    y = np.ma.compressed(y)

    if len(x) == 0 or len(y) == 0:
        raise ValueError("All values masked")

    if len(x) != len(y):
        raise ValueError("The masks of x and y are different")

    # Find the peak in the center of the image
    # (triangular weights favor the midpoint when picking the start guess)
    weights = np.ones(len(y), dtype=y.dtype)
    midpoint = len(y) // 2
    weights[:midpoint] = np.linspace(0, 1, midpoint, dtype=weights.dtype)
    weights[midpoint:] = np.linspace(1, 0, len(y) - midpoint, dtype=weights.dtype)

    i = np.argmax(y * weights)
    # initial guess: amplitude at the weighted peak, centered there, sigma**2 = 1
    p0 = [y[i], x[i], 1]
    with warnings.catch_warnings():
        # curve fitting may emit harmless overflow/convergence warnings
        warnings.simplefilter("ignore")
        res = least_squares(
            # offset is held fixed at min(y); only [A, mu, sigma**2] are fitted
            lambda c: gauss(x, *c, np.ma.min(y)) - y,
            p0,
            loss="soft_l1",
            bounds=(
                # amplitude at least mean(y) (or the peak value if smaller),
                # center within the data range, sigma**2 non-negative
                [min(np.ma.mean(y), y[i]), np.ma.min(x), 0],
                [np.ma.max(y) * 1.5, np.ma.max(x), len(x) / 2],
            ),
        )
    popt = list(res.x) + [np.min(y)]
    return popt
332
+
333
+
334
def gaussfit3(x, y):
    """A very simple (and relatively fast) gaussian fit
    gauss = A * exp(-(x-mu)**2/(2*sig**2)) + offset

    Masked values in x or y are dropped before fitting; the initial peak
    guess is searched only in the central half of the data.

    Parameters
    ----------
    x : array of shape (n,)
        x data
    y : array of shape (n,)
        y data

    Returns
    -------
    popt : list of shape (4,)
        Parameters A, mu, sigma**2, offset
    """
    invalid = np.ma.getmaskarray(x) | np.ma.getmaskarray(y)
    x, y = x[~invalid], y[~invalid]

    # restrict the peak search to the central half of the data
    lower, upper = len(y) // 4, len(y) * 3 // 4
    peak = np.argmax(y[lower:upper]) + lower
    initial = [y[peak], x[peak], 1, np.min(y)]

    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        popt, _ = curve_fit(gaussval2, x, y, p0=initial)

    return popt
362
+
363
+
364
def gaussfit4(x, y):
    """A very simple (and relatively fast) gaussian fit
    gauss = A * exp(-(x-mu)**2/(2*sig**2)) + offset

    Assumes x is sorted

    Parameters
    ----------
    x : array of shape (n,)
        x data
    y : array of shape (n,)
        y data

    Returns
    -------
    popt : list of shape (4,)
        Parameters A, mu, sigma**2, offset
    """
    x, y = np.ma.compressed(x), np.ma.compressed(y)

    # start at the global maximum with unit variance and min(y) offset
    peak = np.argmax(y)
    initial = [y[peak], x[peak], 1, np.min(y)]

    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        popt, _ = curve_fit(gaussval2, x, y, p0=initial)

    return popt
393
+
394
+
395
def gaussfit_linear(x, y):
    """Transform the gaussian fit into a linear least squares problem, and solve that instead of the non-linear curve fit
    For efficiency reasons. (roughly 10 times faster than the curve fit)

    Fits a parabola to log(y - offset) with weights y, which recovers the
    gaussian parameters analytically.

    Parameters
    ----------
    x : array of shape (n,)
        x data
    y : array of shape (n,)
        y data

    Returns
    -------
    coef : tuple
        a, mu, sig, offset
    """
    # only positive values can be log-transformed
    x = x[y > 0]
    y = y[y > 0]

    # subtract the baseline; the epsilon keeps log() finite at the minimum
    offset = np.min(y)
    y = y - offset + 1e-12

    # weight by the signal so the peak region dominates the fit
    weights = y

    d = np.log(y)
    # NOTE: np.float was removed from numpy (deprecated 1.20, removed 1.24);
    # the builtin float is the documented replacement.
    G = np.ones((x.size, 3), dtype=float)
    G[:, 0] = x**2
    G[:, 1] = x

    beta, _, _, _ = np.linalg.lstsq((G.T * weights**2).T, d * weights**2, rcond=None)

    # invert log(gauss) = beta0*x**2 + beta1*x + beta2 back to a, mu, sig
    a = np.exp(beta[2] - beta[1] ** 2 / (4 * beta[0]))
    sig = -1 / (2 * beta[0])
    mu = -beta[1] / (2 * beta[0])

    return a, mu, sig, offset
431
+
432
+
433
def gaussval2(x, a, mu, sig, const):
    """Evaluate a gaussian with amplitude ``a``, center ``mu``,
    variance ``sig`` (i.e. sigma squared) and constant offset ``const``."""
    exponent = -((x - mu) ** 2) / (2 * sig)
    return a * np.exp(exponent) + const
435
+
436
+
437
+ def gaussbroad(x, y, hwhm):
438
+ """
439
+ Apply gaussian broadening to x, y data with half width half maximum hwhm
440
+
441
+ Parameters
442
+ ----------
443
+ x : array(float)
444
+ x values
445
+ y : array(float)
446
+ y values
447
+ hwhm : float > 0
448
+ half width half maximum
449
+ Returns
450
+ -------
451
+ array(float)
452
+ broadened y values
453
+ """
454
+
455
+ # alternatively use:
456
+ # from scipy.ndimage.filters import gaussian_filter1d as gaussbroad
457
+ # but that doesn't have an x coordinate
458
+
459
+ nw = len(x)
460
+ dw = (x[-1] - x[0]) / (len(x) - 1)
461
+
462
+ if hwhm > 5 * (x[-1] - x[0]):
463
+ return np.full(len(x), sum(y) / len(x))
464
+
465
+ nhalf = int(3.3972872 * hwhm / dw)
466
+ ng = 2 * nhalf + 1 # points in gaussian (odd!)
467
+ # wavelength scale of gaussian
468
+ wg = dw * (np.arange(0, ng, 1, dtype=float) - (ng - 1) / 2)
469
+ xg = (0.83255461 / hwhm) * wg # convenient absisca
470
+ gpro = (0.46974832 * dw / hwhm) * np.exp(-xg * xg) # unit area gaussian w/ FWHM
471
+ gpro = gpro / np.sum(gpro)
472
+
473
+ # Pad spectrum ends to minimize impact of Fourier ringing.
474
+ npad = nhalf + 2 # pad pixels on each end
475
+ spad = np.concatenate((np.full(npad, y[0]), y, np.full(npad, y[-1])))
476
+
477
+ # Convolve and trim.
478
+ sout = np.convolve(spad, gpro) # convolve with gaussian
479
+ sout = sout[npad : npad + nw] # trim to original data / length
480
+ return sout # return broadened spectrum.
481
+
482
+
483
def polyfit1d(x, y, degree=1, regularization=0):
    """Fit a 1D polynomial to x, y with optional ridge regularization.

    Solves the normal equations (A.T A + diag(L)) c = A.T y, where A is
    the Vandermonde matrix and L[i] = regularization * i**2 penalizes the
    high-order terms.

    Parameters
    ----------
    x, y : array
        data to fit
    degree : int, optional
        polynomial degree (default: 1)
    regularization : float, optional
        strength of the ridge penalty (default: 0)

    Returns
    -------
    coeff : array[degree+1]
        coefficients in np.polyval order (highest power first)
    """
    idx = np.arange(degree + 1)

    A = np.array([np.power(x, i) for i in idx], dtype=float).T
    b = np.asarray(y).ravel()

    # Ridge penalty growing quadratically with the term's order
    L = regularization * idx**2
    # Solve the normal equations directly instead of forming the explicit
    # inverse: numerically more stable and cheaper.
    coeff = np.linalg.solve(A.T @ A + np.diag(L), A.T @ b)

    # np.polyval convention: highest order first
    return coeff[::-1]
497
+
498
+
499
def _get_coeff_idx(coeff):
    """Return every (i, j) exponent pair of a 2D coefficient matrix as an
    (n*m, 2) integer array, in row-major order (equivalent to
    itertools.product over the two axis ranges)."""
    rows, cols = np.indices(coeff.shape)
    return np.column_stack((rows.ravel(), cols.ravel()))
506
+
507
+
508
def _scale(x, y):
    """Normalize x and y to mean 0 and standard deviation 1.

    Avoids huge numbers in downstream polynomial fits. A zero standard
    deviation is replaced by 1 so constant data passes through unscaled.

    Returns
    -------
    x, y : array
        the normalized data
    norm : tuple(float, float)
        the (std_x, std_y) divisors applied
    offset : tuple(float, float)
        the (mean_x, mean_y) values subtracted
    """
    offset_x, offset_y = np.mean(x), np.mean(y)
    norm_x = np.std(x) or 1
    norm_y = np.std(y) or 1
    scaled_x = (x - offset_x) / norm_x
    scaled_y = (y - offset_y) / norm_y
    return scaled_x, scaled_y, (norm_x, norm_y), (offset_x, offset_y)
520
+
521
+
522
def _unscale(x, y, norm, offset):
    """Invert :func:`_scale`: restore the original units of x and y."""
    return x * norm[0] + offset[0], y * norm[1] + offset[1]
526
+
527
+
528
def polyvander2d(x, y, degree):
    """Pseudo-Vandermonde matrix of a 2D polynomial; columns are the
    monomials x**i * y**j in numpy's polyvander2d ordering."""
    return np.polynomial.polynomial.polyvander2d(x, y, degree)
532
+
533
+
534
def polyscale2d(coeff, scale_x, scale_y, copy=True):
    """Undo a coordinate scaling on 2D polynomial coefficients.

    Divides coeff[i, j] by scale_x**i * scale_y**j, so a polynomial
    fitted on x/scale_x, y/scale_y becomes valid in the original units.

    Parameters
    ----------
    coeff : array[n, m]
        polynomial coefficients, coeff[i, j] for x**i * y**j
    scale_x, scale_y : float
        scales that were applied to x and y
    copy : bool, optional
        if True operate on a copy, otherwise modify coeff in place
    """
    if copy:
        coeff = np.copy(coeff)
    for i, j in np.ndindex(*coeff.shape):
        coeff[i, j] /= scale_x**i * scale_y**j
    return coeff
541
+
542
+
543
def polyshift2d(coeff, offset_x, offset_y, copy=True):
    """Shift the origin of a 2D polynomial.

    Transforms the coefficients so that
    p_new(x, y) = p_old(x - offset_x, y - offset_y), by expanding
    (x - offset_x)**i * (y - offset_y)**j with the binomial theorem and
    folding each higher-order term's contribution into the lower orders.

    Parameters
    ----------
    coeff : array[n, m]
        polynomial coefficients, coeff[i, j] for x**i * y**j
    offset_x, offset_y : float
        shift of the origin in x and y
    copy : bool, optional
        if True operate on a copy, otherwise modify coeff in place

    Returns
    -------
    coeff : array[n, m]
        shifted coefficients
    """
    if copy:
        coeff = np.copy(coeff)
    idx = _get_coeff_idx(coeff)
    # Copy coeff because it changes during the loop
    coeff2 = np.copy(coeff)
    for k, m in idx:
        # every term (i, j) with i >= k and j >= m contributes to (k, m),
        # except the (k, m) term itself
        not_the_same = ~((idx[:, 0] == k) & (idx[:, 1] == m))
        above = (idx[:, 0] >= k) & (idx[:, 1] >= m) & not_the_same
        for i, j in idx[above]:
            # binomial expansion: C(i,k)*C(j,m) * (-offset)^(remaining powers)
            b = binom(i, k) * binom(j, m)
            sign = (-1) ** ((i - k) + (j - m))
            offset = offset_x ** (i - k) * offset_y ** (j - m)
            coeff[k, m] += sign * b * coeff2[i, j] * offset
    return coeff
558
+
559
+
560
def plot2d(x, y, z, coeff, title=None):
    """Plot the data x, y, z together with the fitted 2D polynomial surface.

    At most 500 randomly chosen data points are shown to keep the plot
    responsive.

    Parameters
    ----------
    x, y, z : array
        data coordinates and values
    coeff : array
        2D polynomial coefficients for numpy polyval2d
    title : str, optional
        plot title (default: None)
    """
    # regular grid covering the domain of the data
    if x.size > 500:
        choice = np.random.choice(x.size, size=500, replace=False)
    else:
        choice = slice(None, None, None)
    x, y, z = x[choice], y[choice], z[choice]
    X, Y = np.meshgrid(
        np.linspace(np.min(x), np.max(x), 20), np.linspace(np.min(y), np.max(y), 20)
    )
    Z = np.polynomial.polynomial.polyval2d(X, Y, coeff)
    fig = plt.figure()
    # Figure.gca(projection=...) was removed in matplotlib 3.6;
    # add_subplot is the supported way to create a 3D axes.
    ax = fig.add_subplot(projection="3d")
    ax.plot_surface(X, Y, Z, rstride=1, cstride=1, alpha=0.2)
    ax.scatter(x, y, z, c="r", s=50)
    plt.xlabel("X")
    plt.ylabel("Y")
    ax.set_zlabel("Z")
    if title is not None:
        plt.title(title)
    plt.show()
583
+
584
+
585
def polyfit2d(
    x, y, z, degree=1, max_degree=None, scale=True, plot=False, plot_title=None
):
    """A simple 2D plynomial fit to data x, y, z
    The polynomial can be evaluated with numpy.polynomial.polynomial.polyval2d

    Parameters
    ----------
    x : array[n]
        x coordinates
    y : array[n]
        y coordinates
    z : array[n]
        data values
    degree : int, optional
        degree of the polynomial fit (default: 1)
    max_degree : {int, None}, optional
        if given the maximum combined degree of the coefficients is limited to this value
    scale : bool, optional
        Wether to scale the input arrays x and y to mean 0 and variance 1, to avoid numerical overflows.
        Especially useful at higher degrees. (default: True)
    plot : bool, optional
        wether to plot the fitted surface and data (slow) (default: False)
    plot_title : str, optional
        title for the plot when plot is True (default: None)

    Returns
    -------
    coeff : array[degree+1, degree+1]
        the polynomial coefficients in numpy 2d format, i.e. coeff[i, j] for x**i * y**j
    """
    # Flatten input
    x = np.asarray(x).ravel()
    y = np.asarray(y).ravel()
    z = np.asarray(z).ravel()

    # Remove masked values (getmask is False for plain arrays, so this
    # is a no-op for unmasked input)
    mask = ~(np.ma.getmask(z) | np.ma.getmask(x) | np.ma.getmask(y))
    x, y, z = x[mask].ravel(), y[mask].ravel(), z[mask].ravel()

    if scale:
        x, y, norm, offset = _scale(x, y)

    # Create combinations of degree of x and y
    # usually: [(0, 0), (1, 0), (0, 1), (1, 1), (2, 0), ....]
    if np.isscalar(degree):
        degree = (int(degree), int(degree))
    assert len(degree) == 2, "Only 2D polynomials can be fitted"
    degree = [int(degree[0]), int(degree[1])]
    # idx = [[i, j] for i, j in product(range(degree[0] + 1), range(degree[1] + 1))]
    coeff = np.zeros((degree[0] + 1, degree[1] + 1))
    idx = _get_coeff_idx(coeff)

    # Calculate elements 1, x, y, x*y, x**2, y**2, ...
    A = polyvander2d(x, y, degree)

    # We only want the combinations with maximum order COMBINED power
    if max_degree is not None:
        mask = idx[:, 0] + idx[:, 1] <= int(max_degree)
        idx = idx[mask]
        A = A[:, mask]

    # Do least squares fit
    C, *_ = lstsq(A, z)

    # Reorder coefficients into numpy compatible 2d array
    for k, (i, j) in enumerate(idx):
        coeff[i, j] = C[k]

    # Undo the x/y normalization so the coefficients apply to the
    # original (unscaled) coordinates; both calls modify coeff in place
    if scale:
        coeff = polyscale2d(coeff, *norm, copy=False)
        coeff = polyshift2d(coeff, *offset, copy=False)

    if plot:  # pragma: no cover
        if scale:
            x, y = _unscale(x, y, norm, offset)
        plot2d(x, y, z, coeff, title=plot_title)

    return coeff
663
+
664
+
665
def polyfit2d_2(x, y, z, degree=1, x0=None, loss="arctan", method="trf", plot=False):
    """Fit a 2D polynomial to data using robust non-linear least squares.

    Unlike :func:`polyfit2d` this minimizes a robust loss with
    scipy.optimize.least_squares instead of solving the linear problem.

    Parameters
    ----------
    x, y, z : array
        data coordinates and values; flattened internally
    degree : int or tuple(int, int), optional
        polynomial degree in x and y (default: 1)
    x0 : array, optional
        initial guess for the coefficients (default: all zeros)
    loss : str, optional
        robust loss function for least_squares (default: "arctan")
    method : str, optional
        optimization method for least_squares (default: "trf")
    plot : bool, optional
        wether to plot the fitted surface and data (slow) (default: False)

    Returns
    -------
    coef : array[degree_x+1, degree_y+1]
        coefficients for numpy.polynomial.polynomial.polyval2d
    """
    x = x.ravel()
    y = y.ravel()
    z = z.ravel()

    if np.isscalar(degree):
        degree_x = degree_y = degree + 1
    else:
        degree_x = degree[0] + 1
        degree_y = degree[1] + 1

    polyval2d = np.polynomial.polynomial.polyval2d

    def func(c):
        # residuals of the polynomial with (flattened) coefficients c
        c = c.reshape(degree_x, degree_y)
        value = polyval2d(x, y, c)
        return value - z

    if x0 is None:
        x0 = np.zeros(degree_x * degree_y)
    else:
        x0 = x0.ravel()

    res = least_squares(func, x0, loss=loss, method=method)
    coef = res.x.reshape(degree_x, degree_y)

    if plot:  # pragma: no cover
        # regular grid covering the domain of the data
        if x.size > 500:
            choice = np.random.choice(x.size, size=500, replace=False)
        else:
            choice = slice(None, None, None)
        x, y, z = x[choice], y[choice], z[choice]
        X, Y = np.meshgrid(
            np.linspace(np.min(x), np.max(x), 20), np.linspace(np.min(y), np.max(y), 20)
        )
        Z = np.polynomial.polynomial.polyval2d(X, Y, coef)
        fig = plt.figure()
        # Figure.gca(projection=...) was removed in matplotlib 3.6;
        # add_subplot is the supported replacement for 3D axes.
        ax = fig.add_subplot(projection="3d")
        ax.plot_surface(X, Y, Z, rstride=1, cstride=1, alpha=0.2)
        ax.scatter(x, y, z, c="r", s=50)
        plt.xlabel("X")
        plt.ylabel("Y")
        ax.set_zlabel("Z")
        # NOTE(review): axis("equal"/"tight") may not be supported on 3D
        # axes in recent matplotlib -- confirm before relying on the plot
        ax.axis("equal")
        ax.axis("tight")
        plt.show()
    return coef
714
+
715
+
716
def bezier_interp(x_old, y_old, x_new):
    """
    Bezier interpolation, based on the scipy methods

    This mostly sanitizes the input by removing masked values and duplicate entries
    Note that in case of duplicate entries (in x_old) the results are not well defined as only one of the entries is used and the other is discarded

    Parameters
    ----------
    x_old : array[n]
        old x values
    y_old : array[n]
        old y values
    x_new : array[m]
        new x values

    Returns
    -------
    y_new : array[m]
        new y values
    """

    # Masked input: keep only the valid entries of both arrays
    if np.ma.is_masked(x_old):
        x_old = np.ma.compressed(x_old)
        y_old = np.ma.compressed(y_old)

    assert x_old.size == y_old.size
    # Duplicate x entries are dropped (keeping one arbitrarily); this
    # also sorts the data, as splrep requires
    x_old, unique_idx = np.unique(x_old, return_index=True)
    y_old = y_old[unique_idx]

    knots, coefficients, spline_degree = scipy.interpolate.splrep(x_old, y_old, s=0)
    spline = scipy.interpolate.BSpline(knots, coefficients, spline_degree)
    return spline(x_new)
751
+
752
+
753
def safe_interpolation(x_old, y_old, x_new=None, fill_value=0):
    """
    'Safe' interpolation method that should avoid
    the common pitfalls of spline interpolation

    masked arrays are compressed, i.e. only non masked entries are used
    remove NaN input in x_old and y_old
    only unique x values are used, corresponding y values are 'random'
    if all else fails, revert to linear interpolation

    Parameters
    ----------
    x_old : array of size (n,)
        x values of the data
    y_old : array of size (n,)
        y values of the data
    x_new : array of size (m, ) or None, optional
        x values of the interpolated values
        if None will return the interpolator object
        (default: None)
    fill_value : float, optional
        value returned for x_new outside the data range (default: 0)

    Returns
    -------
    y_new: array of size (m, ) or interpolator
        if x_new was given, return the interpolated values
        otherwise return the interpolator object
    """

    # Handle masked arrays
    if np.ma.is_masked(x_old):
        x_old = np.ma.compressed(x_old)
        y_old = np.ma.compressed(y_old)

    # drop NaN/Inf entries in either array
    mask = np.isfinite(x_old) & np.isfinite(y_old)
    x_old = x_old[mask]
    y_old = y_old[mask]

    # avoid duplicate entries in x
    # also sorts data, which allows us to use assume_sorted below
    x_old, index = np.unique(x_old, return_index=True)
    y_old = y_old[index]

    try:
        interpolator = scipy.interpolate.interp1d(
            x_old,
            y_old,
            kind="cubic",
            fill_value=fill_value,
            bounds_error=False,
            assume_sorted=True,
        )
    except ValueError:
        # e.g. fewer than 4 points for a cubic spline; use the module
        # logger (not the root logger) for consistent log formatting
        logger.warning(
            "Could not instantiate cubic spline interpolation, using linear instead"
        )
        interpolator = scipy.interpolate.interp1d(
            x_old,
            y_old,
            kind="linear",
            fill_value=fill_value,
            bounds_error=False,
            assume_sorted=True,
        )

    if x_new is not None:
        return interpolator(x_new)
    else:
        return interpolator
821
+
822
+
823
+ def bottom(f, order=1, iterations=40, eps=0.001, poly=False, weight=1, **kwargs):
824
+ """
825
+ bottom tries to fit a smooth curve to the lower envelope
826
+ of 1D data array f. Filter size "filter"
827
+ together with the total number of iterations determine
828
+ the smoothness and the quality of the fit. The total
829
+ number of iterations can be controlled by limiting the
830
+ maximum number of iterations (iter) and/or by setting
831
+ the convergence criterion for the fit (eps)
832
+ 04-Nov-2000 N.Piskunov wrote.
833
+ 09-Nov-2011 NP added weights and 2nd derivative constraint as LAM2
834
+
835
+ Parameters
836
+ ----------
837
+ f : Callable
838
+ Function to fit
839
+ filter : int
840
+ Smoothing parameter of the optimal filter (or polynomial degree of poly is True)
841
+ iter : int
842
+ maximum number of iterations [def: 40]
843
+ eps : float
844
+ convergence level [def: 0.001]
845
+ mn : float
846
+ minimum function values to be considered [def: min(f)]
847
+ mx : float
848
+ maximum function values to be considered [def: max(f)]
849
+ lam2 : float
850
+ constraint on 2nd derivative
851
+ weight : array(float)
852
+ vector of weights.
853
+ """
854
+
855
+ mn = kwargs.get("min", np.min(f))
856
+ mx = kwargs.get("max", np.max(f))
857
+ lambda2 = kwargs.get("lambda2", -1)
858
+
859
+ if poly:
860
+ j = np.where((f >= mn) & (f <= mx))
861
+ xx = np.linspace(-1, 1, num=len(f))
862
+ fmin = np.min(f[j]) - 1
863
+ fmax = np.max(f[j]) + 1
864
+ ff = (f[j] - fmin) / (fmax - fmin)
865
+ ff_old = np.copy(ff)
866
+ else:
867
+ fff = middle(
868
+ f, order, iterations=iterations, eps=eps, weight=weight, lambda2=lambda2
869
+ )
870
+ fmin = min(f) - 1
871
+ fmax = max(f) + 1
872
+ fff = (fff - fmin) / (fmax - fmin)
873
+ ff = (f - fmin) / (fmax - fmin) / fff
874
+ ff_old = np.copy(ff)
875
+
876
+ for _ in range(iterations):
877
+ if poly:
878
+ if order > 0: # this is a bug in rsi poly routine
879
+ t = median_filter(np.polyval(np.polyfit(xx, ff, order), xx), 3)
880
+ t = np.clip(t - ff, 0, None) ** 2
881
+ tmp = np.polyval(np.polyfit(xx, t, order), xx)
882
+ dev = np.sqrt(np.nan_to_num(tmp))
883
+ else:
884
+ t = np.tile(np.polyfit(xx, ff, order), len(f))
885
+ t = np.polyfit(xx, np.clip(t - ff, 0, None) ** 2, order)
886
+ t = np.tile(t, len(f))
887
+ dev = np.nan_to_num(t)
888
+ dev = np.sqrt(t)
889
+ else:
890
+ t = median_filter(opt_filter(ff, order, weight=weight, lambda2=lambda2), 3)
891
+ dev = np.sqrt(
892
+ opt_filter(
893
+ np.clip(weight * (t - ff), 0, None),
894
+ order,
895
+ weight=weight,
896
+ lambda2=lambda2,
897
+ )
898
+ )
899
+ ff = np.clip(
900
+ np.clip(t - dev, ff, None), None, t
901
+ ) # the order matters, t dominates
902
+ dev2 = np.max(weight * np.abs(ff - ff_old))
903
+ ff_old = ff
904
+ if dev2 <= eps:
905
+ break
906
+
907
+ if poly:
908
+ if order > 0: # this is a bug in rsi poly routine
909
+ t = median_filter(np.polyval(np.polyfit(xx, ff, order), xx), 3)
910
+ else:
911
+ t = np.tile(np.polyfit(xx, ff, order), len(f))
912
+ return t * (fmax - fmin) + fmin
913
+ else:
914
+ return t * fff * (fmax - fmin) + fmin
915
+
916
+
917
def middle(
    f,
    param,
    x=None,
    iterations=40,
    eps=0.001,
    poly=False,
    weight=1,
    lambda2=-1,
    mn=None,
    mx=None,
):
    """
    middle tries to fit a smooth curve that is located
    along the "middle" of 1D data array f. The smoothing parameter param
    together with the total number of iterations determine
    the smoothness and the quality of the fit. The total
    number of iterations can be controlled by limiting the
    maximum number of iterations (iterations) and/or by setting
    the convergence criterion for the fit (eps)
    04-Nov-2000 N.Piskunov wrote.
    09-Nov-2011 NP added weights and 2nd derivative constraint as LAM2

    Parameters
    ----------
    f : array of shape (n,)
        data to fit
    param : int
        smoothing parameter of the optimal filter
        (or polynomial degree if poly is True)
    x : array of shape (n,), optional
        abscissa of the data; defaults to a uniform grid on [-1, 1]
    iterations : int
        maximum number of iterations (default: 40)
    eps : float
        convergence level (default: 0.001)
    poly : bool
        fit a polynomial instead of using the optimal filter (default: False)
    weight : float or array(float)
        scalar or vector of weights (default: 1)
    lambda2 : float
        constraint on 2nd derivative (default: -1, disabled)
    mn : float
        minimum function values to be considered (default: min(f))
    mx : float
        maximum function values to be considered (default: max(f))

    Returns
    -------
    array of shape (n,)
        the fitted middle curve (or f itself if too few points
        survive the [mn, mx] selection in poly mode)
    """
    mn = mn if mn is not None else np.min(f)
    mx = mx if mx is not None else np.max(f)

    f = np.asarray(f)

    if x is None:
        xx = np.linspace(-1, 1, num=f.size)
    else:
        xx = np.asarray(x)

    if poly:
        param = round(param)
        j = (f >= mn) & (f <= mx)
        n = np.count_nonzero(j)
        if n <= param:
            # not enough points to constrain the polynomial
            return f

        # Bug fix: fit on the abscissa of the selected points only;
        # the original fitted len(f) x-values against n selected y-values,
        # which raises as soon as mn/mx actually exclude points
        xj = xx[j]
        fmin = np.min(f[j]) - 1
        fmax = np.max(f[j]) + 1
        ff = (f[j] - fmin) / (fmax - fmin)
        ff_old = ff
    else:
        fmin = np.min(f) - 1
        fmax = np.max(f) + 1
        ff = (f - fmin) / (fmax - fmin)
        ff_old = ff
        n = len(f)

    for _ in range(iterations):
        if poly:
            if param > 0:
                t = median_filter(np.polyval(np.polyfit(xj, ff, param), xj), 3)
                tmp = np.polyval(np.polyfit(xj, (t - ff) ** 2, param), xj)
            else:
                # degree 0: constant fit; Bug fix: tile to the size of the
                # selection so the element-wise comparison below is valid
                t = np.tile(np.polyfit(xj, ff, param), ff.size)
                tmp = np.tile(np.polyfit(xj, (t - ff) ** 2, param), ff.size)
        else:
            t = median_filter(opt_filter(ff, param, weight=weight, lambda2=lambda2), 3)
            tmp = opt_filter(
                weight * (t - ff) ** 2, param, weight=weight, lambda2=lambda2
            )

        dev = np.sqrt(np.clip(tmp, 0, None))
        # keep ff within one deviation of the smooth curve
        ff = np.clip(t - dev, ff, t + dev)
        dev2 = np.max(weight * np.abs(ff - ff_old))
        ff_old = ff

        if dev2 <= eps:
            break

    if poly:
        # evaluate the final polynomial on the full abscissa so the result
        # has the same length as f (also fixes the original discarding a
        # user-supplied x here)
        if param > 0:
            t = median_filter(np.polyval(np.polyfit(xj, ff, param), xx), 3)
        else:
            t = np.tile(np.polyfit(xj, ff, param), len(f))

    return t * (fmax - fmin) + fmin
1019
+
1020
def top(
    f,
    order=1,
    iterations=40,
    eps=0.001,
    poly=False,
    weight=1,
    lambda2=-1,
    mn=None,
    mx=None,
):
    """
    Fit a smooth curve to the upper envelope of the 1D data array f.

    The smoothing parameter order (filter width, or polynomial degree when
    poly is True) together with the number of iterations controls the
    smoothness and quality of the fit; iteration stops early once the
    maximum weighted change drops below eps.
    04-Nov-2000 N.Piskunov wrote.
    09-Nov-2011 NP added weights and 2nd derivative constraint as LAM2

    Parameters
    ----------
    f : array of shape (n,)
        data to fit
    order : int
        smoothing parameter of the optimal filter
        (or polynomial degree if poly is True)
    iterations : int
        maximum number of iterations (default: 40)
    eps : float
        convergence level (default: 0.001)
    poly : bool
        fit a polynomial instead of using the optimal filter (default: False)
    weight : float or array(float)
        scalar or vector of weights (default: 1)
    lambda2 : float
        constraint on 2nd derivative (default: -1, disabled)
    mn : float
        minimum function values to be considered (default: min(f))
    mx : float
        maximum function values to be considered (default: max(f))

    Returns
    -------
    array of shape (n,)
        the fitted upper envelope

    Raises
    ------
    ValueError
        if poly is True and too few points survive the [mn, mx] selection
    """
    if mn is None:
        mn = np.min(f)
    if mx is None:
        mx = np.max(f)

    f = np.asarray(f)
    abscissa = np.linspace(-1, 1, num=f.size)

    if poly:
        selected = (f >= mn) & (f <= mx)
        if np.count_nonzero(selected) <= round(order):
            raise ValueError("Not enough points")
        fmin = np.min(f[selected]) - 1
        fmax = np.max(f[selected]) + 1
        scaled = (f - fmin) / (fmax - fmin)
        previous = scaled
    else:
        # normalize by a "middle" fit so the envelope search works on a flat curve
        mid_fit = middle(
            f, order, iterations=iterations, eps=eps, weight=weight, lambda2=lambda2
        )
        fmin = np.min(f) - 1
        fmax = np.max(f) + 1
        mid_fit = (mid_fit - fmin) / (fmax - fmin)
        scaled = (f - fmin) / (fmax - fmin) / mid_fit
        previous = scaled

    order = round(order)
    for _ in range(iterations):
        if poly:
            smooth = median_filter(
                np.polyval(np.polyfit(abscissa, scaled, order), abscissa), 3
            )
            residual = np.polyval(
                np.polyfit(abscissa, np.clip(scaled - smooth, 0, None) ** 2, order),
                abscissa,
            )
        else:
            smooth = median_filter(
                opt_filter(scaled, order, weight=weight, lambda2=lambda2), 3
            )
            residual = opt_filter(
                np.clip(weight * (scaled - smooth), 0, None),
                order,
                weight=weight,
                lambda2=lambda2,
            )
        spread = np.sqrt(np.clip(residual, 0, None))
        # pull the working curve up towards the envelope
        scaled = np.clip(smooth - eps, scaled, smooth + spread * 3)
        change = np.max(weight * np.abs(scaled - previous))
        previous = scaled
        if change <= eps:
            break

    if poly:
        smooth = median_filter(
            np.polyval(np.polyfit(abscissa, scaled, order), abscissa), 3
        )
        return smooth * (fmax - fmin) + fmin
    return smooth * mid_fit * (fmax - fmin) + fmin
1113
+
1114
def opt_filter(y, par, par1=None, weight=None, lambda2=-1, maxiter=100):
    """
    Optimal filtering of 1D and 2D arrays.

    Solves a banded linear system that balances fidelity to the data
    (scaled by weight) against a first-derivative smoothness penalty of
    strength par (and, in the 1D case, an optional second-derivative
    penalty lambda2).
    Written by N.Piskunov 8-May-2000.

    Parameters
    ----------
    y : array
        1d or 2d array
    par : int
        filter width (for 2d array: width along the x direction)
    par1 : int, optional
        (2d only) filter width in the y direction; defaults to par
    weight : scalar or array(float), optional
        weights between 0 and 1, same size as y (default: all ones)
    lambda2 : float, optional
        regularization parameter on the 2nd derivative (1d only);
        disabled when <= 0 (default: -1)
    maxiter : int
        unused; kept for backwards compatibility

    Returns
    -------
    f : array
        filtered version of y, same shape
    """

    y = np.asarray(y)

    if y.ndim not in [1, 2]:
        raise ValueError("Input y must have 1 or 2 dimensions")

    if par < 1:
        par = 1

    # 1D case (degenerate 2d shapes 1 x n / n x 1 are treated as 1d)
    if y.ndim == 1 or (y.ndim == 2 and (y.shape[0] == 1 or y.shape[1] == 1)):
        y = y.ravel()
        n = y.size

        if weight is None:
            weight = np.ones(n)
        elif np.isscalar(weight):
            weight = np.full(n, weight)
        else:
            weight = weight[:n]

        if lambda2 > 0:
            # Pentadiagonal system: 1st and 2nd derivative penalties.
            # solve_banded layout (5, n): row 0/1 superdiagonals,
            # row 2 main diagonal, row 3/4 subdiagonals
            aij = np.zeros((5, n))
            # 2nd superdiagonal
            aij[0, 2:] = lambda2
            # superdiagonal
            aij[1, 1] = -par - 2 * lambda2
            aij[1, 2:-1] = -par - 4 * lambda2
            aij[1, -1] = -par - 2 * lambda2
            # main diagonal
            aij[2, 0] = weight[0] + par + lambda2
            aij[2, 1] = weight[1] + 2 * par + 5 * lambda2
            aij[2, 2:-2] = weight[2:-2] + 2 * par + 6 * lambda2
            aij[2, -2] = weight[-2] + 2 * par + 5 * lambda2
            aij[2, -1] = weight[-1] + par + lambda2
            # subdiagonal
            aij[3, 0] = -par - 2 * lambda2
            aij[3, 1:-2] = -par - 4 * lambda2
            aij[3, -2] = -par - 2 * lambda2
            # 2nd subdiagonal
            aij[4, 0:-2] = lambda2
            # RHS
            b = weight * y

            f = solve_banded((2, 2), aij, b)
        else:
            # Tridiagonal system: 1st derivative penalty only
            a = np.full(n, -abs(par))
            b = np.copy(weight) + abs(par)
            b[1:-1] += abs(par)
            aba = np.array([a, b, a])

            f = solve_banded((1, 1), aba, weight * y)

        return f
    else:
        # 2D case
        if par1 is None:
            par1 = par
        if par == 0 and par1 == 0:
            raise ValueError("xwidth and ywidth can't both be 0")

        # NOTE(review): this branch was broken in the original port
        # (np.arrange typo, undefined name `f`, band matrix allocated
        # transposed, flat IDL indices applied to a 2D array, weight never
        # defaulted). Reconstructed so that the first axis of y is the slow
        # (y) direction, matching the final reshape — verify against the
        # IDL original.
        ny, nx = y.shape
        n = nx * ny

        if weight is None:
            weight = np.ones((ny, nx))
        elif np.isscalar(weight):
            weight = np.full((ny, nx), weight)

        lam_x = abs(par)
        lam_y = abs(par1)

        ndiag = 2 * nx + 1
        # Banded matrix in solve_banded layout: shape (ndiag, n),
        # main diagonal in row nx
        aij = np.zeros((ndiag, n))
        aij[nx, 0] = weight[0, 0] + lam_x + lam_y
        aij[nx, 1:nx] = weight[0, 1:nx] + 2 * lam_x + lam_y
        aij[nx, nx : n - nx] = weight[1 : ny - 1].ravel() + 2 * (lam_x + lam_y)
        aij[nx, n - nx : n - 1] = weight[ny - 1, 0 : nx - 1] + 2 * lam_x + lam_y
        aij[nx, n - 1] = weight[ny - 1, nx - 1] + lam_x + lam_y

        aij[nx - 1, 1:n] = -lam_x
        aij[nx + 1, 0 : n - 1] = -lam_x

        # Main-diagonal fix-up at grid-row boundaries (one x-neighbour only);
        # these are flat indices into row nx of aij (offset nx * n)
        ind = np.arange(ny - 1) * nx + nx + nx * n
        aij.flat[ind - 1] -= lam_x
        aij.flat[ind] -= lam_x

        # No x-coupling across grid-row boundaries
        ind = np.arange(ny - 1) * nx + nx
        aij[nx + 1, ind - 1] = 0
        aij[nx - 1, ind] = 0

        aij[0, nx:n] = -lam_y
        aij[ndiag - 1, 0 : n - nx] = -lam_y

        # Bug fix: RHS used the undefined name `f`; it is the weighted data
        rhs = (weight * y).ravel()

        model = solve_banded((nx, nx), aij, rhs)
        model = np.reshape(model, (ny, nx))
        return model
1231
+
1232
def helcorr(obs_long, obs_lat, obs_alt, ra2000, dec2000, jd, system="barycentric"):
    """
    Calculate the barycentric (or heliocentric) radial velocity correction
    and the light-travel-time corrected Julian date of the observation,
    using astropy functions.

    Parameters
    ----------
    obs_long : float
        Longitude of observatory (degrees, western direction is positive)
    obs_lat : float
        Latitude of observatory (degrees)
    obs_alt : float
        Altitude of observatory (meters)
    ra2000 : float
        Right ascension of object for epoch 2000.0 (hours)
    dec2000 : float
        Declination of object for epoch 2000.0 (degrees)
    jd : float
        Julian date for the middle of exposure in MJD
    system : {"barycentric", "heliocentric"}, optional
        reference system of the result, barycentric: around earth-sun
        gravity center, heliocentric: around sun, usually barycentric
        is preferred (default: "barycentric")

    Returns
    -------
    correction : float
        radial velocity correction due to barycentre offset (km/s)
    hjd : float
        light-travel-time corrected Julian date for middle of exposure
        (with the constant offset applied below)

    Raises
    ------
    AttributeError
        if system is neither "barycentric" nor "heliocentric"
    """
    if system not in ("barycentric", "heliocentric"):
        raise AttributeError(
            "Could not parse system, values are: ('barycentric', 'heliocentric')"
        )

    # mid-exposure time; the input is MJD (full JD would be 2400000.5 + jd)
    mid_exposure = time.Time(jd, format="mjd")

    site = coord.EarthLocation.from_geodetic(obs_long, obs_lat, height=obs_alt)
    target = coord.SkyCoord(
        coord.Longitude(ra2000, unit=u.hour),
        coord.Latitude(dec2000, unit=u.degree),
        obstime=mid_exposure,
        location=site,
    )
    obs_time = time.Time(mid_exposure, location=site)

    if system == "barycentric":
        rv_corr = target.radial_velocity_correction().to(u.km / u.s).value
        travel_time = obs_time.light_travel_time(target)
    else:
        rv_corr = (
            target.radial_velocity_correction("heliocentric").to(u.km / u.s).value
        )
        travel_time = obs_time.light_travel_time(target, "heliocentric")

    # apply the light travel time and shift by the constant offset
    corrected_time = (obs_time.utc + travel_time).value - 2400000

    return -rv_corr, corrected_time