ExoIris-0.20.0-py3-none-any.whl → ExoIris-0.22.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- exoiris/exoiris.py +198 -37
- exoiris/loglikelihood.py +139 -0
- exoiris/tslpf.py +80 -11
- exoiris/util.py +46 -3
- {exoiris-0.20.0.dist-info → exoiris-0.22.0.dist-info}/METADATA +2 -1
- {exoiris-0.20.0.dist-info → exoiris-0.22.0.dist-info}/RECORD +9 -8
- {exoiris-0.20.0.dist-info → exoiris-0.22.0.dist-info}/WHEEL +0 -0
- {exoiris-0.20.0.dist-info → exoiris-0.22.0.dist-info}/licenses/LICENSE +0 -0
- {exoiris-0.20.0.dist-info → exoiris-0.22.0.dist-info}/top_level.txt +0 -0
exoiris/exoiris.py
CHANGED
@@ -32,7 +32,8 @@ from emcee import EnsembleSampler
 from matplotlib.pyplot import subplots, setp, figure, Figure, Axes
 from numpy import (any, where, sqrt, clip, percentile, median, squeeze, floor, ndarray, isfinite,
                    array, inf, arange, argsort, concatenate, full, nan, r_, nanpercentile, log10,
-                   ceil, unique)
+                   ceil, unique, zeros, cov)
+from numpy.typing import ArrayLike
 from numpy.random import normal
 from pytransit import UniformPrior, NormalPrior
 from pytransit.param import ParameterSet
@@ -44,6 +45,7 @@ from .ldtkld import LDTkLD
 from .tsdata import TSData, TSDataGroup
 from .tslpf import TSLPF
 from .wlpf import WhiteLPF
+from .loglikelihood import LogLikelihood


 def load_model(fname: Path | str, name: str | None = None):
@@ -131,6 +133,12 @@ def load_model(fname: Path | str, name: str | None = None):
     for i in range(hdr['NSPOTS']):
         a.add_spot(hdr[f'SP{i+1:02d}_EG'])

+    # Read the free k knot indices if they exist.
+    # ==========================================
+    if 'N_FREE_K' in hdr and hdr['N_FREE_K'] > 0:
+        n_free_k = hdr['N_FREE_K']
+        a._tsa.set_free_k_knots([int(hdr[f'KK_IX_{i:03d}']) for i in range(n_free_k)])
+
     # Read the priors.
     # ================
     priors = pickle.loads(codecs.decode(json.loads(hdul['PRIORS'].header['PRIORS']).encode(), "base64"))
@@ -152,7 +160,7 @@ class ExoIris:

     def __init__(self, name: str, ldmodel, data: TSDataGroup | TSData, nk: int = 50, nldc: int = 10, nthreads: int = 1,
                  tmpars: dict | None = None, noise_model: Literal["white", "fixed_gp", "free_gp"] = 'white',
-                 interpolation: Literal['
+                 interpolation: Literal['nearest', 'linear', 'pchip', 'makima', 'bspline', 'bspline-quadratic'] = 'makima'):
         """
         Parameters
         ----------
@@ -209,6 +217,7 @@ class ExoIris:
         self._white_fluxes: None | list[ndarray] = None
         self._white_errors: None | list[ndarray] = None
         self._white_models: None | list[ndarray] = None
+        self.white_gp_models: None | list[ndarray] = None

     def lnposterior(self, pvp: ndarray) -> ndarray:
         """Calculate the log posterior probability for a single parameter vector or an array of parameter vectors.
@@ -651,6 +660,9 @@ class ExoIris:
         the data and the number of columns specified (ncol). If the provided axes array (axs)
         does not accommodate all the subplots, the behavior is undefined.
         """
+        if self.white_gp_models is None:
+            raise ValueError("White light curve GP predictions are not available. Run 'optimize_gp_hyperparameters' first.")
+
         ndata = self.data.size

         if axs is None:
@@ -869,13 +881,13 @@ class ExoIris:

         if result == 'fit':
             pv = self._tsa._de_population[self._tsa._de_imin]
-            ks = self._tsa._eval_k(pv
+            ks = self._tsa._eval_k(pv)
             ar = 1e2 * concatenate([squeeze(k) for k in ks]) ** 2
             ax.plot(wavelength[ix], ar[ix], c='k')
             ax.plot(self._tsa.k_knots, 1e2 * pv[self._tsa._sl_rratios] ** 2, 'k.')
         else:
             df = pd.DataFrame(self._tsa._mc_chains.reshape([-1, self._tsa.ndim]), columns=self._tsa.ps.names)
-            ks = self._tsa._eval_k(df.
+            ks = self._tsa._eval_k(df.values)
             ar = 1e2 * concatenate(ks, axis=1) ** 2
             ax.fill_between(wavelength[ix], *percentile(ar[:, ix], [16, 84], axis=0), alpha=0.25)
             ax.plot(wavelength[ix], median(ar, 0)[ix], c='k')
@@ -891,7 +903,11 @@ class ExoIris:
         ax.set_xticks(xticks, labels=xticks)
         return ax.get_figure()

-    def plot_limb_darkening_parameters(
+    def plot_limb_darkening_parameters(
+        self,
+        result: None | Literal["fit", "mcmc"] = None,
+        axs: None | tuple[Axes, Axes] = None,
+    ) -> None | Figure:
         """Plot the limb darkening parameters.

         Parameters
@@ -920,56 +936,68 @@ class ExoIris:
         This method plots the limb darkening parameters for two-parameter limb darkening models. It supports only
         quadratic, quadratic-tri, power-2, and power-2-pm models.
         """
-        if not self._tsa.ldmodel in (
+        if not self._tsa.ldmodel in (
+            "quadratic",
+            "quadratic-tri",
+            "power-2",
+            "power-2-pm",
+        ):
             return None

         if axs is None:
-            fig, axs = subplots(1, 2, sharey=
+            fig, axs = subplots(1, 2, sharey="all", figsize=(13, 4))
         else:
             fig = axs[0].get_figure()

         if result is None:
-            result =
-        if result not in (
+            result = "mcmc" if self._tsa.sampler is not None else "fit"
+        if result not in ("fit", "mcmc"):
             raise ValueError("Result must be either 'fit', 'mcmc', or None")
-        if result ==
-
+        if result == "mcmc" and not (
+            self._tsa.sampler is not None or self.mcmc_chains is not None
+        ):
+            raise ValueError(
+                "Cannot plot posterior solution before running the MCMC sampler."
+            )

         wavelength = concatenate(self.data.wavelengths)
         ix = argsort(wavelength)

-        if result ==
+        if result == "fit":
             pv = self._tsa._de_population[self._tsa._de_imin]
             ldc = squeeze(concatenate(self._tsa._eval_ldc(pv), axis=1))
-            axs[0].plot(self._tsa.ld_knots, pv[self._tsa._sl_ld][0::2],
-            axs[0].plot(wavelength[ix], ldc[:,0][ix])
-            axs[1].plot(self._tsa.ld_knots, pv[self._tsa._sl_ld][1::2],
-            axs[1].plot(wavelength[ix], ldc[:,1][ix])
+            axs[0].plot(self._tsa.ld_knots, pv[self._tsa._sl_ld][0::2], "ok")
+            axs[0].plot(wavelength[ix], ldc[:, 0][ix])
+            axs[1].plot(self._tsa.ld_knots, pv[self._tsa._sl_ld][1::2], "ok")
+            axs[1].plot(wavelength[ix], ldc[:, 1][ix])
         else:
-
-
+            if self._tsa.sampler is not None:
+                pvp = self._tsa._mc_chains.reshape([-1, self._tsa.ndim])
+            else:
+                pvp = self.mcmc_chains.reshape([-1, self._tsa.ndim])
+            ldc = pvp[:, self._tsa._sl_ld]

-            ld1m = median(ldc[
-            ld1e = ldc[
-            ld2m = median(ldc[:,1::2], 0)
-            ld2e = ldc[:,1::2].std(0)
+            ld1m = median(ldc[:, ::2], 0)
+            ld1e = ldc[:, ::2].std(0)
+            ld2m = median(ldc[:, 1::2], 0)
+            ld2e = ldc[:, 1::2].std(0)

             ldc = concatenate(self._tsa._eval_ldc(pvp), axis=1)
-            ld1p = percentile(ldc[
-            ld2p = percentile(ldc[
+            ld1p = percentile(ldc[:, :, 0], [50, 16, 84], axis=0)
+            ld2p = percentile(ldc[:, :, 1], [50, 16, 84], axis=0)

             axs[0].fill_between(wavelength[ix], ld1p[1, ix], ld1p[2, ix], alpha=0.5)
-            axs[0].plot(wavelength[ix], ld1p[0][ix],
+            axs[0].plot(wavelength[ix], ld1p[0][ix], "k")
             axs[1].fill_between(wavelength[ix], ld2p[1, ix], ld2p[2, ix], alpha=0.5)
-            axs[1].plot(wavelength[ix], ld2p[0][ix],
+            axs[1].plot(wavelength[ix], ld2p[0][ix], "k")

-            axs[0].errorbar(self._tsa.ld_knots, ld1m, ld1e, fmt=
-            axs[1].errorbar(self._tsa.ld_knots, ld2m, ld2e, fmt=
+            axs[0].errorbar(self._tsa.ld_knots, ld1m, ld1e, fmt="ok")
+            axs[1].errorbar(self._tsa.ld_knots, ld2m, ld2e, fmt="ok")

         ldp = full((self.nldp, 2, 2), nan)
         for i in range(self.nldp):
             for j in range(2):
-                p = self.ps[self._tsa._sl_ld][i*2+j].prior
+                p = self.ps[self._tsa._sl_ld][i * 2 + j].prior
                 if isinstance(p, UniformPrior):
                     ldp[i, j, 0] = p.a
                     ldp[i, j, 1] = p.b
@@ -979,11 +1007,15 @@ class ExoIris:

         for i in range(2):
             for j in range(2):
-                axs[i].plot(self._tsa.ld_knots, ldp[:, i, j],
-
-        setp(
-
-
+                axs[i].plot(self._tsa.ld_knots, ldp[:, i, j], ":", c="C0")
+
+        setp(
+            axs,
+            xlim=(wavelength.min(), wavelength.max()),
+            xlabel=r"Wavelength [$\mu$m]",
+        )
+        setp(axs[0], ylabel="Limb darkening coefficient 1")
+        setp(axs[1], ylabel="Limb darkening coefficient 2")
         return fig

     def plot_residuals(self, result: Optional[str] = None, ax: None | Axes | Sequence[Axes] = None,
@@ -1107,8 +1139,8 @@ class ExoIris:
         return fig

     @property
-    def
-        """Get the posterior transmission spectrum as
+    def transmission_spectrum_table(self) -> Table:
+        """Get the posterior transmission spectrum as an Astropy Table.

         Raises
         ------
@@ -1120,7 +1152,7 @@ class ExoIris:

         pvp = self.posterior_samples
         wls = concatenate(self.data.wavelengths)
-        ks = concatenate(self._tsa._eval_k(pvp.values
+        ks = concatenate(self._tsa._eval_k(pvp.values), axis=1)
         ar = ks**2
         ix = argsort(wls)
         return Table(data=[wls[ix]*u.micrometer,
@@ -1128,6 +1160,101 @@ class ExoIris:
                            median(ar, 0)[ix], ar.std(0)[ix]],
                      names = ['wavelength', 'radius_ratio', 'radius_ratio_e', 'area_ratio', 'area_ratio_e'])

+    def transmission_spectrum_samples(self, wavelengths: ndarray | None = None,
+                                      kind: Literal['radius_ratio', 'depth'] = 'depth',
+                                      samples: ndarray | None = None) -> tuple[ndarray, ndarray]:
+        """Calculate posterior transmission spectrum samples.
+
+        This method computes the posterior samples of the transmission spectrum,
+        either as radius ratios or as transit depths, depending on the specified
+        kind. It interpolates the data for given wavelengths or uses the
+        instrumental wavelength grid if none is provided. Requires that MCMC
+        sampling has been performed prior to calling this method.
+
+        Parameters
+        ----------
+        wavelengths
+            The array of wavelengths at which the spectrum should be sampled.
+            If None, the default wavelength grid defined by the instrumental data
+            will be used.
+        kind
+            Specifies the desired representation of the transmission spectrum.
+            'radius_ratio' returns the spectrum in radius ratio units, while
+            'depth' returns the spectrum in transit depth units. Default is 'depth'.
+        samples
+            Array of posterior samples to use for calculation. If None,
+            the method will use previously stored posterior samples.
+
+        Returns
+        -------
+        ndarray
+            Array containing the transmission spectrum samples for the specified
+            wavelengths. The representation (radius ratio or depth) depends on the
+            specified `kind`.
+        """
+        if self.mcmc_chains is None:
+            raise ValueError("Cannot calculate posterior transmission spectrum before running the MCMC sampler.")
+
+        if kind not in ('radius_ratio', 'depth'):
+            raise ValueError("Invalid value for `kind`. Must be either 'radius_ratio' or 'depth'.")
+
+        if samples is None:
+            samples = self.posterior_samples.values
+
+        if wavelengths is None:
+            wavelengths = concatenate(self.data.wavelengths)
+            wavelengths.sort()
+
+        k_posteriors = zeros((samples.shape[0], wavelengths.size))
+        for i, pv in enumerate(samples):
+            k_posteriors[i, :] = self._tsa._ip(wavelengths, self._tsa.k_knots, pv[self._tsa._sl_rratios])
+
+        if kind == 'radius_ratio':
+            return wavelengths, k_posteriors
+        else:
+            return wavelengths, k_posteriors**2
+
+    def transmission_spectrum(self, wavelengths: ndarray | None = None, kind: Literal['radius_ratio', 'depth'] = 'depth', samples: ndarray | None = None, return_cov: bool = True) -> tuple[ndarray, ndarray]:
+        """Compute the transmission spectrum.
+
+        This method calculates the mean transmission spectrum values and the covariance matrix
+        (or standard deviations) for the given parameter set. The mean represents the average
+        transmission spectrum, and the covariance provides information on the uncertainties and
+        correlations between wavelengths or samples.
+
+        Parameters
+        ----------
+        wavelengths
+            Array of wavelength values at which to calculate the transmission spectrum.
+            If None, the default grid will be used.
+        kind
+            Specifies the method to represent the spectrum. 'radius_ratio' computes the
+            spectrum in terms of the planet-to-star radius ratio, while 'depth' computes
+            the spectrum in terms of transit depth.
+        samples
+            Array of samples used to compute the spectrum uncertainties. If None, previously
+            stored samples will be utilized.
+        return_cov : bool, optional
+            Indicates whether to return the covariance matrix of the computed transmission
+            spectrum. If True, the covariance matrix is returned along with the mean spectrum.
+            If False, the standard deviation of the spectrum is returned.
+
+        Returns
+        -------
+        tuple[ndarray, ndarray]
+            A tuple containing two arrays:
+            - The mean transmission spectrum.
+            - The covariance matrix of the spectrum (if `return_cov` is True), or the
+              standard deviation (if `return_cov` is False).
+        """
+        sp_samples = self.transmission_spectrum_samples(wavelengths, kind, samples)[1]
+        mean = sp_samples.mean(0)
+        if return_cov:
+            return mean, cov(sp_samples, rowvar=False)
+        else:
+            return mean, sp_samples.std(0)
+
     def save(self, overwrite: bool = False) -> None:
         """Save the ExoIris analysis to a FITS file.

@@ -1145,6 +1272,13 @@ class ExoIris:
         pri.header['interp'] = self._tsa.interpolation
         pri.header['noise'] = self._tsa.noise_model

+        if self._tsa.free_k_knot_ids is None:
+            pri.header['n_free_k'] = 0
+        else:
+            pri.header['n_free_k'] = len(self._tsa.free_k_knot_ids)
+            for i, ix in enumerate(self._tsa.free_k_knot_ids):
+                pri.header[f'kk_ix_{i:03d}'] = ix
+
         # Priors
         # ======
         pr = pf.ImageHDU(name='priors')
@@ -1228,6 +1362,33 @@ class ExoIris:

         hdul.writeto(f"{self.name}.fits", overwrite=True)

+    def create_loglikelihood_function(self, wavelengths: ndarray, kind: Literal['radius_ratio', 'depth'] = 'depth',
+                                      method: Literal['svd', 'randomized_svd', 'eigh'] = 'svd',
+                                      n_max_samples: int = 10000) -> LogLikelihood:
+        """Create a reduced-rank Gaussian log-likelihood function for retrieval.
+
+        Parameters
+        ----------
+        wavelengths
+            The wavelength grid used in the theoretical transmission spectra.
+
+        kind
+            The transmission spectrum type. Can be either 'radius_ratio' or 'depth'.
+
+        Returns
+        -------
+        LogLikelihood
+            An instance of LogLikelihood for analyzing the consistency of the model
+            with the provided wavelengths and chosen log-likelihood kind.
+        """
+        if self.mcmc_chains is None:
+            raise ValueError("Cannot create log-likelihood function before running the MCMC sampler.")
+        return LogLikelihood(wavelengths,
+                             self.transmission_spectrum_samples(wavelengths, kind)[1],
+                             method=method,
+                             n_max_samples=n_max_samples,
+                             nk=self.nk)
+
     def create_initial_population(self, n: int, source: str, add_noise: bool = True) -> ndarray:
         """Create an initial parameter vector population for the DE optimisation.

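Note: the posterior-spectrum accessors added above are ordinary methods on the analysis object, so a typical post-MCMC workflow could look like the sketch below. This is illustrative only; `ei` stands for an already-sampled ExoIris instance and the wavelength grid is an arbitrary placeholder.

from numpy import linspace

wl = linspace(0.6, 2.8, 200)                                                 # retrieval wavelength grid in microns
wl_out, depth_samples = ei.transmission_spectrum_samples(wl, kind='depth')   # posterior transit-depth draws on wl
mean_depth, depth_cov = ei.transmission_spectrum(wl, kind='depth')           # mean spectrum plus full covariance
lnlike = ei.create_loglikelihood_function(wl, kind='depth')                  # reduced-rank Gaussian likelihood
print(lnlike(mean_depth))                                                    # log-likelihood of the posterior-mean spectrum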
exoiris/loglikelihood.py
ADDED
@@ -0,0 +1,139 @@
+# ExoIris: fast, flexible, and easy exoplanet transmission spectroscopy in Python.
+# Copyright (C) 2025 Hannu Parviainen
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <https://www.gnu.org/licenses/>.
+from typing import Literal
+
+from numpy import full, cov, sum, ndarray, log, pi, asarray
+from numpy.linalg import eigh, svd
+from sklearn.utils.extmath import randomized_svd
+
+
+class LogLikelihood:
+    def __init__(self, wavelength: ndarray, spectra: None | ndarray = None, spmean: None | ndarray = None,
+                 spcov: None | ndarray = None, eps: float = 1e-10, method: Literal['svd', 'randomized_svd', 'eigh'] = 'svd',
+                 n_max_samples: int = 10000, nk: int | None = None):
+        """Reduced-rank Normal log-likelihood.
+
+        This class constructs a statistically robust log-likelihood function for
+        comparing a theoretical transmission spectrum to the posterior distribution
+        inferred by ExoIris.
+
+        Because the posterior samples are generated from a spline with $K$ knots
+        but evaluated on $M$ wavelengths ($M \gg K$), the empirical covariance
+        matrix is singular or strongly ill-conditioned. This class solves the
+        rank-deficiency problem by projecting the model into the principal
+        subspace of the posterior (Karhunen-Loève compression).
+
+        Parameters
+        ----------
+        wavelength
+            The wavelength grid with a shape (M,) on which the posterior samples and theoretical
+            spectra are evaluated.
+        spectra
+            The posterior spectrum samples with shape (N_samples, M_wavelengths).
+            If provided, ``spmean`` and ``spcov`` are computed automatically.
+            Mutually exclusive with ``spmean`` and ``spcov``.
+        spmean
+            The pre-computed mean spectrum with shape (M,). Must be provided
+            along with ``spcov`` if ``spectra`` is None.
+        spcov
+            The pre-computed covariance matrix with shape (M, M). Must be provided
+            along with ``spmean`` if ``spectra`` is None.
+        eps
+            Relative tolerance factor used to determine which eigenvalues of
+            the covariance matrix are considered significant. Eigenvalues smaller
+            than ``eps * max_eigenvalue`` are discarded. Default is ``1e-10``.
+
+        Notes
+        -----
+        This implementation follows the "Signal-to-Noise Eigenmode" formalism
+        described by Tegmark et al. (1997) for analyzing rank-deficient
+        cosmological datasets.
+
+        The log-likelihood is evaluated as:
+
+        .. math:: \ln \mathcal{L} = -\frac{1}{2} \left[ \sum_{i=1}^{K} \frac{p_i^2}{\lambda_i} + \sum_{i=1}^{K} \ln(\lambda_i) + K \ln(2\pi) \right]
+
+        where $\lambda_i$ are the significant eigenvalues of the covariance
+        matrix, and $p_i$ are the projections of the model residuals onto the
+        corresponding eigenvectors (principal components).
+
+        References
+        ----------
+        Tegmark, M., Taylor, A. N., & Heavens, A. F. (1997). Karhunen-Loève
+        eigenvalue problems in cosmology: how should we tackle large data sets?
+        *The Astrophysical Journal*, 480(1), 22.
+        """
+        self.wavelength = wavelength
+        self.eps = eps
+
+        if spectra is not None and (spmean is not None or spcov is not None):
+            raise ValueError("Cannot specify both `spectra` and `spmean` and `spcov`.")
+
+        if spectra is None and (spmean is None or spcov is None):
+            raise ValueError("Must specify either `spectra` or both `spmean` and `spcov`.")
+
+        if spectra is not None:
+            spectra = spectra[:n_max_samples, :]
+            self.spmean = spectra.mean(axis=0)
+
+        if method == 'svd':
+            _, sigma, evecs = svd(spectra - spectra.mean(0), full_matrices=False)
+            evals = (sigma**2) / (spectra.shape[0] - 1)
+            evecs = evecs.T
+        elif method == 'randomized_svd':
+            if nk is None:
+                raise ValueError("Must specify `nk` when using `method='randomized_svd'`.")
+            _, sigma, evecs = randomized_svd(spectra - spectra.mean(0), n_components=nk, n_iter=5, random_state=0)
+            evals = (sigma ** 2) / (spectra.shape[0] - 1)
+            evecs = evecs.T
+        elif method == 'eigh' or (spmean is not None and spcov is not None):
+            if spectra is not None:
+                self.spcov = cov(spectra, rowvar=False)
+            else:
+                self.spmean = spmean
+                self.spcov = spcov
+            evals, evecs = eigh(self.spcov)
+
+        keep = evals > eps * evals.max()
+        self.eigenvalues, self.eigenvectors = evals[keep], evecs[:, keep]
+        self.log_det = sum(log(self.eigenvalues))
+        self.log_twopi = self.eigenvalues.size * log(2*pi)
+
+    def __call__(self, model: ndarray | float) -> ndarray:
+        """Evaluate the log-likelihood of a model spectrum.
+
+        Parameters
+        ----------
+        model : float or ndarray
+            The theoretical model spectrum. If a float is provided, it is
+            broadcast to a flat spectrum. If an array, it must match the
+            wavelength grid size used during initialization.
+
+        Returns
+        -------
+        float
+            The natural log-likelihood $\ln \mathcal{L}$.
+        """
+        if isinstance(model, float):
+            model = full(self.wavelength.size, model)
+        else:
+            model = asarray(model)
+
+        # Project the residuals onto the eigenvectors (Basis Rotation)
+        # and Compute the Mahalanobis Distance (Chi-Squared in Subspace).
+        p = (self.spmean - model) @ self.eigenvectors
+        chisq = sum(p**2 / self.eigenvalues)
+        return -0.5 * (chisq + self.log_det + self.log_twopi)
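Note: the eigenvalue cut above is what makes this likelihood usable when the posterior spectra live on a low-dimensional spline subspace. A minimal, self-contained sketch with synthetic rank-deficient samples (not ExoIris output; every name below is a placeholder):

import numpy as np
from exoiris.loglikelihood import LogLikelihood

rng = np.random.default_rng(1)
wl = np.linspace(0.6, 2.8, 50)                         # 50 wavelengths...
knots = np.linspace(0.6, 2.8, 5)                       # ...but only 5 underlying knots
knot_draws = 0.01 + 1e-4 * rng.normal(size=(2000, 5))  # posterior-like draws of knot depths
spectra = np.vstack([np.interp(wl, knots, d) for d in knot_draws])  # 50x50 covariance is singular

lnlike = LogLikelihood(wl, spectra)                    # default method='svd'
print(lnlike.eigenvalues.size)                         # only a handful of modes survive the eps cut
print(lnlike(spectra.mean(0)))                         # maximal at the posterior mean
print(lnlike(spectra.mean(0) + 5e-4))                  # a constant offset model is penalised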
exoiris/tslpf.py
CHANGED
@@ -20,7 +20,7 @@ from typing import Optional, Literal
 from ldtk import BoxcarFilter, LDPSetCreator # noqa
 from numba import njit, prange
 from numpy import zeros, log, pi, linspace, inf, atleast_2d, newaxis, clip, arctan2, ones, floor, sum, concatenate, \
-    sort, ndarray, zeros_like, array, tile, arange, squeeze, dstack
+    sort, ndarray, zeros_like, array, tile, arange, squeeze, dstack, nan, diff, all
 from numpy.random import default_rng
 from celerite2 import GaussianProcess as GP, terms

@@ -35,7 +35,6 @@ from scipy.interpolate import (
     splev,
     Akima1DInterpolator,
     interp1d,
-    FloaterHormannInterpolator,
 )

 from .tsmodel import TransmissionSpectroscopyModel as TSModel
@@ -86,6 +85,10 @@ def ip_bspline(x, xk, yk):
     return splev(x, splrep(xk, yk))


+def ip_bspline_quadratic(x, xk, yk):
+    return splev(x, splrep(xk, yk, k=2))
+
+
 def ip_makima(x, xk, yk):
     return Akima1DInterpolator(xk, yk, method='makima', extrapolate=True)(x)

@@ -102,6 +105,13 @@ def add_knots(x_new, x_old):
     return sort(concatenate([x_new, x_old]))


+interpolator_choices = ("bspline", "pchip", "makima", "nearest", "linear", "bspline-quadratic")
+
+
+interpolators = {'bspline': ip_bspline, 'bspline-quadratic': ip_bspline_quadratic, 'pchip': ip_pchip,
+                 'makima': ip_makima, 'nearest': ip_nearest, 'linear': ip_linear}
+
+
 def clean_knots(knots, min_distance, lmin=0, lmax=inf):
     """Clean the knot table by replacing groups of adjacent knots with a single knot at the group mean.

@@ -143,19 +153,21 @@ def clean_knots(knots, min_distance, lmin=0, lmax=inf):
 class TSLPF(LogPosteriorFunction):
     def __init__(self, runner, name: str, ldmodel, data: TSDataGroup, nk: int = 50, nldc: int = 10, nthreads: int = 1,
                  tmpars = None, noise_model: Literal["white", "fixed_gp", "free_gp"] = 'white',
-                 interpolation: Literal['
+                 interpolation: Literal['nearest', 'linear', 'pchip', 'makima', 'bspline', 'bspline-quadratic'] = 'makima'):
         super().__init__(name)
         self._runner = runner
         self._original_data: TSDataGroup | None = None
         self.data: TSDataGroup | None = None
         self.npb: list[int] | None= None
         self.npt: list[int] | None = None
-        self.ndim: int | None = None
         self._baseline_models: list[ndarray] | None = None
         self.interpolation: str = interpolation

-
-
+        if interpolation not in interpolator_choices:
+            raise ValueError(f'interpolation must be one of {interpolator_choices}')
+
+        self._ip = interpolators[interpolation]
+        self._ip_ld = interpolators['bspline']

         self._gp: Optional[list[GP]] = None
         self._gp_time: Optional[list[ndarray]] = None
@@ -179,6 +191,7 @@ class TSLPF(LogPosteriorFunction):
         self.nk = nk

         self.k_knots = linspace(data.wlmin, data.wlmax, self.nk)
+        self.free_k_knot_ids = None

         if isinstance(ldmodel, LDTkLD):
             self.ld_knots = array([])
@@ -209,6 +222,10 @@ class TSLPF(LogPosteriorFunction):
     def errors(self) -> list[ndarray]:
         return self.data.errors

+    @property
+    def ndim(self) -> int:
+        return len(self.ps)
+
     def set_data(self, data: TSDataGroup):
         self._original_data = deepcopy(data)
         self.data = data
@@ -230,7 +247,6 @@ class TSLPF(LogPosteriorFunction):
         self._init_p_baseline()
         self._init_p_bias()
         self.ps.freeze()
-        self.ndim = len(self.ps)

     def initialize_spots(self, tstar: float, wlref: float, include_tlse: bool = True) -> None:
         self.spot_model = SpotModel(self, tstar, wlref, include_tlse)
@@ -523,6 +539,56 @@ class TSLPF(LogPosteriorFunction):
         self._mc_chains = fmcn.reshape([mco.shape[0], mco.shape[1], ndn])
         self.sampler = None

+    def set_free_k_knots(self, ids):
+        self.free_k_knot_ids = ids
+
+        # Remove existing parameter block if one exists
+        block_names = [b.name for b in self.ps.blocks]
+        try:
+            bid = block_names.index('free_k_knot_locations')
+            del self.ps[self.ps.blocks[bid].slice]
+            del self.ps.blocks[bid]
+        except ValueError:
+            pass
+
+        # Calculate minimum distances between knots
+        min_distances = zeros(self.nk)
+        min_distances[0] = self.k_knots[1] - self.k_knots[0]
+        min_distances[self.nk-1] = self.k_knots[self.nk-1] - self.k_knots[self.nk-2]
+        for i in range(1, self.nk-1):
+            for j in range(i):
+                min_distances[i] = min(self.k_knots[i] - self.k_knots[i-1], self.k_knots[i+1] - self.k_knots[i])
+
+        # Create new parameter block
+        ps = []
+        for kid in ids:
+            sigma = min_distances[kid]/6 if (kid+1 in ids or kid-1 in ids) else min_distances[kid]/4
+            ps.append(GParameter(f'kl_{kid:04d}', f'k knot {kid} location', 'um', NP(self.k_knots[kid], sigma), [0, inf]))
+        self.ps.thaw()
+        self.ps.add_global_block('free_k_knot_locations', ps)
+        self.ps.freeze()
+        self._start_kloc = self.ps.blocks[-1].start
+        self._sl_kloc = self.ps.blocks[-1].slice
+
+        try:
+            pid = [p.__name__ for p in self._additional_log_priors].index('k_knot_order_priors')
+            del self._additional_log_priors[pid]
+        except ValueError:
+            pass
+
+        # Add a prior on the order of the knots
+        def k_knot_order_prior(pv):
+            pv = atleast_2d(pv)
+            logp = zeros(pv.shape[0])
+            k_knots = self.k_knots.copy()
+            for i in range(pv.shape[0]):
+                k_knots[self.free_k_knot_ids] = pv[i, self._sl_kloc]
+                original_separations = diff(self.k_knots)
+                current_separations = diff(k_knots)
+                logp[i] = 1e2*(clip(current_separations / original_separations / 0.25, -inf, 1.0) - 1.).sum()
+            return logp
+        self._additional_log_priors.append(k_knot_order_prior)
+

     def add_ld_knots(self, knot_wavelengths) -> None:
         """Add limb darkening knots to the model.
@@ -590,9 +656,12 @@ class TSLPF(LogPosteriorFunction):
         """
         pvp = atleast_2d(pvp)
         ks = [zeros((pvp.shape[0], npb)) for npb in self.npb]
+        k_knots = self.k_knots.copy()
         for ids in range(self.data.size):
             for ipv in range(pvp.shape[0]):
-
+                if self.free_k_knot_ids is not None:
+                    k_knots[self.free_k_knot_ids] = pvp[ipv, self._sl_kloc]
+                ks[ids][ipv,:] = self._ip(self.wavelengths[ids], k_knots, pvp[ipv, self._sl_rratios])
         return ks

     def _eval_ldc(self, pvp):
@@ -608,8 +677,8 @@ class TSLPF(LogPosteriorFunction):
         ldp = [zeros((pvp.shape[0], npb, 2)) for npb in self.npb]
         for ids in range(self.data.size):
             for ipv in range(pvp.shape[0]):
-                ldp[ids][ipv, :, 0] =
-                ldp[ids][ipv, :, 1] =
+                ldp[ids][ipv, :, 0] = self._ip_ld(self.wavelengths[ids], self.ld_knots, ldk[ipv, :, 0])
+                ldp[ids][ipv, :, 1] = self._ip_ld(self.wavelengths[ids], self.ld_knots, ldk[ipv, :, 1])
         return ldp

     def transit_model(self, pv, copy=True):
@@ -640,7 +709,7 @@ class TSLPF(LogPosteriorFunction):
         pv = atleast_2d(pv)
         ldp = self._eval_ldc(pv)
         t0s = pv[:, self._sl_tcs]
-        k = self._eval_k(pv
+        k = self._eval_k(pv)
         p = pv[:, 1]
         aor = as_from_rhop(pv[:, 0], p)
         inc = i_from_ba(pv[:, 2], aor)
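Note: the new 'bspline-quadratic' option simply lowers the spline degree from cubic to quadratic, which can damp overshooting between widely spaced radius-ratio knots. A standalone sketch of the two helpers' behaviour on toy data (scipy only, placeholder values):

from numpy import linspace, sin, abs
from scipy.interpolate import splrep, splev

xk = linspace(0.6, 2.8, 8)                 # knot wavelengths
yk = 0.1 + 0.005 * sin(3.0 * xk)           # radius ratios at the knots
x = linspace(0.6, 2.8, 200)                # dense evaluation grid

k_cubic = splev(x, splrep(xk, yk))         # ip_bspline: cubic (k=3) interpolating spline
k_quad = splev(x, splrep(xk, yk, k=2))     # ip_bspline_quadratic: quadratic (k=2) spline

print(abs(k_cubic - k_quad).max())         # both pass through the knots, differ in between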
exoiris/util.py
CHANGED
@@ -15,8 +15,10 @@
 # along with this program. If not, see <https://www.gnu.org/licenses/>.

 from numba import njit
-from numpy import zeros, sum, sqrt, linspace, vstack, concatenate, floor, dot, ndarray, nan
-
+from numpy import (zeros, sum, sqrt, linspace, vstack, concatenate, floor, dot, ndarray, nan, asarray, tile)
+from numpy._typing import ArrayLike
+from pytransit import TSModel
+from pytransit.orbits import i_from_ba

 @njit
 def bin2d(v, e, el, er, bins, estimate_errors: bool = False) -> tuple[ndarray, ndarray]:
@@ -116,4 +118,45 @@ def create_binning(ranges, bwidths):
         n = int(floor((r[1] - r[0]) / w))
         e = linspace(*r, num=n)
         bins.append(vstack([e[:-1], e[1:]]).T)
-    return concatenate(bins)
+    return concatenate(bins)
+
+
+def create_mock_model(ks: ArrayLike, times: ArrayLike = None, ldc: ArrayLike = None, t0: float = 0.0, p: float =2.0, a: float =8.0, b: float =0.0) -> ndarray:
+    """Create a mock transmission spectrum observation using given parameters.
+
+    Parameters
+    ----------
+    ks
+        Array of radius ratios, one radius ratio per wavelength.
+    times
+        Array of time values to set the data points. If None, defaults to a
+        linspace of 500 points in the range [-0.1, 0.1].
+    ldc
+        Array representing the limb darkening coefficients. If None, defaults to
+        a tile of [0.4, 0.4] for each wavelength element.
+    t0
+        Transit center.
+    p
+        Orbital period.
+    a
+        Semi-major axis.
+    b
+        Impact parameter.
+
+    Returns
+    -------
+    ndarray
+        Mock spectrophotometric light curves.
+
+    """
+    ks = asarray(ks)
+    if times is None:
+        times = linspace(-0.1, 0.1, 500)
+    if ldc is None:
+        ldc = tile([0.4, 0.4], (1, ks.size, 1))
+    inc = i_from_ba(b, a)
+
+    m1 = TSModel('power-2', ng=100, nzin=50, nzlimb=50)
+    m1.set_data(times)
+    f1 = m1.evaluate(ks, ldc, t0, p, a, inc)[0]
+    return f1
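Note: `create_mock_model` gives a quick way to generate synthetic spectrophotometry for testing. A minimal sketch (placeholder values; assumes pytransit is installed, as the package requires):

from numpy import full, linspace
from exoiris.util import create_mock_model

ks = full(20, 0.1)                         # one radius ratio per wavelength channel
times = linspace(-0.1, 0.1, 500)           # times in days around mid-transit
flux = create_mock_model(ks, times)        # mock light curves, one per channel
print(flux.shape, float(flux.min()))       # transit depth of roughly ks**2, about 1%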
{exoiris-0.20.0.dist-info → exoiris-0.22.0.dist-info}/METADATA
CHANGED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: ExoIris
-Version: 0.
+Version: 0.22.0
 Summary: Easy and robust exoplanet transmission spectroscopy.
 Author-email: Hannu Parviainen <hannu@iac.es>
 License: GPLv3
@@ -29,6 +29,7 @@ Requires-Dist: xarray
 Requires-Dist: seaborn
 Requires-Dist: astropy
 Requires-Dist: uncertainties
+Requires-Dist: scikit-learn
 Dynamic: license-file

 # ExoIris: Fast and Flexible Transmission Spectroscopy in Python
{exoiris-0.20.0.dist-info → exoiris-0.22.0.dist-info}/RECORD
CHANGED

@@ -1,16 +1,17 @@
 exoiris/__init__.py,sha256=LU5jAE7_OVPLHFO0UAOGS0e0wuWV6rdSD0Qveet11K8,1147
 exoiris/binning.py,sha256=-Y9hdK0jZj8DOS82keaprneid2lZ4rCx-keWlKi0LP8,6455
 exoiris/ephemeris.py,sha256=dthBkJztT5yAP6VnnO7jGvxikboFUQBUGPUfBCFrA3w,1316
-exoiris/exoiris.py,sha256=
+exoiris/exoiris.py,sha256=U7bMoEovZ8lyerN4t7Rb3DA1jMnx0s3QVTo83_BTosQ,60863
 exoiris/ldtkld.py,sha256=7H1r1xail3vSKdsNKorMTqivnRKU9WrOVH-uE4Ky2jM,3495
+exoiris/loglikelihood.py,sha256=lVm7kMyzy-xxP5VvZMV7kJY3KovbhlEz3u_6O7R6zgI,6309
 exoiris/spotmodel.py,sha256=9-DxvVzGzxf6AjQfrzZreyJB4Htw0gsIAD3nWl0tQMc,7160
 exoiris/tsdata.py,sha256=WqId5rfZR08pFZ83UZiyO39-QjX6WcB1GrUYolZsM-4,35323
-exoiris/tslpf.py,sha256=
+exoiris/tslpf.py,sha256=FYEAv2lNU4TQPVVuB7qIfQFefsru0tf_kZOnlQaRbS0,34096
 exoiris/tsmodel.py,sha256=6NaGY48fWHUT_7ti6Ao618PN-LgyoIhfQd8lZQqZ7hU,5160
-exoiris/util.py,sha256=
+exoiris/util.py,sha256=uNv_c3Kuv1lml8MuDAuyElO4s3f1tRIQ1QMlLaI7Yak,5921
 exoiris/wlpf.py,sha256=g6h1cLk2-nKD8u_FzwXNVVGFK4dry8fBr0A70LA5gJw,6281
-exoiris-0.
-exoiris-0.
-exoiris-0.
-exoiris-0.
-exoiris-0.
+exoiris-0.22.0.dist-info/licenses/LICENSE,sha256=OXLcl0T2SZ8Pmy2_dmlvKuetivmyPd5m1q-Gyd-zaYY,35149
+exoiris-0.22.0.dist-info/METADATA,sha256=Y5gNCFu6JH-lv65UPuY_Qts_HV9SgXKr1vRc1U0km48,5084
+exoiris-0.22.0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+exoiris-0.22.0.dist-info/top_level.txt,sha256=EoNxT6c5mQDcM0f_LUQB-ETsYg03lNaV3o2L_Yc6-aE,8
+exoiris-0.22.0.dist-info/RECORD,,
|
|
File without changes
|
|
File without changes
|