ExoIris 1.0.0-py3-none-any.whl → 1.1.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
exoiris/bin1d.py ADDED
@@ -0,0 +1,97 @@
+ # ExoIris: fast, flexible, and easy exoplanet transmission spectroscopy in Python.
+ # Copyright (C) 2026 Hannu Parviainen
+ #
+ # This program is free software: you can redistribute it and/or modify
+ # it under the terms of the GNU General Public License as published by
+ # the Free Software Foundation, either version 3 of the License, or
+ # (at your option) any later version.
+ #
+ # This program is distributed in the hope that it will be useful,
+ # but WITHOUT ANY WARRANTY; without even the implied warranty of
+ # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ # GNU General Public License for more details.
+ #
+ # You should have received a copy of the GNU General Public License
+ # along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+ from numba import njit
+ from numpy import zeros, isfinite, where, nan, sqrt, ndarray, sum
+
+ @njit
+ def bin1d(v, e, el, er, bins, estimate_errors: bool = False) -> tuple[ndarray, ndarray]:
+     """Bin 2D spectrophotometry data with its uncertainties into predefined bins along the first axis.
+
+     Parameters
+     ----------
+     v : ndarray
+         A 2D spectrophotometry array.
+     e : ndarray
+         A 2D array of uncertainties associated with the spectrophotometry in `v`, matching the shape of `v`.
+     el : ndarray
+         A 1D array containing the left edges of the integration ranges for each spectral data point.
+     er : ndarray
+         A 1D array containing the right edges of the integration ranges for each spectral data point.
+     bins : ndarray
+         A 2D array (n_bins, 2) containing the left and right edges of the bins, sorted in ascending order.
+     estimate_errors : bool, optional
+         If True, estimate the uncertainties from the scatter of the data. Default is False.
+
+     Returns
+     -------
+     tuple of ndarrays
+         A tuple containing two 2D arrays:
+         - The first array (`bv`) contains the binned values of the transmission spectrum.
+         - The second array (`be`) contains the binned uncertainties.
+     """
+     nbins = len(bins)
+     ndata = v.shape[0]
+     bv = zeros((nbins, v.shape[1]))
+     be = zeros((nbins, v.shape[1]))
+     nonfin_weights = isfinite(v).astype('d')
+     v = where(nonfin_weights, v, 0.0)
+     e2 = where(nonfin_weights, e**2, 0.0)
+     weights = zeros(v.shape)
+     npt = zeros(v.shape[1])
+
+     i = 0
+     for ibin in range(nbins):
+         npt[:] = 0
+         bel, ber = bins[ibin]
+         for i in range(i, ndata - 1):
+             if el[i + 1] > bel:
+                 break
+         il = i
+         if er[i] > ber:
+             weights[i, :] = ber - bel
+             npt += 1
+         else:
+             weights[i, :] = er[i] - max(el[i], bel)
+             npt += 1
+             for i in range(i + 1, ndata):
+                 if er[i] < ber:
+                     weights[i, :] = er[i] - el[i]
+                     npt += 1
+
+                 else:
+                     weights[i, :] = ber - el[i]
+                     npt += 1
+                     break
+         ir = i
+
+         weights[il:ir+1, :] *= nonfin_weights[il:ir+1, :]
+         weights[il:ir+1, :] /= weights[il:ir+1, :].sum(0)
+         npt += (nonfin_weights[il:ir+1, :] - 1.0).sum(0)
+         ws = sum(weights[il:ir+1, :], 0)
+         ws2 = sum(weights[il:ir+1, :]**2, 0)
+         ws = where(ws > 0, ws, nan)
+         bv[ibin] = vmean = sum(weights[il:ir+1, :] * v[il:ir+1, :], 0) / ws
+
+         if estimate_errors:
+             var_sum = sum(weights[il:ir+1, :] * (v[il:ir+1, :] - vmean)**2, 0)
+             denominator = ws - ws2 / ws
+             sample_variance = var_sum / denominator
+             be[ibin, :] = where((npt > 1) & (ws**2 > ws2),
+                                 sqrt(sample_variance * ws2 / (ws * ws)), nan)
+         else:
+             be[ibin] = sqrt(sum(weights[il:ir+1, :]**2 * e2[il:ir+1, :], 0)) / ws
+     return bv, be
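
For orientation: bin1d weights every input point by its overlap with the target bin, zeroes out non-finite values, and either propagates the supplied uncertainties or, with estimate_errors=True, estimates them from the weighted in-bin scatter. A minimal sketch of how it might be called on synthetic data (the names, shapes, and values below are illustrative, not part of the package API):

    import numpy as np
    from exoiris.bin1d import bin1d

    nwl, nexp = 100, 40                           # 100 spectral points, 40 exposures
    edges = np.linspace(1.0, 2.0, nwl + 1)        # wavelength edges
    el, er = edges[:-1], edges[1:]                # left and right edge of each point
    v = np.random.normal(1.0, 1e-3, (nwl, nexp))  # synthetic fluxes
    e = np.full((nwl, nexp), 1e-3)                # synthetic uncertainties
    bins = np.array([[1.0, 1.25], [1.25, 1.5], [1.5, 1.75], [1.75, 2.0]])

    bv, be = bin1d(v, e, el, er, bins)
    print(bv.shape, be.shape)                     # (4, 40): one row per bin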
exoiris/bin2d.py ADDED
@@ -0,0 +1,218 @@
+ # ExoIris: fast, flexible, and easy exoplanet transmission spectroscopy in Python.
+ # Copyright (C) 2026 Hannu Parviainen
+ #
+ # This program is free software: you can redistribute it and/or modify
+ # it under the terms of the GNU General Public License as published by
+ # the Free Software Foundation, either version 3 of the License, or
+ # (at your option) any later version.
+ #
+ # This program is distributed in the hope that it will be useful,
+ # but WITHOUT ANY WARRANTY; without even the implied warranty of
+ # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ # GNU General Public License for more details.
+ #
+ # You should have received a copy of the GNU General Public License
+ # along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+ from numba import njit
+ from numpy import zeros, isfinite, where, nan, sqrt, ndarray, int32, float64
+
+ @njit
+ def bin2d(v: ndarray, e: ndarray,
+           wl_l: ndarray, wl_r: ndarray,
+           tm_l: ndarray, tm_r: ndarray,
+           wl_bins: ndarray, tm_bins: ndarray,
+           estimate_errors: bool = False) -> tuple[ndarray, ndarray, ndarray]:
+     """Bin 2D spectrophotometry data in both wavelength and time dimensions.
+
+     Parameters
+     ----------
+     v : ndarray
+         A 2D spectrophotometry array with shape (n_wavelength, n_exposure).
+     e : ndarray
+         A 2D array of uncertainties matching the shape of `v`.
+     wl_l : ndarray
+         A 1D array of left wavelength edges for each spectral data point.
+     wl_r : ndarray
+         A 1D array of right wavelength edges for each spectral data point.
+     tm_l : ndarray
+         A 1D array of left time edges for each exposure.
+     tm_r : ndarray
+         A 1D array of right time edges for each exposure.
+     wl_bins : ndarray
+         A 2D array (n_wl_bins, 2) of wavelength bin edges [left, right].
+     tm_bins : ndarray
+         A 2D array (n_tm_bins, 2) of time bin edges [left, right].
+     estimate_errors : bool, optional
+         If True, estimate the uncertainties from the data scatter. Default is False.
+
+     Returns
+     -------
+     tuple of ndarrays
+         - bv: Binned values with shape (n_wl_bins, n_tm_bins).
+         - be: Binned uncertainties with shape (n_wl_bins, n_tm_bins).
+         - bn: Number of original finite pixels in each bin with shape (n_wl_bins, n_tm_bins).
+     """
+     n_wl_bins = len(wl_bins)
+     n_tm_bins = len(tm_bins)
+     n_wl = v.shape[0]
+     n_tm = v.shape[1]
+
+     bv = zeros((n_wl_bins, n_tm_bins))
+     be = zeros((n_wl_bins, n_tm_bins))
+     bn = zeros((n_wl_bins, n_tm_bins), dtype=int32)
+
+     # Pre-compute masks and cleaned data
+     nonfin_mask = isfinite(v)
+     v_clean = where(nonfin_mask, v, 0.0)
+     e2_clean = where(nonfin_mask, e**2, 0.0)
+     nonfin_weights = nonfin_mask.astype(float64)
+
+     # Pre-compute time bin indices and weights
+     tm_il_arr = zeros(n_tm_bins, dtype=int32)
+     tm_ir_arr = zeros(n_tm_bins, dtype=int32)
+     tm_weights_all = zeros((n_tm_bins, n_tm))
+
+     for itm_bin in range(n_tm_bins):
+         tm_bel, tm_ber = tm_bins[itm_bin]
+
+         # Find the first time index
+         tm_il = 0
+         for j in range(n_tm - 1):
+             if tm_l[j + 1] > tm_bel:
+                 tm_il = j
+                 break
+         else:
+             tm_il = n_tm - 1
+
+         tm_il_arr[itm_bin] = tm_il
+
+         # Calculate the time weights from the overlap between each pixel and the bin:
+         #   overlap = max(0, min(pixel_right, bin_right) - max(pixel_left, bin_left))
+         # looping from the first overlapping index until the pixels no longer overlap.
+
+         tm_idx = tm_il
+         # Iterate until a pixel starts after the bin ends or we run out of pixels
+         while tm_idx < n_tm:
+             # If the pixel starts after the bin ends, stop.
+             if tm_l[tm_idx] >= tm_ber:
+                 # The tm_il search may land on a pixel that starts after the bin
+                 # if the bin falls in a gap: record the index and stop.
+                 break
+
+             # Calculate the overlap:
+             # min(pixel_r, bin_r)
+             r_bound = tm_r[tm_idx] if tm_r[tm_idx] < tm_ber else tm_ber
+             # max(pixel_l, bin_l)
+             l_bound = tm_l[tm_idx] if tm_l[tm_idx] > tm_bel else tm_bel
+
+             if r_bound > l_bound:
+                 tm_weights_all[itm_bin, tm_idx] = r_bound - l_bound
+
+             # If this pixel extends past the bin, we are done with this bin
+             if tm_r[tm_idx] >= tm_ber:
+                 break
+
+             tm_idx += 1
+
+         tm_ir_arr[itm_bin] = min(tm_idx, n_tm - 1)
+
+     # Allocated outside the loop and reused for every wavelength bin
+     wl_weights = zeros(n_wl)
+
+     # Process the wavelength bins
+     wl_start = 0
+     for iwl_bin in range(n_wl_bins):
+         wl_bel, wl_ber = wl_bins[iwl_bin]
+
+         # Reset the weights in place for this iteration (faster than re-allocating).
+         # Only the [wl_il, wl_ir] range is written to, so it would suffice to zero
+         # out that range at the end of the loop; however, since n_wl is usually
+         # small in spectroscopy, re-zeroing the whole array is just as good and
+         # keeps the logic simple.
+         wl_weights[:] = 0.0
+
+         # Find the first wavelength index
+         wl_il = wl_start
+         for wl_il in range(wl_start, n_wl - 1):
+             if wl_l[wl_il + 1] > wl_bel:
+                 break
+
+         # Calculate the wavelength weights (same overlap logic as for time)
+         wl_idx = wl_il
+         while wl_idx < n_wl:
+             if wl_l[wl_idx] >= wl_ber:
+                 break
+
+             r_bound = wl_r[wl_idx] if wl_r[wl_idx] < wl_ber else wl_ber
+             l_bound = wl_l[wl_idx] if wl_l[wl_idx] > wl_bel else wl_bel
+
+             if r_bound > l_bound:
+                 wl_weights[wl_idx] = r_bound - l_bound
+
+             if wl_r[wl_idx] >= wl_ber:
+                 break
+             wl_idx += 1
+
+         wl_ir = min(wl_idx, n_wl - 1)
+
+         # Start the next bin search from this index
+         wl_start = wl_il
+
+         # Process all time bins for this wavelength bin
+         for itm_bin in range(n_tm_bins):
+             tm_il = tm_il_arr[itm_bin]
+             tm_ir = tm_ir_arr[itm_bin]
+
+             total_weight = 0.0
+             sum_w2 = 0.0
+             weighted_sum = 0.0
+             weighted_e2_sum = 0.0
+             npt = 0
+
+             for i in range(wl_il, wl_ir + 1):
+                 w_wl = wl_weights[i]
+                 if w_wl <= 0: continue  # Skip if no overlap
+
+                 for j in range(tm_il, tm_ir + 1):
+                     # Combine the wavelength, time, and finiteness weights
+                     w = w_wl * tm_weights_all[itm_bin, j] * nonfin_weights[i, j]
+
+                     if w > 0:
+                         total_weight += w
+                         sum_w2 += w * w
+                         weighted_sum += w * v_clean[i, j]
+                         weighted_e2_sum += w * w * e2_clean[i, j]
+                         npt += 1
+
+             bn[iwl_bin, itm_bin] = npt
+
+             if total_weight > 0:
+                 bv[iwl_bin, itm_bin] = vmean = weighted_sum / total_weight
+
+                 if estimate_errors:
+                     # Need at least two points and positive effective degrees of freedom
+                     if npt > 1 and total_weight**2 > sum_w2:
+                         var_sum = 0.0
+                         for i in range(wl_il, wl_ir + 1):
+                             w_wl = wl_weights[i]
+                             if w_wl <= 0: continue
+
+                             for j in range(tm_il, tm_ir + 1):
+                                 w = w_wl * tm_weights_all[itm_bin, j] * nonfin_weights[i, j]
+                                 if w > 0:
+                                     var_sum += w * (v_clean[i, j] - vmean) ** 2
+
+                         denominator = total_weight - (sum_w2 / total_weight)
+                         sample_variance = var_sum / denominator
+                         be[iwl_bin, itm_bin] = sqrt(sample_variance * (sum_w2 / (total_weight * total_weight)))
+                     else:
+                         be[iwl_bin, itm_bin] = nan
+                 else:
+                     # Propagate the input errors: SE = sqrt(sum(w² σ²)) / sum(w)
+                     be[iwl_bin, itm_bin] = sqrt(weighted_e2_sum) / total_weight
+             else:
+                 bv[iwl_bin, itm_bin] = nan
+                 be[iwl_bin, itm_bin] = nan
+
+     return bv, be, bn
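
bin2d applies the same overlap weighting along the wavelength and time axes at once, and additionally returns the number of finite input pixels that contributed to each output bin. A minimal usage sketch with synthetic inputs (illustrative only):

    import numpy as np
    from exoiris.bin2d import bin2d

    nwl, ntm = 60, 80
    wl_edges = np.linspace(1.0, 2.0, nwl + 1)
    tm_edges = np.linspace(0.0, 0.2, ntm + 1)
    v = np.random.normal(1.0, 1e-3, (nwl, ntm))
    e = np.full((nwl, ntm), 1e-3)
    wl_bins = np.array([[1.0, 1.5], [1.5, 2.0]])
    tm_bins = np.array([[0.0, 0.1], [0.1, 0.2]])

    bv, be, bn = bin2d(v, e, wl_edges[:-1], wl_edges[1:],
                       tm_edges[:-1], tm_edges[1:], wl_bins, tm_bins)
    print(bv.shape)  # (2, 2)
    print(bn)        # contributing finite pixels per output bin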
exoiris/exoiris.py CHANGED
@@ -170,7 +170,7 @@ class ExoIris:
      """

      def __init__(self, name: str, ldmodel, data: TSDataGroup | TSData, nk: int = 50, nldc: int = 10, nthreads: int = 1,
-                  tmpars: dict | None = None, noise_model: Literal["white", "fixed_gp", "free_gp"] = 'white',
+                  tmpars: dict | None = None, noise_model: Literal["white_profiled", "white_marginalized", "fixed_gp", "free_gp"] = 'white_profiled',
                   interpolation: Literal['nearest', 'linear', 'pchip', 'makima', 'bspline', 'bspline-quadratic', 'bspline-cubic'] = 'linear'):
          """
          Parameters
@@ -1450,42 +1450,6 @@ class ExoIris:
          """
          return self._tsa.create_initial_population(n, source, add_noise)

-     def add_noise_to_solution(self, result: str = 'fit') -> None:
-         """Add noise to the global optimization result or MCMC parameter posteriors.
-
-         Add noise to the global optimization result or MCMC parameter posteriors. You may want to do this if you
-         create a new analysis from another one, for example, by adding radius ratio knots or changing the intrinsic
-         data resolution.
-
-         Parameters
-         ----------
-         result
-             Determines which result to add noise to. Default is 'fit'.
-
-         Raises
-         ------
-         ValueError
-             If the 'result' argument is not 'fit' or 'mcmc'.
-         """
-         if result == 'fit':
-             pvp = self._tsa._de_population[:, :].copy()
-         elif result == 'mcmc':
-             pvp = self._tsa._mc_chains[:, -1, :].copy()
-         else:
-             raise ValueError("The 'result' argument must be either 'fit' or 'mcmc'")
-
-         npv = pvp.shape[0]
-         pvp[:, 0] += normal(0, 0.005, size=npv)
-         pvp[:, 1] += normal(0, 0.001, size=npv)
-         pvp[:, 3] += normal(0, 0.005, size=npv)
-         pvp[:, self._tsa._sl_rratios] += normal(0, 1, pvp[:, self._tsa._sl_rratios].shape) * 0.002 * pvp[:, self._tsa._sl_rratios]
-         pvp[:, self._tsa._sl_ld] += normal(0, 1, pvp[:, self._tsa._sl_ld].shape) * 0.002 * pvp[:, self._tsa._sl_ld]
-
-         if result == 'fit':
-             self._tsa._de_population[:, :] = pvp
-         else:
-             pvp = self._tsa._mc_chains[:, -1, :] = pvp
-
      def optimize_gp_hyperparameters(self,
                                      log10_sigma_bounds: float | tuple[float, float] | None = None,
                                      log10_rho_bounds: float | tuple[float, float] = (-5, 0),
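
The user-facing change above is that the old 'white' noise model is split into a profiled variant, where the baseline coefficients are fit per model evaluation (see nlstsq in tslpf.py below), and an analytically marginalized variant built on the new lmlikelihood module, with 'white_profiled' as the new default. A hedged sketch of selecting the marginalized model (ldmodel and data are placeholders for a real limb-darkening model and a TSData or TSDataGroup instance):

    from exoiris import ExoIris  # assuming ExoIris is exported at the package root

    # 'white' keeps working as an alias for the profiled model, but the two
    # behaviours can now be selected explicitly.
    ei = ExoIris('example', ldmodel, data, noise_model='white_marginalized')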
exoiris/lmlikelihood.py ADDED
@@ -0,0 +1,172 @@
+ # ExoIris: fast, flexible, and easy exoplanet transmission spectroscopy in Python.
+ # Copyright (C) 2026 Hannu Parviainen
+ #
+ # This program is free software: you can redistribute it and/or modify
+ # it under the terms of the GNU General Public License as published by
+ # the Free Software Foundation, either version 3 of the License, or
+ # (at your option) any later version.
+ #
+ # This program is distributed in the hope that it will be useful,
+ # but WITHOUT ANY WARRANTY; without even the implied warranty of
+ # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ # GNU General Public License for more details.
+ #
+ # You should have received a copy of the GNU General Public License
+ # along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+ import numpy as np
+ from numba import njit
+
+ @njit
+ def _chol_solve(l: np.ndarray, b: np.ndarray) -> np.ndarray:
+     """Solve L L^T x = b given lower-triangular L."""
+     y = np.linalg.solve(l, b)
+     return np.linalg.solve(l.T, y)
+
+
+ @njit
+ def marginalized_loglike_mbl1d(
+     obs: np.ndarray,
+     mod: np.ndarray,
+     covs: np.ndarray,
+     sigma: np.ndarray,
+     tau: float = 1e6,
+     drop_constant: bool = False,
+ ) -> float:
+     """Compute the analytically marginalized log-likelihood for a multiplicative baseline model.
+
+     JIT-compiled implementation of the collapsed (marginalized) log-likelihood for a
+     model where systematic trends multiply the physical signal:
+
+         obs = mod(θ) · covs·a + ε
+
+     where ε ~ N(0, diag(σ²)) and a ~ N(0, Λ). The baseline coefficients a are
+     integrated out analytically, yielding a likelihood that depends only on the
+     physical model mod(θ).
+
+     This formulation is appropriate when the out-of-transit baseline level must be
+     estimated from the data. The design matrix `covs` should include a constant
+     column (ones) to capture the baseline flux level, with additional columns for
+     systematic trends.
+
+     The computation avoids explicit matrix inversion by exploiting the Woodbury
+     identity and matrix determinant lemma, working with k×k matrices rather than
+     n×n matrices. This makes the function efficient when k ≪ n.
+
+     Parameters
+     ----------
+     obs : ndarray of shape (n,), dtype float64
+         Observed flux values. Must be a contiguous float64 array.
+     mod : ndarray of shape (n,), dtype float64
+         Physical transit model evaluated at the current parameters θ, normalized
+         such that the out-of-transit level is unity. Must be contiguous float64.
+     covs : ndarray of shape (n, k), dtype float64
+         Design matrix for the multiplicative baseline. Should include a constant
+         column (ones) as the first column to capture the baseline flux level.
+         Additional columns represent systematic trends (e.g., airmass, detector
+         position, PSF width). Trend columns should typically be mean-centered.
+         Must be contiguous float64 with C ordering.
+     sigma : ndarray of shape (n,), dtype float64
+         Per-observation measurement uncertainties (standard deviations). All
+         values must be strictly positive. Must be contiguous float64.
+     tau : float, default 1e6
+         Prior standard deviation for all baseline coefficients (Λ = τ²I).
+     drop_constant : bool, default False
+         If True, omit terms constant in θ (log|Σ| and n·log(2π)). Use this for
+         MCMC sampling over θ when σ is fixed, as these terms only shift the
+         log-posterior by a constant without affecting sampling.
+
+     Returns
+     -------
+     float
+         The marginalized log-likelihood value. If drop_constant is True, this
+         omits θ-independent normalization terms.
+
+     Raises
+     ------
+     numpy.linalg.LinAlgError
+         If the internal matrix K is not positive definite (its Cholesky
+         decomposition fails). No other validation is performed.
+
+     Notes
+     -----
+     The marginalized likelihood is obtained by integrating over the baseline
+     coefficients:
+
+         L(θ) = ∫ p(obs | θ, a) p(a) da
+
+     Defining Φ = diag(mod)·covs, the marginal distribution of obs is:
+
+         obs ~ N(0, C) where C = Σ + τ²ΦΦᵀ
+
+     Rather than inverting the n×n matrix C directly, the implementation uses:
+
+         C⁻¹ = W − τ²W·Φ·K⁻¹·Φᵀ·W    (Woodbury identity)
+         |C| = |Σ|·|K|               (matrix determinant lemma)
+
+     where W = Σ⁻¹ = diag(1/σ²) and K = I + τ²ΦᵀWΦ is a k×k matrix.
+
+     The log-likelihood is:
+
+         log L = −½ [obsᵀC⁻¹obs + log|C| + n·log(2π)]
+               = −½ [(obsᵀWobs − τ²cᵀK⁻¹c) + log|Σ| + log|K| + n·log(2π)]
+
+     where c = ΦᵀW·obs.
+
+     With the isotropic prior Λ = τ²I, the Cholesky factorization of Λ is trivially
+     L_Λ = τI, eliminating one matrix decomposition compared to the general case.
+     All matrix decompositions involve only k×k matrices, giving O(nk² + k³)
+     complexity rather than O(n³).
+
+     In the limit τ → ∞, the marginalized likelihood approaches the profile
+     likelihood with a at its maximum likelihood estimate (ordinary least squares).
+     Numerical stability requires finite τ; values around 10⁶ are effectively
+     uninformative for normalized flux data while maintaining well-conditioned K.
+     """
+     n = obs.shape[0]
+     ncov = covs.shape[1]
+     tau2 = tau * tau
+     phi = mod[:, None] * covs
+     w = 1.0 / (sigma * sigma)
+     wphi = w[:, None] * phi
+     k = np.eye(ncov) + tau2 * (phi.T @ wphi)
+     c = phi.T @ (w * obs)
+     lk = np.linalg.cholesky(k)
+     obswobs = np.dot(w * obs, obs)
+     kinvc = _chol_solve(lk, c)
+     quad = obswobs - tau2 * np.dot(c, kinvc)
+     logdetk = 2.0 * np.sum(np.log(np.diag(lk)))
+     if drop_constant:
+         return -0.5 * (quad + logdetk)
+     logdetsigma = np.sum(np.log(sigma * sigma))
+     return -0.5 * (quad + logdetsigma + logdetk + n * np.log(2.0 * np.pi))
+
+ @njit
+ def marginalized_loglike_mbl2d(
+     obs: np.ndarray,
+     mod: np.ndarray,
+     err: np.ndarray,
+     covs: np.ndarray,
+     mask: np.ndarray,
+     tau: float = 1e6,
+ ) -> float:
+     nwl = obs.shape[0]
+     ncov = covs.shape[1]
+     tau2 = tau * tau
+     w = 1.0 / (err * err)
+
+     lnlike = 0.0
+     for i in range(nwl):
+         m = mask[i]
+         phi = mod[i, m, None] * covs[m]
+         wphi = w[i, m, None] * phi
+         k = np.eye(ncov) + tau2 * (phi.T @ wphi)
+         c = phi.T @ (w[i, m] * obs[i, m])
+         lk = np.linalg.cholesky(k)
+         obswobs = np.dot(w[i, m] * obs[i, m], obs[i, m])
+         kinvc = _chol_solve(lk, c)
+         quad = obswobs - tau2 * np.dot(c, kinvc)
+         logdetk = 2.0 * np.sum(np.log(np.diag(lk)))
+         logdetsigma = np.sum(np.log(err[i, m] * err[i, m]))
+         lnlike += -0.5 * (quad + logdetsigma + logdetk + mask[i].sum() * np.log(2.0 * np.pi))
+     return lnlike
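
Because the module leans on the Woodbury identity, a quick sanity check is to compare its result against the direct n×n Gaussian log-density of obs ~ N(0, Σ + τ²ΦΦᵀ). A small self-contained check (illustrative, not part of the package):

    import numpy as np
    from exoiris.lmlikelihood import marginalized_loglike_mbl1d

    rng = np.random.default_rng(0)
    n, tau = 50, 10.0
    mod = 1.0 - 0.01 * (np.abs(np.linspace(-1.0, 1.0, n)) < 0.3)  # toy transit model
    covs = np.column_stack([np.ones(n), np.linspace(-1.0, 1.0, n),
                            rng.normal(0.0, 1.0, n)])             # constant + two trends
    sigma = np.full(n, 1e-2)
    obs = mod + rng.normal(0.0, sigma)

    # Direct evaluation with the full covariance C = Sigma + tau^2 Phi Phi^T.
    phi = mod[:, None] * covs
    c_full = np.diag(sigma**2) + tau**2 * (phi @ phi.T)
    _, logdet = np.linalg.slogdet(c_full)
    direct = -0.5 * (obs @ np.linalg.solve(c_full, obs) + logdet + n * np.log(2.0 * np.pi))

    print(np.isclose(direct, marginalized_loglike_mbl1d(obs, mod, covs, sigma, tau)))  # True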
exoiris/spotmodel.py CHANGED
@@ -36,7 +36,7 @@ from pytransit.stars import create_bt_settl_interpolator, create_husser2013_inte
  from pytransit.param import GParameter, UniformPrior as U

  from exoiris.tsdata import TSData
- from exoiris.util import bin2d
+ from exoiris.bin1d import bin1d


  @njit
@@ -81,7 +81,7 @@ def bin_stellar_spectrum_model(sp: RegularGridInterpolator, data: TSData):
      wl_l_edges = wave - 0.5
      wl_r_edges = wave + 0.5

-     bflux = bin2d(flux.T, flux.T, wl_l_edges*1e-3, wl_r_edges*1e-3, vstack([data._wl_l_edges, data._wl_r_edges]).T)[0].T
+     bflux = bin1d(flux.T, flux.T, wl_l_edges*1e-3, wl_r_edges*1e-3, vstack([data._wl_l_edges, data._wl_r_edges]).T)[0].T
      return RegularGridInterpolator((teff, data.wavelength), bflux, bounds_error=False, fill_value=nan)

exoiris/tsdata.py CHANGED
@@ -56,12 +56,14 @@ from numpy import (
      ascontiguousarray,
      vstack,
      ones_like,
+     average,
  )
  from pytransit.orbits import fold

  from .binning import Binning, CompoundBinning
  from .ephemeris import Ephemeris
- from .util import bin2d
+ from .bin1d import bin1d
+ from .bin2d import bin2d


  class TSData:
@@ -603,6 +605,14 @@
          ax.axy2 = axy2
          return fig

+     def create_white_light_curve(self, data=None) -> ndarray:
+         """Create a white light curve as an inverse-variance weighted average over wavelength."""
+         if data is not None and data.shape != self.fluxes.shape:
+             raise ValueError("The data must have the same shape as the 2D flux array.")
+         data = data if data is not None else self.fluxes
+         weights = where(isfinite(data) & isfinite(self.errors), 1/self.errors**2, 0.0)
+         return average(where(isfinite(data), data, 0), axis=0, weights=weights)
+
      def plot_white(self, ax: Axes | None = None, figsize: tuple[float, float] | None = None) -> Figure:
          """Plot a white light curve.

@@ -623,7 +633,7 @@
          fig = ax.figure
          tref = floor(self.time.min())

-         ax.plot(self.time, nanmean(self.fluxes, 0))
+         ax.plot(self.time, self.create_white_light_curve())
          if self.ephemeris is not None:
              [ax.axvline(tl, ls='--', c='k') for tl in self.ephemeris.transit_limits(self.time.mean())]

@@ -677,6 +687,76 @@
          else:
              return TSDataGroup([self]) + other

+     def bin(self,
+             wave_binning: Optional[Union[Binning, CompoundBinning]] = None,
+             time_binning: Optional[Union[Binning, CompoundBinning]] = None,
+             wave_nb: Optional[int] = None, wave_bw: Optional[float] = None, wave_r: Optional[float] = None,
+             time_nb: Optional[int] = None, time_bw: Optional[float] = None,
+             estimate_errors: bool = False) -> 'TSData':
+         """Bin the data along the wavelength and/or time axes.
+
+         Bin the data along the wavelength and/or time axes. If a binning is not specified, a Binning object is
+         created using the minimum and maximum time and wavelength values.
+
+         Parameters
+         ----------
+         wave_binning
+             The wavelength binning to use.
+         time_binning
+             The time binning to use.
+         wave_nb, wave_bw, wave_r
+             Number of wavelength bins, wavelength bin width, or wavelength bin resolution.
+         time_nb, time_bw
+             Number of time bins, or time bin width in seconds.
+         estimate_errors
+             Should the uncertainties be estimated from the data.
+
+         Returns
+         -------
+         TSData
+         """
+
+         if wave_binning is None and wave_nb is None and wave_bw is None and wave_r is None:
+             return self.bin_time(time_binning, time_nb, time_bw, estimate_errors=estimate_errors)
+         if time_binning is None and time_nb is None and time_bw is None:
+             return self.bin_wavelength(wave_binning, wave_nb, wave_bw, wave_r, estimate_errors=estimate_errors)
+
+         with warnings.catch_warnings():
+             warnings.simplefilter('ignore', numba.NumbaPerformanceWarning)
+             if wave_binning is None:
+                 wave_binning = Binning(self.bbox_wl[0], self.bbox_wl[1], nb=wave_nb, bw=wave_bw, r=wave_r)
+             if time_binning is None:
+                 time_binning = Binning(self.time.min(), self.time.max(), nb=time_nb, bw=time_bw/(24*60*60) if time_bw is not None else None)
+
+             bf, be, bn = bin2d(self.fluxes, self.errors,
+                                self._wl_l_edges, self._wl_r_edges,
+                                self._tm_l_edges, self._tm_r_edges,
+                                wave_binning.bins, time_binning.bins,
+                                estimate_errors=estimate_errors)
+
+             bc, _ = bin1d(self.covs, ones_like(self.covs),
+                           self._tm_l_edges, self._tm_r_edges,
+                           time_binning.bins,
+                           estimate_errors=False)
+
+             if not all(isfinite(be)):
+                 warnings.warn('Error estimation failed for some bins, check the error array.')
+
+             d = TSData(time_binning.bins.mean(1), wave_binning.bins.mean(1), bf, be,
+                        name=self.name,
+                        wl_edges=(wave_binning.bins[:, 0], wave_binning.bins[:, 1]),
+                        tm_edges=(time_binning.bins[:, 0], time_binning.bins[:, 1]),
+                        noise_group=self.noise_group,
+                        epoch_group=self.epoch_group,
+                        offset_group=self.offset_group,
+                        ephemeris=self.ephemeris,
+                        n_baseline=self.n_baseline,
+                        covs=bc)
+             if self.ephemeris is not None:
+                 d.mask_transit(ephemeris=self.ephemeris)
+             return d
+
+
      def bin_wavelength(self, binning: Optional[Union[Binning, CompoundBinning]] = None,
                         nb: Optional[int] = None, bw: Optional[float] = None, r: Optional[float] = None,
                         estimate_errors: bool = False) -> 'TSData':
@@ -706,7 +786,7 @@
              warnings.simplefilter('ignore', numba.NumbaPerformanceWarning)
              if binning is None:
                  binning = Binning(self.bbox_wl[0], self.bbox_wl[1], nb=nb, bw=bw, r=r)
-             bf, be = bin2d(self.fluxes, self.errors, self._wl_l_edges, self._wl_r_edges,
+             bf, be = bin1d(self.fluxes, self.errors, self._wl_l_edges, self._wl_r_edges,
                             binning.bins, estimate_errors=estimate_errors)
              if not all(isfinite(be)):
                  warnings.warn('Error estimation failed for some bins, check the error array.')
@@ -749,9 +829,9 @@
              warnings.simplefilter('ignore', numba.NumbaPerformanceWarning)
              if binning is None:
                  binning = Binning(self.time.min(), self.time.max(), nb=nb, bw=bw/(24*60*60) if bw is not None else None)
-             bf, be = bin2d(self.fluxes.T, self.errors.T, self._tm_l_edges, self._tm_r_edges,
+             bf, be = bin1d(self.fluxes.T, self.errors.T, self._tm_l_edges, self._tm_r_edges,
                             binning.bins, estimate_errors=estimate_errors)
-             bc, _ = bin2d(self.covs, ones_like(self.covs), self._tm_l_edges, self._tm_r_edges, binning.bins, False)
+             bc, _ = bin1d(self.covs, ones_like(self.covs), self._tm_l_edges, self._tm_r_edges, binning.bins, False)
              d = TSData(binning.bins.mean(1), self.wavelength, bf.T, be.T,
                         wl_edges=(self._wl_l_edges, self._wl_r_edges),
                         tm_edges=(binning.bins[:,0], binning.bins[:,1]),
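
Taken together, the new TSData.bin method replaces separate bin_wavelength and bin_time calls when both axes are binned, dispatching to bin2d for the fluxes and to bin1d for the covariates, while create_white_light_curve replaces the earlier plain nanmean with an inverse-variance weighted average. A hedged usage sketch, assuming data is an existing TSData instance (the bin counts and widths are illustrative; time_bw is in seconds, as it is divided by 24*60*60 before use):

    binned = data.bin(wave_nb=40, time_bw=120.0)  # 40 wavelength bins, 120 s time bins
    wlc = data.create_white_light_curve()         # inverse-variance weighted white light curve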
exoiris/tslpf.py CHANGED
@@ -55,17 +55,19 @@ from scipy.interpolate import (
      interp1d,
  )

+ from .lmlikelihood import marginalized_loglike_mbl2d
  from .ldtkld import LDTkLD
  from .spotmodel import SpotModel
  from .tsdata import TSDataGroup
  from .tsmodel import TransmissionSpectroscopyModel as TSModel

- NM_WHITE = 0
+ NM_WHITE_MARGINALIZED = 0
  NM_GP_FIXED = 1
  NM_GP_FREE = 2
+ NM_WHITE_PROFILED = 3

- noise_models = dict(white=NM_WHITE, fixed_gp=NM_GP_FIXED, free_gp=NM_GP_FREE)
-
+ noise_models = dict(white=NM_WHITE_PROFILED, white_profiled=NM_WHITE_PROFILED, white_marginalized=NM_WHITE_MARGINALIZED,
+                     fixed_gp=NM_GP_FIXED, free_gp=NM_GP_FREE)

  @njit
  def nlstsq(covs, res, mask, wlmask, with_nans):
@@ -74,7 +76,10 @@ def nlstsq(covs, res, mask, wlmask, with_nans):
      x = zeros((nc, nwl))
      x[:, wlmask] = lstsq(covs, ascontiguousarray(res[wlmask].T))[0]
      for i in with_nans:
-         x[:, i] = lstsq(covs[mask[i]], res[i, mask[i]])[0]
+         try:
+             x[:, i] = lstsq(covs[mask[i]], res[i, mask[i]])[0]
+         except:
+             x[:, i] = nan
      return x


@@ -181,7 +186,7 @@ def clean_knots(knots, min_distance, lmin=0, lmax=inf):


  class TSLPF(LogPosteriorFunction):
      def __init__(self, runner, name: str, ldmodel, data: TSDataGroup, nk: int = 50, nldc: int = 10, nthreads: int = 1,
-                  tmpars = None, noise_model: Literal["white", "fixed_gp", "free_gp"] = 'white',
+                  tmpars = None, noise_model: Literal["white_profiled", "white_marginalized", "fixed_gp", "free_gp"] = 'white_profiled',
                   interpolation: Literal['nearest', 'linear', 'pchip', 'makima', 'bspline', 'bspline-quadratic', 'bspline-cubic'] = 'linear'):
          super().__init__(name)
          self._runner = runner
@@ -296,7 +301,7 @@ class TSLPF(LogPosteriorFunction):
          Parameters
          ----------
          noise_model : str
-             The noise model to be used. Must be one of the following: white, fixed_gp, free_gp.
+             The noise model to be used. Must be one of the following: white_profiled, white_marginalized, fixed_gp, free_gp.

          Raises
          ------
@@ -304,7 +309,7 @@ class TSLPF(LogPosteriorFunction):
              If noise_model is not one of the specified options.
          """
          if noise_model not in noise_models.keys():
-             raise ValueError('noise_model must be one of: white, fixed_gp, free_gp')
+             raise ValueError('noise_model must be one of: white_profiled, white_marginalized, fixed_gp, free_gp')
          self.noise_model = noise_model
          self._nm = noise_models[noise_model]
          if self._nm in (NM_GP_FIXED, NM_GP_FREE):
@@ -822,15 +827,16 @@
                  self._baseline_models[i][ipv, :, :] = nan
          return self._baseline_models

-     def flux_model(self, pv):
+     def flux_model(self, pv, include_baseline: bool = True):
          transit_models = self.transit_model(pv)
-         baseline_models = self.baseline_model(transit_models)
          if self.spot_model is not None:
              self.spot_model.apply_spots(pv, transit_models)
              if self.spot_model.include_tlse:
                  self.spot_model.apply_tlse(pv, transit_models)
-         for i in range(self.data.size):
-             transit_models[i][:, :, :] *= baseline_models[i][:, :, :]
+         if include_baseline:
+             baseline_models = self.baseline_model(transit_models)
+             for i in range(self.data.size):
+                 transit_models[i][:, :, :] *= baseline_models[i][:, :, :]
          return transit_models

      def create_pv_population(self, npop: int = 50) -> ndarray:
@@ -867,13 +873,22 @@
          """
          pv = atleast_2d(pv)
          npv = pv.shape[0]
-         fmod = self.flux_model(pv)
          wn_multipliers = pv[:, self._sl_wnm]
          lnl = zeros(npv)
-         if self._nm == NM_WHITE:
+         if self._nm == NM_WHITE_MARGINALIZED:
+             fmod = self.flux_model(pv, include_baseline=False)
+             for ipv in range(npv):
+                 try:
+                     for i, d in enumerate(self.data):
+                         lnl[ipv] += marginalized_loglike_mbl2d(d.fluxes, fmod[i][ipv], d.errors*wn_multipliers[ipv, d.noise_group], d.covs, d.mask)
+                 except LinAlgError:
+                     lnl[ipv] = -inf
+         elif self._nm == NM_WHITE_PROFILED:
+             fmod = self.flux_model(pv, include_baseline=True)
              for i, d in enumerate(self.data):
                  lnl += lnlike_normal(d.fluxes, fmod[i], d.errors, wn_multipliers[:, d.noise_group], d.mask)
          else:
+             fmod = self.flux_model(pv)
              for j in range(npv):
                  if self._nm == NM_GP_FREE:
                      self.set_gp_hyperparameters(*pv[j, self._sl_gp])
@@ -881,7 +896,7 @@
                      lnl[j] += self._gp[i].log_likelihood(self._gp_flux[i] - fmod[i][j][self.data[i].mask])
          return lnl if npv > 1 else lnl[0]

-     def create_initial_population(self, n: int, source: str, add_noise: bool = True) -> ndarray:
+     def create_initial_population(self, n: int, source: str, add_noise: bool = False) -> ndarray:
          """Create an initial parameter vector population for DE.

          Parameters
@@ -891,7 +906,7 @@
          source : str
              Source of the initial population. Must be either 'fit' or 'mcmc'.
          add_noise : bool, optional
-             Flag indicating whether to add noise to the initial population. Default is True.
+             Flag indicating whether to add noise to the initial population. Default is False.

          Returns
          -------
@@ -921,12 +936,9 @@
          else:
              pvp = rng.choice(pvs.reshape([-1, self.ndim]), size=n)

-         if pvp[0, self._sl_baseline][0] < 0.5:
-             pvp[:, self._sl_baseline] = rng.normal(1.0, 1e-6, size=(n, sum(self.n_baselines)))
-
          if add_noise:
-             pvp[:, self._sl_rratios] += rng.normal(0, 1, pvp[:, self._sl_rratios].shape) * 0.002 * pvp[:, self._sl_rratios]
-             pvp[:, self._sl_ld] += rng.normal(0, 1, pvp[:, self._sl_ld].shape) * 0.002 * pvp[:, self._sl_ld]
+             pvp[:, self._sl_rratios] += rng.normal(0, 1e-4, pvp[:, self._sl_rratios].shape)
+             pvp[:, self._sl_ld] += rng.normal(0, 1e-3, pvp[:, self._sl_ld].shape)
          return pvp

      def optimize_global(self, niter=200, npop=50, population=None, pool=None, lnpost=None, vectorize=True,
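
The try/except added to nlstsq makes the per-wavelength baseline fit degrade gracefully: a wavelength row whose valid points yield an unsolvable least-squares system now gets NaN coefficients instead of aborting the whole fit. The same pattern sketched in plain numpy outside numba (safe_lstsq is a hypothetical name for illustration, not package API):

    import numpy as np

    def safe_lstsq(covs, res, mask):
        """Least squares over the masked rows; all-NaN if the fit fails."""
        x = np.full(covs.shape[1], np.nan)
        try:
            x[:] = np.linalg.lstsq(covs[mask], res[mask], rcond=None)[0]
        except np.linalg.LinAlgError:
            pass  # leave the coefficients as NaN
        return x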
exoiris/util.py CHANGED
@@ -14,79 +14,11 @@
  # You should have received a copy of the GNU General Public License
  # along with this program. If not, see <https://www.gnu.org/licenses/>.

- from numba import njit
- from numpy import (zeros, sum, sqrt, linspace, vstack, concatenate, floor, dot, ndarray, nan, asarray, tile)
+ from numpy import (linspace, vstack, concatenate, floor, ndarray, asarray, tile)
  from numpy._typing import ArrayLike
  from pytransit import TSModel
  from pytransit.orbits import i_from_ba

- @njit
- def bin2d(v, e, el, er, bins, estimate_errors: bool = False) -> tuple[ndarray, ndarray]:
-     """Bin 2D exoplanet transmission spectrum data with its uncertainties into predefined bins in wavelength.
-
-     Parameters
-     ----------
-     v : ndarray
-         A 2D array of the exoplanet transmission spectrum data with a shape (n_wavelength, n_exposure).
-     e : ndarray
-         A 2D array of uncertainties associated with the spectrum data in `v`, matching the shape of `v`.
-     el : ndarray
-         A 1D array containing the left wavelength edges of the integration ranges for each spectral data point.
-     er : ndarray
-         A 1D array containing the right wavelength edges of the integration ranges for each spectral data point.
-     bins : ndarray
-         A 2D array containing the edges of the wavelength bins. These should be sorted in ascending order.
-     estimate_errors: bool, optional.
-         Should the uncertainties be estimated from the data. Default value is False.
-
-     Returns
-     -------
-     tuple of ndarrays
-         A tuple containing two 2D arrays:
-         - The first array (`bv`) contains the binned values of the transmission spectrum.
-         - The second array (`be`) contains the binned uncertainties.
-     """
-     nbins = len(bins)
-     ndata = v.shape[0]
-     bv = zeros((nbins, v.shape[1]))
-     be = zeros((nbins, v.shape[1]))
-     e2 = e**2
-     weights = zeros(ndata)
-     i = 0
-     for ibin in range(nbins):
-         weights[:] = 0.0
-         npt = 0
-         bel, ber = bins[ibin]
-         for i in range(i, ndata - 1):
-             if el[i + 1] > bel:
-                 break
-         il = i
-         if er[i] > ber:
-             weights[i] = ber - bel
-             npt += 1
-         else:
-             weights[i] = er[i] - bel
-             npt += 1
-             for i in range(i + 1, ndata):
-                 if er[i] < ber:
-                     weights[i] = er[i] - el[i]
-                     npt += 1
-                 else:
-                     weights[i] = ber - el[i]
-                     npt += 1
-                     break
-         ir = i
-         ws = sum(weights)
-         bv[ibin] = vmean = dot(weights[il:ir+1], v[il:ir+1,:]) / ws
-         if estimate_errors:
-             if npt > 1:
-                 be[ibin] = sqrt(dot(weights[il:ir+1], (v[il:ir+1,:] - vmean)**2) / ws) / sqrt(npt)
-             else:
-                 be[ibin] = nan
-         else:
-             be[ibin] = sqrt(dot(weights[il:ir+1], e2[il:ir+1,:])) / ws
-     return bv, be
-

  def create_binning(ranges, bwidths):
      """
exoiris-1.0.0.dist-info/METADATA → exoiris-1.1.0.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: ExoIris
- Version: 1.0.0
+ Version: 1.1.0
  Summary: Easy and robust exoplanet transmission spectroscopy.
  Author-email: Hannu Parviainen <hannu@iac.es>
  License: GPLv3
exoiris-1.1.0.dist-info/RECORD ADDED
@@ -0,0 +1,21 @@
+ exoiris/__init__.py,sha256=LU5jAE7_OVPLHFO0UAOGS0e0wuWV6rdSD0Qveet11K8,1147
+ exoiris/bin1d.py,sha256=6X7Wzf-l2TohruJRcrzHtGdy0tWIO4cyeu2YMbKy_Fs,3857
+ exoiris/bin2d.py,sha256=SE-MllYRFKf4rvy-z9Z7xDSkKB7f0feaMzVDDWoO6xU,8287
+ exoiris/binning.py,sha256=-Y9hdK0jZj8DOS82keaprneid2lZ4rCx-keWlKi0LP8,6455
+ exoiris/ephemeris.py,sha256=dthBkJztT5yAP6VnnO7jGvxikboFUQBUGPUfBCFrA3w,1316
+ exoiris/exoiris.py,sha256=h188V6SqkIju2Z505JMSMz1X5l05kDn5Xqar7D2O_QI,61006
+ exoiris/ldtkld.py,sha256=7H1r1xail3vSKdsNKorMTqivnRKU9WrOVH-uE4Ky2jM,3495
+ exoiris/lmlikelihood.py,sha256=ukMW-VV7t_7HosBNCxMQVrGYhgrGd_a8coeN-3nAM8g,6823
+ exoiris/loglikelihood.py,sha256=lVm7kMyzy-xxP5VvZMV7kJY3KovbhlEz3u_6O7R6zgI,6309
+ exoiris/prtretrieval.py,sha256=PBUk-61kRgRGaJtUWLYWYpTpOj4FMDRSdYwI89U_fyw,7060
+ exoiris/spotmodel.py,sha256=jB_J9_5_Gb1ymEd2SfyYuXKDkNCp7ivf5ZAbfGV2s_g,7157
+ exoiris/tsdata.py,sha256=vvJi3_v1QzNKSbWRmEfiNualxrXQx-U7I8BpvaG5xu8,40375
+ exoiris/tslpf.py,sha256=kvAmeB6YZw2MpL7N8yLWCek2AMGlEj1RbAIZcRL8YuQ,36951
+ exoiris/tsmodel.py,sha256=6NaGY48fWHUT_7ti6Ao618PN-LgyoIhfQd8lZQqZ7hU,5160
+ exoiris/util.py,sha256=eEDK1rRXKE1mszNyELtakF_TnAT3Wrf2_wMdidoYKvQ,3337
+ exoiris/wlpf.py,sha256=ixEsDXLHSglYOWhJKA87NJPrLWqhEk4DYhn1IaShN8U,6294
+ exoiris-1.1.0.dist-info/licenses/LICENSE,sha256=OXLcl0T2SZ8Pmy2_dmlvKuetivmyPd5m1q-Gyd-zaYY,35149
+ exoiris-1.1.0.dist-info/METADATA,sha256=FthAETbYQCboYaTrf0cVlxJK2Umys_YHv1qbzSE-04M,5083
+ exoiris-1.1.0.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
+ exoiris-1.1.0.dist-info/top_level.txt,sha256=EoNxT6c5mQDcM0f_LUQB-ETsYg03lNaV3o2L_Yc6-aE,8
+ exoiris-1.1.0.dist-info/RECORD,,
exoiris-1.0.0.dist-info/RECORD REMOVED
@@ -1,18 +0,0 @@
- exoiris/__init__.py,sha256=LU5jAE7_OVPLHFO0UAOGS0e0wuWV6rdSD0Qveet11K8,1147
- exoiris/binning.py,sha256=-Y9hdK0jZj8DOS82keaprneid2lZ4rCx-keWlKi0LP8,6455
- exoiris/ephemeris.py,sha256=dthBkJztT5yAP6VnnO7jGvxikboFUQBUGPUfBCFrA3w,1316
- exoiris/exoiris.py,sha256=bPbHp0YRJEhVogCD206kuQlKh4ZOdx3M47LT3T55RfQ,62457
- exoiris/ldtkld.py,sha256=7H1r1xail3vSKdsNKorMTqivnRKU9WrOVH-uE4Ky2jM,3495
- exoiris/loglikelihood.py,sha256=lVm7kMyzy-xxP5VvZMV7kJY3KovbhlEz3u_6O7R6zgI,6309
- exoiris/prtretrieval.py,sha256=PBUk-61kRgRGaJtUWLYWYpTpOj4FMDRSdYwI89U_fyw,7060
- exoiris/spotmodel.py,sha256=p7csTJs6Yicmaz93KHdnFsenvLvC9MDvx89oljnzDTM,7156
- exoiris/tsdata.py,sha256=s3R6sByy7Ud26F3TdaRaXNQi-rYeJHnm7XHk2FLd1Go,36622
- exoiris/tslpf.py,sha256=UqTmYqnqwdiStIX7My2W-VBfadFD4QQ-bYJYk0kIBEs,36214
- exoiris/tsmodel.py,sha256=6NaGY48fWHUT_7ti6Ao618PN-LgyoIhfQd8lZQqZ7hU,5160
- exoiris/util.py,sha256=uNv_c3Kuv1lml8MuDAuyElO4s3f1tRIQ1QMlLaI7Yak,5921
- exoiris/wlpf.py,sha256=ixEsDXLHSglYOWhJKA87NJPrLWqhEk4DYhn1IaShN8U,6294
- exoiris-1.0.0.dist-info/licenses/LICENSE,sha256=OXLcl0T2SZ8Pmy2_dmlvKuetivmyPd5m1q-Gyd-zaYY,35149
- exoiris-1.0.0.dist-info/METADATA,sha256=RsDlI-uyi4NgcBNX0Uqlsresgc_Pl0wgbMKjiF8niYQ,5083
- exoiris-1.0.0.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
- exoiris-1.0.0.dist-info/top_level.txt,sha256=EoNxT6c5mQDcM0f_LUQB-ETsYg03lNaV3o2L_Yc6-aE,8
- exoiris-1.0.0.dist-info/RECORD,,