ExoIris-0.16.0-py3-none-any.whl → ExoIris-0.18.0-py3-none-any.whl

This diff compares the contents of two publicly released versions of the package as published to a supported registry. It is provided for informational purposes only and reflects the changes between the versions as they appear in their respective public registries.
@@ -1,6 +1,6 @@
  Metadata-Version: 2.2
  Name: ExoIris
- Version: 0.16.0
+ Version: 0.18.0
  Summary: Easy and robust exoplanet transmission spectroscopy.
  Author-email: Hannu Parviainen <hannu@iac.es>
  License: GPLv3
@@ -0,0 +1,15 @@
+ exoiris/__init__.py,sha256=gaNfxNKXH41yRK35JghqackXpENyB-NOg-JlciqO1YU,1145
+ exoiris/binning.py,sha256=-Y9hdK0jZj8DOS82keaprneid2lZ4rCx-keWlKi0LP8,6455
+ exoiris/ephemeris.py,sha256=dthBkJztT5yAP6VnnO7jGvxikboFUQBUGPUfBCFrA3w,1316
+ exoiris/exoiris.py,sha256=UgwujRngsk-VNXi8DnDQ2z9-v5_E8_eC2_uaE_pYe0k,47974
+ exoiris/ldtkld.py,sha256=u4c5Yh6JlG0Zfec7-nOCQGXxIt1VVp9mniFMM7h-L6s,3489
+ exoiris/tsdata.py,sha256=c1HLbVBR_Ib291NPAi4gaD44NLtXHcmlRRwIO_mD7Fw,33254
+ exoiris/tslpf.py,sha256=7qrVm_HDeiwgPFdu2GsGdS2f7H13hPc5hrVpVPN9Xc8,28581
+ exoiris/tsmodel.py,sha256=6NaGY48fWHUT_7ti6Ao618PN-LgyoIhfQd8lZQqZ7hU,5160
+ exoiris/util.py,sha256=5PynwYYHRrzyXJHskBtp2J-pcM59zsA1_VtDxencQm4,4630
+ exoiris/wlpf.py,sha256=8VB5ChEbtS8398QctIxbwtDInLoLEkr4yoRkSniEnwA,4537
+ ExoIris-0.18.0.dist-info/LICENSE,sha256=OXLcl0T2SZ8Pmy2_dmlvKuetivmyPd5m1q-Gyd-zaYY,35149
+ ExoIris-0.18.0.dist-info/METADATA,sha256=P1UH_KJ4kEvToWhzHRMFhkMUhXikCu1HDF2DWB48rUk,4189
+ ExoIris-0.18.0.dist-info/WHEEL,sha256=In9FTNxeP60KnTkGw7wk6mJPYd_dQSjEZmXdBdMCI-8,91
+ ExoIris-0.18.0.dist-info/top_level.txt,sha256=EoNxT6c5mQDcM0f_LUQB-ETsYg03lNaV3o2L_Yc6-aE,8
+ ExoIris-0.18.0.dist-info/RECORD,,
exoiris/exoiris.py CHANGED
@@ -33,7 +33,8 @@ from celerite2 import GaussianProcess, terms
  from emcee import EnsembleSampler
  from matplotlib.pyplot import subplots, setp, figure, Figure, Axes
  from numpy import (where, sqrt, clip, percentile, median, squeeze, floor, ndarray,
- array, inf, newaxis, arange, tile, sort, argsort, concatenate, full, nan, r_, nanpercentile)
+ array, inf, newaxis, arange, tile, sort, argsort, concatenate, full, nan, r_, nanpercentile, log10,
+ ceil)
  from numpy.random import normal, permutation
  from pytransit import UniformPrior, NormalPrior
  from pytransit.orbits import epoch
@@ -94,7 +95,7 @@ def load_model(fname: Path | str, name: str | None = None):
  a.period = hdul[0].header['P']
  a.zero_epoch = hdul[0].header['T0']
  a.transit_duration = hdul[0].header['T14']
- [d.calculate_ootmask(a.zero_epoch, a.period, a.transit_duration) for d in a.data]
+ [d.mask_transit(a.zero_epoch, a.period, a.transit_duration) for d in a.data]
  except KeyError:
  pass

@@ -164,13 +165,13 @@ class ExoIris:
  """
  return squeeze(self._tsa.lnposterior(pvp))

- def set_noise_model(self, noise_model: str) -> None:
+ def set_noise_model(self, noise_model: Literal['white', 'fixed_gp', 'free_gp']) -> None:
  """Set the noise model for the analysis.

  Parameters
  ----------
  noise_model
- The noise model to be used. Must be one of the following: white, fixed_gp, free_gp.
+ The noise model to be used.

  Raises
  ------
@@ -374,6 +375,26 @@ class ExoIris:
  """Posterior samples from the MCMC sampler."""
  return pd.DataFrame(self._tsa._mc_chains.reshape([-1, self.ndim]), columns=self.ps.names)

+ @property
+ def white_times(self) -> list[ndarray]:
+ """White light curve time arrays."""
+ return self._wa.times
+
+ @property
+ def white_fluxes(self) -> list[ndarray]:
+ """White light curve flux arrays."""
+ return self._wa.fluxes
+
+ @property
+ def white_models(self) -> list[ndarray]:
+ fm = self._wa.flux_model(self._wa._local_minimization.x)
+ return [fm[sl] for sl in self._wa.lcslices]
+
+ @property
+ def white_errors(self) -> list[ndarray]:
+ """White light curve flux error arrays."""
+ return self._wa.std_errors
+
  def add_radius_ratio_knots(self, knot_wavelengths: Sequence) -> None:
  """Add radius ratio (k) knots.

@@ -535,6 +556,22 @@ class ExoIris:
  """
  return self._wa.plot(axs=axs, figsize=figsize, ncols=ncols or min(self.data.size, 2))

+ def plot_white_gp_predictions(self, axs = None, ncol: int = 1, figsize: tuple[float, float] | None = None):
+ ndata = self.data.size
+
+ if axs is None:
+ nrow = int(ceil(ndata / ncol))
+ fig, axs = subplots(nrow, ncol, sharey='all', constrained_layout=True, squeeze=False, figsize=figsize)
+ else:
+ fig = axs[0].axes
+
+ for i in range(ndata):
+ tref = floor(self.white_times[i][0])
+ axs.flat[i].plot(self.white_times[i] - tref, self.white_fluxes[i]-self.white_models[i])
+ axs.flat[i].plot(self.white_times[i] - tref, self.white_gp_models[i], 'k')
+ setp(axs.flat[i], xlabel=f'Time - {tref:.0f} [d]', xlim=self.white_times[i][[0,-1]]-tref)
+ setp(axs[:, 0], ylabel='Residuals')
+
  def normalize_baseline(self, deg: int = 1) -> None:
  """Normalize the baseline flux for each spectroscopic light curve.

@@ -915,9 +952,6 @@ class ExoIris:
  pp = nanpercentile(residuals, [pmin, pmax])
  data.plot(ax=ax, data=residuals, vmin=pp[0], vmax=pp[1], cmap=cmap)

- tc = pv[1] + pv[2]*epoch(data.time.mean(), pv[1], pv[2])
- td = self.transit_duration
-
  if not show_names:
  ax.set_title("")

@@ -1103,87 +1137,15 @@ class ExoIris:
  pvp = self._tsa._mc_chains[:, -1, :] = pvp

  def optimize_gp_hyperparameters(self,
- log10_sigma_bounds: float | tuple[float, float] = (-5, -2),
+ log10_sigma_bounds: float | tuple[float, float] | None = None,
  log10_rho_bounds: float | tuple[float, float] = (-5, 0),
  log10_sigma_prior=None, log10_rho_prior=None,
- npop: int = 10, niter: int = 100, subset = None):
- """Optimize the Matern-3/2 kernel Gaussian Process hyperparameters.
-
- Parameters
- ----------
- log10_sigma_bounds
- The bounds for the log10 of the sigma hyperparameter. If float is provided, the parameter will be
- fixed to the given value. Default is (-5, -2).
- log10_rho_bounds
- The bounds for the log10 of the rho hyperparameter. If float is provided, the parameter will be fixed
- to the given value. Default is (-5, 0).
- log10_sigma_prior
- The prior distribution for the sigma hyperparameter expressed as an object with a `logpdf` method
- or as an iterable containing the mean and standard deviation of the prior distribution. Default is None.
- log10_rho_prior
- The prior distribution for the rho hyperparameter expressed as an object with a `logpdf` method
- or as an iterable containing the mean and standard deviation of the prior distribution. Default is None.
- npop
- The population size for the differential evolution optimizer. Default is 10.
- niter
- The number of iterations for the differential evolution optimization process. Default is 100.
- subset
- The subset used for the optimization process. If `subset` is a float, a random subset of size
- `0.5 * self.npb` is used. If `subset` is an iterable, it must contain the indices of the subset.
- Default is None.
-
- Returns
- -------
- tuple[float, float]
- The optimized values for the log10 of the sigma and rho hyperparameters.
- float
- The fitness value.
-
- Raises
- ------
- ValueError
- If `subset` is not an iterable or a float.
- ValueError
- If `log10_sigma_prior` is not an object with a `logpdf` method or iterable.
- ValueError
- If `log10_rho_prior` is not an object with a `logpdf` method or iterable.
-
- Notes
- -----
- - The Gaussian Process is reconfigured with the optimal hyperparameters. Any previous kernels are overwritten.
- """
-
+ npop: int = 10, niter: int = 100):
  if self._tsa.noise_model != 'fixed_gp':
  raise ValueError("The noise model must be set to 'fixed_gp' before the hyperparameter optimization.")

- sb = log10_sigma_bounds if isinstance(log10_sigma_bounds, Sequence) else [log10_sigma_bounds-1, log10_sigma_bounds+1]
- rb = log10_rho_bounds if isinstance(log10_rho_bounds, Sequence) else [log10_rho_bounds-1, log10_rho_bounds+1]
- bounds = array([sb, rb])
-
- data = self.data[0]
- if subset is not None:
- if isinstance(subset, float):
- ids = sort(permutation(data.nwl)[:int(subset*data.nwl)])
- elif isinstance(subset, Sequence):
- ids = array(subset, int)
- else:
- raise ValueError("subset must be either an iterable or a float.")
- else:
- ids = arange(data.nwl)
-
- class DummyPrior:
- def logpdf(self, x):
- return 0.0
-
- if log10_sigma_prior is not None:
- if isinstance(log10_sigma_prior, Sequence):
- sp = norm(*log10_sigma_prior)
- elif hasattr(log10_sigma_prior, 'logpdf'):
- sp = log10_sigma_prior
- else:
- raise ValueError('Bad sigma_prior')
- else:
- sp = DummyPrior()
+ if self._wa is None:
+ raise ValueError("The white light curves must be fit using 'fit_white()' before the hyperparameter optimization.")

  if log10_rho_prior is not None:
  if isinstance(log10_rho_prior, Sequence):
@@ -1193,29 +1155,69 @@ class ExoIris:
  else:
  raise ValueError('Bad rho_prior')
  else:
- rp = DummyPrior()
-
- npb = ids.size
- time = (tile(data.time[newaxis, data.transit_mask], (npb, 1)) + arange(npb)[:, newaxis]).ravel()
- flux = (data.fluxes[ids, :][:, data.transit_mask]).ravel() - 1
- ferr = (data.errors[ids, :][:, data.transit_mask]).ravel()
- gp = GaussianProcess(terms.Matern32Term(sigma=flux.std(), rho=0.1))
-
- def nll(log10x):
- x = 10**log10x
- if any(log10x < bounds[:,0]) or any(log10x > bounds[:,1]):
- return inf
- gp.kernel = terms.Matern32Term(sigma=x[0], rho=x[1])
- gp.compute(time, yerr=ferr, quiet=True)
- return -(gp.log_likelihood(flux) + sp.logpdf(log10x[0]) + rp.logpdf(log10x[1]))
-
- de = DiffEvol(nll, bounds, npop, min_ptp=0.2)
- if isinstance(log10_sigma_bounds, float):
- de.population[:, 0] = log10_sigma_bounds
- if isinstance(log10_rho_bounds, float):
- de.population[:, 1] = log10_rho_bounds
-
- de.optimize(niter)
- x = de.minimum_location
- self._tsa.set_gp_hyperparameters(10**x[0], 10**x[1])
- return 10**x, de._fitness.ptp()
+ rp = norm(-2, 1)
+
+ times = self.white_times
+ errors = self.white_errors
+ residuals = [o-m for o,m in zip(self.white_fluxes, self.white_models)]
+ self.white_gp_models = []
+
+ gp_hyperparameters = []
+ for i in range(len(times)):
+ time = times[i]
+ fres = residuals[i]
+ ferr = errors[i]
+
+ log10_sigma_guess = log10(fres.std())
+
+ match log10_sigma_bounds:
+ case None:
+ sb = [log10_sigma_guess-1, log10_sigma_guess+1]
+ case _ if isinstance(log10_sigma_bounds, Sequence):
+ sb = log10_sigma_bounds
+ case _ if isinstance(log10_sigma_bounds, float):
+ sb = [log10_sigma_bounds-1, log10_sigma_bounds+1]
+
+ match log10_rho_bounds:
+ case None:
+ rb = [-5, -2]
+ case _ if isinstance(log10_rho_bounds, Sequence):
+ rb = log10_rho_bounds
+ case _ if isinstance(log10_rho_bounds, float):
+ rb = [log10_rho_bounds-1, log10_rho_bounds+1]
+
+ bounds = array([sb, rb])
+
+ if log10_sigma_prior is not None:
+ if isinstance(log10_sigma_prior, Sequence):
+ sp = norm(*log10_sigma_prior)
+ elif hasattr(log10_sigma_prior, 'logpdf'):
+ sp = log10_sigma_prior
+ else:
+ raise ValueError('Bad sigma_prior')
+ else:
+ sp = norm(log10_sigma_guess, 0.1)
+
+ gp = GaussianProcess(terms.Matern32Term(sigma=fres.std(), rho=0.1))
+
+ def nll(log10x):
+ x = 10**log10x
+ if any(log10x < bounds[:,0]) or any(log10x > bounds[:,1]):
+ return inf
+ gp.kernel = terms.Matern32Term(sigma=x[0], rho=x[1])
+ gp.compute(time, yerr=ferr, quiet=True)
+ return -(gp.log_likelihood(fres) + sp.logpdf(log10x[0]) + rp.logpdf(log10x[1]))
+
+ de = DiffEvol(nll, bounds, npop, min_ptp=0.2)
+ if isinstance(log10_sigma_bounds, float):
+ de.population[:, 0] = log10_sigma_bounds
+ if isinstance(log10_rho_bounds, float):
+ de.population[:, 1] = log10_rho_bounds
+
+ de.optimize(niter)
+ x = de.minimum_location
+ gp_hyperparameters.append(10**x)
+ gp.kernel = terms.Matern32Term(sigma=10**x[0], rho=10**x[1])
+ self.white_gp_models.append(gp.predict(fres))
+ self._tsa.set_gp_hyperparameters(10**x[0], 10**x[1], i)
+ return gp_hyperparameters
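In 0.18.0 the optimizer therefore works on the white light curve residuals from the white-light fit and returns one optimized (sigma, rho) pair per data set. The following is a minimal usage sketch inferred from this hunk, not from the package documentation; it assumes an already-configured ExoIris analysis object `ei` with its spectroscopic data attached, and that `fit_white()` is the white-light fitting step referenced in the error message above:

# Hypothetical driver sketch inferred from the diff; `ei` is an assumed,
# already-configured ExoIris analysis object.
ei.set_noise_model('fixed_gp')          # required before the GP hyperparameter optimization
ei.fit_white()                          # populates the white light curve arrays used below
hps = ei.optimize_gp_hyperparameters()  # one optimized (sigma, rho) pair per data set
ei.plot_white_gp_predictions(ncol=2)    # white residuals with the GP prediction overplotted

# The new accessors each return one array per data set:
for t, f, m, e in zip(ei.white_times, ei.white_fluxes, ei.white_models, ei.white_errors):
    print(t.shape, f.shape, m.shape, e.shape)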
exoiris/tsdata.py CHANGED
@@ -47,7 +47,7 @@ class TSData:
  def __init__(self, time: Sequence, wavelength: Sequence, fluxes: Sequence, errors: Sequence, name: str,
  noise_group: str = 'a', wl_edges : Sequence | None = None, tm_edges : Sequence | None = None,
  transit_mask: ndarray | None = None, ephemeris: Ephemeris | None = None, n_baseline: int = 1,
- mask: ndarray = None) -> None:
+ mask: ndarray = None, ephemeris_group: int = 0, offset_group: int = 0) -> None:
  """
  Parameters
  ----------
@@ -81,6 +81,12 @@ class TSData:
  if n_baseline < 1:
  raise ValueError("n_baseline must be greater than zero.")

+ if ephemeris_group < 0:
+ raise ValueError("ephemeris_group must be a non-negative integer.")
+
+ if offset_group < 0:
+ raise ValueError("offset_group must be a non-negative integer.")
+
  self.name: str = name
  self.time: ndarray = time.copy()
  self.wavelength: ndarray = wavelength
@@ -92,6 +98,8 @@ class TSData:
  self.ephemeris: Ephemeris | None = ephemeris
  self.n_baseline: int = n_baseline
  self._noise_group: str = noise_group
+ self.ephemeris_group: int = ephemeris_group
+ self.offset_group: int = offset_group
  self._dataset: Optional['TSDataSet'] = None
  self._update()

@@ -129,6 +137,8 @@ class TSData:
  mask = pf.ImageHDU(self.mask.astype(int), name=f'mask_{self.name}')
  data.header['ngroup'] = self.noise_group
  data.header['nbasel'] = self.n_baseline
+ data.header['epgroup'] = self.ephemeris_group
+ data.header['offgroup'] = self.offset_group
  #TODO: export ephemeris
  return pf.HDUList([time, wave, data, ootm, mask])

@@ -153,6 +163,8 @@ class TSData:
  ootm = hdul[f'OOTM_{name}'].data.astype(bool)
  mask = hdul[f'MASK_{name}'].data.astype(bool)
  noise_group = hdul[f'DATA_{name}'].header['NGROUP']
+ ephemeris_group = hdul[f'DATA_{name}'].header['EPGROUP']
+ offset_group = hdul[f'DATA_{name}'].header['OFFGROUP']

  try:
  n_baseline = hdul[f'DATA_{name}'].header['NBASEL']
@@ -161,7 +173,7 @@ class TSData:

  #TODO: import ephemeris
  return TSData(time, wave, data[0], data[1], name=name, noise_group=noise_group, transit_mask=ootm,
- n_baseline=n_baseline, mask=mask)
+ n_baseline=n_baseline, mask=mask, ephemeris_group=ephemeris_group, offset_group=offset_group)

  def __repr__(self) -> str:
  return f"TSData Name:'{self.name}' [{self.wavelength[0]:.2f} - {self.wavelength[-1]:.2f}] nwl={self.nwl} npt={self.npt}"
@@ -286,13 +298,20 @@ class TSData:
  m = masks[0]
  d = TSData(name=f'{self.name}_1', time=self.time[m], wavelength=self.wavelength,
  fluxes=self.fluxes[:, m], errors=self.errors[:, m], mask=self.mask[:, m],
- noise_group=self.noise_group, transit_mask=self.transit_mask[m],
- ephemeris=self.ephemeris, n_baseline=self.n_baseline)
+ noise_group=self.noise_group,
+ ephemeris_group=self.ephemeris_group,
+ offset_group=self.offset_group,
+ transit_mask=self.transit_mask[m],
+ ephemeris=self.ephemeris,
+ n_baseline=self.n_baseline)
  for i, m in enumerate(masks[1:]):
  d = d + TSData(name=f'{self.name}_{i+2}', time=self.time[m], wavelength=self.wavelength,
  fluxes=self.fluxes[:, m], errors=self.errors[:, m], mask=self.mask[:, m],
  noise_group=self.noise_group,
- transit_mask=self.transit_mask[m], ephemeris=self.ephemeris,
+ ephemeris_group=self.ephemeris_group,
+ offset_group=self.offset_group,
+ transit_mask=self.transit_mask[m],
+ ephemeris=self.ephemeris,
  n_baseline=self.n_baseline)
  return d

@@ -325,6 +344,8 @@ class TSData:
  errors=self.errors[m],
  mask=self.mask[m],
  noise_group=self.noise_group,
+ ephemeris_group=self.ephemeris_group,
+ offset_group=self.offset_group,
  wl_edges=(self._wl_l_edges[m], self._wl_r_edges[m]),
  tm_edges=(self._tm_l_edges, self._tm_r_edges),
  transit_mask=self.transit_mask, ephemeris=self.ephemeris,
@@ -360,6 +381,8 @@ class TSData:
  errors=self.errors[:, m],
  mask = self.mask[:, m],
  noise_group=self.noise_group,
+ ephemeris_group=self.ephemeris_group,
+ offset_group=self.offset_group,
  wl_edges=(self._wl_l_edges, self._wl_r_edges),
  tm_edges=(self._tm_l_edges[m], self._tm_r_edges[m]),
  transit_mask=self.transit_mask[m], ephemeris=self.ephemeris,
@@ -574,10 +597,16 @@ class TSData:
  binning.bins, estimate_errors=estimate_errors)
  if not all(isfinite(be)):
  warnings.warn('Error estimation failed for some bins, check the error array.')
- return TSData(self.time, binning.bins.mean(1), bf, be, wl_edges=(binning.bins[:,0], binning.bins[:,1]),
- name=self.name, tm_edges=(self._tm_l_edges, self._tm_r_edges), noise_group=self.noise_group,
- transit_mask=self.transit_mask, ephemeris=self.ephemeris, n_baseline=self.n_baseline)
-
+ return TSData(self.time, binning.bins.mean(1), bf, be,
+ wl_edges=(binning.bins[:,0], binning.bins[:,1]),
+ name=self.name,
+ tm_edges=(self._tm_l_edges, self._tm_r_edges),
+ noise_group=self.noise_group,
+ ephemeris_group=self.ephemeris_group,
+ offset_group=self.offset_group,
+ transit_mask=self.transit_mask,
+ ephemeris=self.ephemeris,
+ n_baseline=self.n_baseline)

  def bin_time(self, binning: Optional[Union[Binning, CompoundBinning]] = None,
  nb: Optional[int] = None, bw: Optional[float] = None,
@@ -609,10 +638,14 @@ class TSData:
  bf, be = bin2d(self.fluxes.T, self.errors.T, self._tm_l_edges, self._tm_r_edges,
  binning.bins, estimate_errors=estimate_errors)
  d = TSData(binning.bins.mean(1), self.wavelength, bf.T, be.T,
- wl_edges=(self._wl_l_edges, self._wl_r_edges),
- tm_edges=(binning.bins[:,0], binning.bins[:,1]),
- name=self.name, noise_group=self.noise_group,
- ephemeris=self.ephemeris, n_baseline=self.n_baseline)
+ wl_edges=(self._wl_l_edges, self._wl_r_edges),
+ tm_edges=(binning.bins[:,0], binning.bins[:,1]),
+ name=self.name,
+ noise_group=self.noise_group,
+ ephemeris=self.ephemeris,
+ n_baseline=self.n_baseline,
+ ephemeris_group=self.ephemeris_group,
+ offset_group=self.offset_group)
  if self.ephemeris is not None:
  d.mask_transit(ephemeris=self.ephemeris)
  return d
@@ -683,6 +716,11 @@ class TSDataSet:
  """Number of noise groups."""
  return len(set(self.noise_groups))

+ @property
+ def offset_groups(self) -> list[int]:
+ """List of offset groups."""
+ return [d.offset_group for d in self.data]
+
  @property
  def n_baselines(self) -> list[int]:
  """Number of baseline coefficients for each data set."""
exoiris/tslpf.py CHANGED
@@ -207,6 +207,7 @@ class TSLPF(LogPosteriorFunction):
  self._init_p_radius_ratios()
  self._init_p_noise()
  self._init_p_baseline()
+ self._init_p_bias()
  self.ps.freeze()
  self.ndim = len(self.ps)

@@ -240,23 +241,26 @@ class TSLPF(LogPosteriorFunction):
  self._gp_ferr = []
  self._gp = []
  for d in self.data:
- self._gp_time.append((tile(d.time[newaxis, :], (d.nwl, 1)) + arange(d.nwl)[:, newaxis]).ravel())
- self._gp_flux.append(d.fluxes.ravel())
- self._gp_ferr.append(d.errors.ravel())
+ self._gp_time.append((tile(d.time[newaxis, :], (d.nwl, 1)) + arange(d.nwl)[:, newaxis])[d.mask])
+ self._gp_flux.append(d.fluxes[d.mask])
+ self._gp_ferr.append(d.errors[d.mask])
  self._gp.append(GP(terms.Matern32Term(sigma=self._gp_flux[-1].std(), rho=0.1)))
  self._gp[-1].compute(self._gp_time[-1], yerr=self._gp_ferr[-1], quiet=True)

- def set_gp_hyperparameters(self, sigma: float, rho: float) -> None:
+ def set_gp_hyperparameters(self, sigma: float, rho: float, idata: int | None = None) -> None:
  """Sets the Gaussian Process hyperparameters assuming a Matern32 kernel.

  Parameters
  ----------
- sigma : float
+ sigma
  The kernel amplitude parameter.

- rho : float
+ rho
  The length scale parameter.

+ idata
+ The data set for which to set the hyperparameters. If None, the hyperparameters are set for all data sets.
+
  Raises
  ------
  RuntimeError
@@ -264,9 +268,10 @@ class TSLPF(LogPosteriorFunction):
  """
  if self._gp is None:
  raise RuntimeError('The GP needs to be initialized before setting hyperparameters.')
- for i, gp in enumerate(self._gp):
- gp.kernel = terms.Matern32Term(sigma=sigma, rho=rho)
- gp.compute(self._gp_time[i], yerr=self._gp_ferr[i], quiet=True)
+
+ for i in ([idata] or range(self.data.size)):
+ self._gp[i].kernel = terms.Matern32Term(sigma=sigma, rho=rho)
+ self._gp[i].compute(self._gp_time[i], yerr=self._gp_ferr[i], quiet=True)

  def set_gp_kernel(self, kernel: terms.Term) -> None:
  """Sets the kernel for the Gaussian Process (GP) model and recomputes the GP.
@@ -341,6 +346,16 @@ class TSLPF(LogPosteriorFunction):
  self._start_baseline = ps.blocks[-1].start
  self._sl_baseline = ps.blocks[-1].slice

+ def _init_p_bias(self):
+ ps = self.ps
+ offset_groups = self.data.offset_groups
+ pp = []
+ for i in range(1, max(offset_groups) + 1):
+ pp.append(GParameter(f'bias_{i:02d}', 'bias offset', '', NP(0.0, 1e-6), (-inf, inf)))
+ ps.add_global_block('bias_offsets', pp)
+ self._start_bias = ps.blocks[-1].start
+ self._sl_bias = ps.blocks[-1].slice
+
  def set_ldtk_prior(self, teff, logg, metal, dataset: str = 'visir-lowres', width: float = 50,
  uncertainty_multiplier: float = 10):
  hw = 0.5 * width
@@ -569,15 +584,21 @@ class TSLPF(LogPosteriorFunction):
  inc = i_from_ba(pv[:, 3], aor)
  ecc = pv[:, 4] ** 2 + pv[:, 5] ** 2
  w = arctan2(pv[:, 5], pv[:, 4])
+ fluxes = []
  if isinstance(self.ldmodel, LDTkLD):
  ldp, istar = self.ldmodel(self.tms[0].mu, ldp)
  ldpi = dstack([ldp, istar])
- flux = []
  for i, tm in enumerate(self.tms):
- flux.append(tm.evaluate(k[i], ldpi[:, self.ldmodel.wlslices[i], :], t0, p, aor, inc, ecc, w, copy))
- return flux
+ fluxes.append(tm.evaluate(k[i], ldpi[:, self.ldmodel.wlslices[i], :], t0, p, aor, inc, ecc, w, copy))
  else:
- return [tm.evaluate(k[i], ldp[i], t0, p, aor, inc, ecc, w, copy) for i,tm in enumerate(self.tms)]
+ for i, tm in enumerate(self.tms):
+ fluxes.append(tm.evaluate(k[i], ldp[i], t0, p, aor, inc, ecc, w, copy))
+
+ for i, d in enumerate(self.data):
+ if d.offset_group > 0:
+ biases = pv[:, self._start_bias + d.offset_group - 1][:, None, None]
+ fluxes[i] = biases + (1.0 - biases) * fluxes[i]
+ return fluxes

  def baseline_model(self, pv):
  pv = atleast_2d(pv)[:, self._sl_baseline]
@@ -646,7 +667,7 @@ class TSLPF(LogPosteriorFunction):
  elif self._nm == NM_GP_FIXED:
  for j in range(npv):
  for i in range(self.data.size):
- lnl[j] += self._gp[i].log_likelihood(self._gp_flux[i] - fmod[i][j].ravel())
+ lnl[j] += self._gp[i].log_likelihood(self._gp_flux[i] - fmod[i][j][self.data[i].mask])
  else:
  raise NotImplementedError("The free GP noise model hasn't been implemented yet.")
  return lnl if npv > 1 else lnl[0]
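The bias applied in transit_model for data sets with offset_group > 0 rescales the modelled flux as c + (1 - c) * f, which keeps the out-of-transit level at unity while scaling the transit depth by (1 - c). A small numerical illustration with toy values (not package code):

import numpy as np

c = 1e-4                                    # a bias_XX parameter value drawn from the parameter vector
f = np.array([1.0, 0.99, 0.98, 0.99, 1.0])  # toy transit model for one data set
f_biased = c + (1.0 - c) * f

# Out-of-transit points stay exactly at 1.0, and the depth (1 - f) shrinks by a factor (1 - c):
print(f_biased)
print(1.0 - f_biased[2], (1.0 - c) * (1.0 - f[2]))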
exoiris/wlpf.py CHANGED
@@ -15,7 +15,7 @@
  # along with this program. If not, see <https://www.gnu.org/licenses/>.
  from matplotlib.figure import Figure
  from matplotlib.pyplot import subplots, setp
- from numpy import log10, diff, sqrt, floor, ceil, arange, newaxis, nanmean, isfinite
+ from numpy import log10, diff, sqrt, floor, ceil, arange, newaxis, nanmean, isfinite, nan, where, nanstd
  from scipy.optimize import minimize

  from pytransit import BaseLPF, LinearModelBaseline
@@ -26,16 +26,18 @@ from .tslpf import TSLPF
  class WhiteLPF(BaseLPF):
  def __init__(self, tsa: TSLPF):
  self.tsa = tsa
- fluxes, times = [], []
+ fluxes, times, errors = [], [], []
  for t, f in zip(tsa.data.times, tsa.data.fluxes):
- f = nanmean(f, 0)
- m = isfinite(f)
+ mf = nanmean(f, 0)
+ m = isfinite(mf)
  times.append(t[m])
- fluxes.append(f[m])
+ fluxes.append(mf[m])
+ errors.append(nanstd(where(m, f, nan), 0) / sqrt(f.shape[0]))
  covs = [(t-t.mean())[:, newaxis] for t in times]
+ self.std_errors = errors

- super().__init__('white', tsa.data.unique_noise_groups, times, fluxes, covariates=covs, wnids=tsa.data.ngids,
- pbids=tsa.data.ngids)
+ super().__init__('white', tsa.data.unique_noise_groups, times, fluxes,
+ covariates=covs, wnids=tsa.data.ngids, pbids=tsa.data.ngids)
  self.set_prior('tc', tsa.ps[tsa.ps.find_pid('tc')].prior)
  self.set_prior('p', tsa.ps[tsa.ps.find_pid('p')].prior)
  self.set_prior('rho', tsa.ps[tsa.ps.find_pid('rho')].prior)
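The new std_errors attribute holds, for each data set, the standard error of the per-exposure mean over wavelength: the standard deviation across the wavelength axis divided by the square root of the number of wavelength channels. A toy numpy illustration of the same computation (not package code; the masking of non-finite exposures done above via where(m, f, nan) is omitted here):

import numpy as np

rng = np.random.default_rng(0)
f = rng.normal(1.0, 1e-3, size=(40, 500))   # toy flux cube: (nwl wavelengths, npt exposures)

white_flux = np.nanmean(f, 0)                       # white light curve
white_err = np.nanstd(f, 0) / np.sqrt(f.shape[0])   # standard error of the mean, per exposure

print(white_flux.shape, white_err.shape)    # both (500,)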
@@ -77,7 +79,7 @@ class WhiteLPF(BaseLPF):
  def plot(self, axs=None, figsize=None, ncols=2) -> Figure:
  if axs is None:
  nrows = int(ceil(self.nlc / ncols))
- fig, axs = subplots(nrows, ncols, figsize=figsize, sharey='all', squeeze=False)
+ fig, axs = subplots(nrows, ncols, figsize=figsize, sharey='all', squeeze=False, constrained_layout=True)
  else:
  fig = axs[0].get_figure()

@@ -1,15 +0,0 @@
- exoiris/__init__.py,sha256=gaNfxNKXH41yRK35JghqackXpENyB-NOg-JlciqO1YU,1145
- exoiris/binning.py,sha256=-Y9hdK0jZj8DOS82keaprneid2lZ4rCx-keWlKi0LP8,6455
- exoiris/ephemeris.py,sha256=dthBkJztT5yAP6VnnO7jGvxikboFUQBUGPUfBCFrA3w,1316
- exoiris/exoiris.py,sha256=QR6c-4M_koYm8YA6gZst5jWiSfG57ciOTqR9foX0AgI,48315
- exoiris/ldtkld.py,sha256=u4c5Yh6JlG0Zfec7-nOCQGXxIt1VVp9mniFMM7h-L6s,3489
- exoiris/tsdata.py,sha256=uIHml2RKRS6eAF9O0pkdvVj7xL7RqTHfHXQlCUhgtOc,31506
- exoiris/tslpf.py,sha256=gQJpLoz8WWmS6buAupDxFJnenUjXYQe_ZWcq_UZ2kls,27717
- exoiris/tsmodel.py,sha256=6NaGY48fWHUT_7ti6Ao618PN-LgyoIhfQd8lZQqZ7hU,5160
- exoiris/util.py,sha256=5PynwYYHRrzyXJHskBtp2J-pcM59zsA1_VtDxencQm4,4630
- exoiris/wlpf.py,sha256=un-aEevAhqDfCocnP3I5lTdQDkLdmUBEbtvORRVR5LM,4370
- ExoIris-0.16.0.dist-info/LICENSE,sha256=OXLcl0T2SZ8Pmy2_dmlvKuetivmyPd5m1q-Gyd-zaYY,35149
- ExoIris-0.16.0.dist-info/METADATA,sha256=9_-HWCcU8GrM0a_k2PS_JD22bkmPf7Mi4BxrG0DBuXg,4189
- ExoIris-0.16.0.dist-info/WHEEL,sha256=In9FTNxeP60KnTkGw7wk6mJPYd_dQSjEZmXdBdMCI-8,91
- ExoIris-0.16.0.dist-info/top_level.txt,sha256=EoNxT6c5mQDcM0f_LUQB-ETsYg03lNaV3o2L_Yc6-aE,8
- ExoIris-0.16.0.dist-info/RECORD,,