qpytorch-0.1-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of qpytorch might be problematic.
- qpytorch/__init__.py +327 -0
- qpytorch/constraints/__init__.py +3 -0
- qpytorch/distributions/__init__.py +21 -0
- qpytorch/distributions/delta.py +86 -0
- qpytorch/distributions/multitask_multivariate_qexponential.py +435 -0
- qpytorch/distributions/multivariate_qexponential.py +581 -0
- qpytorch/distributions/power.py +113 -0
- qpytorch/distributions/qexponential.py +153 -0
- qpytorch/functions/__init__.py +58 -0
- qpytorch/kernels/__init__.py +80 -0
- qpytorch/kernels/grid_interpolation_kernel.py +213 -0
- qpytorch/kernels/inducing_point_kernel.py +151 -0
- qpytorch/kernels/kernel.py +695 -0
- qpytorch/kernels/matern32_kernel_grad.py +155 -0
- qpytorch/kernels/matern52_kernel_grad.py +194 -0
- qpytorch/kernels/matern52_kernel_gradgrad.py +248 -0
- qpytorch/kernels/polynomial_kernel_grad.py +88 -0
- qpytorch/kernels/qexponential_symmetrized_kl_kernel.py +61 -0
- qpytorch/kernels/rbf_kernel_grad.py +125 -0
- qpytorch/kernels/rbf_kernel_gradgrad.py +186 -0
- qpytorch/kernels/rff_kernel.py +153 -0
- qpytorch/lazy/__init__.py +9 -0
- qpytorch/likelihoods/__init__.py +66 -0
- qpytorch/likelihoods/bernoulli_likelihood.py +75 -0
- qpytorch/likelihoods/beta_likelihood.py +76 -0
- qpytorch/likelihoods/gaussian_likelihood.py +472 -0
- qpytorch/likelihoods/laplace_likelihood.py +59 -0
- qpytorch/likelihoods/likelihood.py +437 -0
- qpytorch/likelihoods/likelihood_list.py +60 -0
- qpytorch/likelihoods/multitask_gaussian_likelihood.py +542 -0
- qpytorch/likelihoods/multitask_qexponential_likelihood.py +545 -0
- qpytorch/likelihoods/noise_models.py +184 -0
- qpytorch/likelihoods/qexponential_likelihood.py +494 -0
- qpytorch/likelihoods/softmax_likelihood.py +97 -0
- qpytorch/likelihoods/student_t_likelihood.py +90 -0
- qpytorch/means/__init__.py +23 -0
- qpytorch/metrics/__init__.py +17 -0
- qpytorch/mlls/__init__.py +53 -0
- qpytorch/mlls/_approximate_mll.py +79 -0
- qpytorch/mlls/deep_approximate_mll.py +30 -0
- qpytorch/mlls/deep_predictive_log_likelihood.py +32 -0
- qpytorch/mlls/exact_marginal_log_likelihood.py +96 -0
- qpytorch/mlls/gamma_robust_variational_elbo.py +106 -0
- qpytorch/mlls/inducing_point_kernel_added_loss_term.py +69 -0
- qpytorch/mlls/kl_qexponential_added_loss_term.py +41 -0
- qpytorch/mlls/leave_one_out_pseudo_likelihood.py +73 -0
- qpytorch/mlls/marginal_log_likelihood.py +48 -0
- qpytorch/mlls/predictive_log_likelihood.py +76 -0
- qpytorch/mlls/sum_marginal_log_likelihood.py +40 -0
- qpytorch/mlls/variational_elbo.py +77 -0
- qpytorch/models/__init__.py +72 -0
- qpytorch/models/approximate_qep.py +115 -0
- qpytorch/models/deep_qeps/__init__.py +22 -0
- qpytorch/models/deep_qeps/deep_qep.py +155 -0
- qpytorch/models/deep_qeps/dspp.py +114 -0
- qpytorch/models/exact_prediction_strategies.py +880 -0
- qpytorch/models/exact_qep.py +349 -0
- qpytorch/models/model_list.py +100 -0
- qpytorch/models/pyro/__init__.py +28 -0
- qpytorch/models/pyro/_pyro_mixin.py +57 -0
- qpytorch/models/pyro/distributions/__init__.py +5 -0
- qpytorch/models/pyro/pyro_qep.py +105 -0
- qpytorch/models/qep.py +7 -0
- qpytorch/models/qeplvm/__init__.py +6 -0
- qpytorch/models/qeplvm/bayesian_qeplvm.py +40 -0
- qpytorch/models/qeplvm/latent_variable.py +102 -0
- qpytorch/module.py +30 -0
- qpytorch/optim/__init__.py +5 -0
- qpytorch/priors/__init__.py +42 -0
- qpytorch/priors/qep_priors.py +81 -0
- qpytorch/test/__init__.py +22 -0
- qpytorch/test/base_likelihood_test_case.py +106 -0
- qpytorch/test/model_test_case.py +150 -0
- qpytorch/test/variational_test_case.py +400 -0
- qpytorch/utils/__init__.py +38 -0
- qpytorch/utils/warnings.py +37 -0
- qpytorch/variational/__init__.py +47 -0
- qpytorch/variational/_variational_distribution.py +61 -0
- qpytorch/variational/_variational_strategy.py +391 -0
- qpytorch/variational/additive_grid_interpolation_variational_strategy.py +90 -0
- qpytorch/variational/batch_decoupled_variational_strategy.py +256 -0
- qpytorch/variational/cholesky_variational_distribution.py +65 -0
- qpytorch/variational/ciq_variational_strategy.py +352 -0
- qpytorch/variational/delta_variational_distribution.py +41 -0
- qpytorch/variational/grid_interpolation_variational_strategy.py +113 -0
- qpytorch/variational/independent_multitask_variational_strategy.py +114 -0
- qpytorch/variational/lmc_variational_strategy.py +248 -0
- qpytorch/variational/mean_field_variational_distribution.py +58 -0
- qpytorch/variational/multitask_variational_strategy.py +317 -0
- qpytorch/variational/natural_variational_distribution.py +152 -0
- qpytorch/variational/nearest_neighbor_variational_strategy.py +487 -0
- qpytorch/variational/orthogonally_decoupled_variational_strategy.py +128 -0
- qpytorch/variational/tril_natural_variational_distribution.py +130 -0
- qpytorch/variational/uncorrelated_multitask_variational_strategy.py +114 -0
- qpytorch/variational/unwhitened_variational_strategy.py +225 -0
- qpytorch/variational/variational_strategy.py +280 -0
- qpytorch/version.py +4 -0
- qpytorch-0.1.dist-info/LICENSE +21 -0
- qpytorch-0.1.dist-info/METADATA +177 -0
- qpytorch-0.1.dist-info/RECORD +102 -0
- qpytorch-0.1.dist-info/WHEEL +5 -0
- qpytorch-0.1.dist-info/top_level.txt +1 -0
qpytorch/likelihoods/multitask_qexponential_likelihood.py
@@ -0,0 +1,545 @@
#!/usr/bin/env python3

import warnings  # NOTE: used below but missing from the released file's imports
from copy import deepcopy  # NOTE: used below but missing from the released file's imports
from typing import Any, Optional, Tuple, Union

import torch
from linear_operator import to_linear_operator
from linear_operator.operators import (
    ConstantDiagLinearOperator,
    DiagLinearOperator,
    KroneckerProductDiagLinearOperator,
    KroneckerProductLinearOperator,
    LinearOperator,
    RootLinearOperator,
    BlockDiagLinearOperator,
    ZeroLinearOperator,
)
from torch import Tensor

from ..constraints import GreaterThan, Interval
from ..distributions import base_distributions, QExponential, MultitaskMultivariateQExponential, Distribution
from ..lazy import LazyEvaluatedKernelTensor
from ..likelihoods import _QExponentialLikelihoodBase, Likelihood
from ..priors import Prior
from ..utils.warnings import QEPInputWarning  # NOTE: referenced below; import path assumed from qpytorch/utils/warnings.py
from .noise_models import FixedNoise, MultitaskHomoskedasticNoise, Noise


class _MultitaskQExponentialLikelihoodBase(_QExponentialLikelihoodBase):
    r"""
    Base class for multi-task QExponential likelihoods, supporting general heteroskedastic noise models.

    :param num_tasks: Number of tasks.
    :param noise_covar: A model for the noise covariance. This can be a simple homoskedastic noise model, or a QEP
        that is to be fitted on the observed measurement errors.
    :param rank: The rank of the task noise covariance matrix to fit. If `rank`
        is set to 0, then a diagonal covariance matrix is fit.
    :param task_correlation_prior: Prior to use over the task noise correlation
        matrix. Only used when :math:`\text{rank} > 0`.
    :param batch_shape: Number of batches.
    """

    def __init__(
        self,
        num_tasks: int,
        noise_covar: Union[Noise, FixedNoise],
        rank: int = 0,
        task_correlation_prior: Optional[Prior] = None,
        batch_shape: torch.Size = torch.Size(),
        **kwargs: Any,
    ) -> None:
        super().__init__(noise_covar=noise_covar, **kwargs)
        if rank != 0:
            if rank > num_tasks:
                raise ValueError(f"Cannot have rank ({rank}) greater than num_tasks ({num_tasks})")
            tidcs = torch.tril_indices(num_tasks, rank, dtype=torch.long)
            self.tidcs: Tensor = tidcs[:, 1:]  # (1, 1) must be 1.0, no need to parameterize this
            task_noise_corr = torch.randn(*batch_shape, self.tidcs.size(-1))
            self.register_parameter("task_noise_corr", torch.nn.Parameter(task_noise_corr))
            if task_correlation_prior is not None:
                self.register_prior(
                    "MultitaskErrorCorrelationPrior", task_correlation_prior, lambda m: m._eval_corr_matrix
                )
        elif task_correlation_prior is not None:
            raise ValueError("Can only specify task_correlation_prior if rank > 0")
        self.num_tasks = num_tasks
        self.rank = rank

    def _eval_corr_matrix(self) -> Tensor:
        tnc = self.task_noise_corr
        fac_diag = torch.ones(*tnc.shape[:-1], self.num_tasks, device=tnc.device, dtype=tnc.dtype)
        Cfac = torch.diag_embed(fac_diag)
        Cfac[..., self.tidcs[0], self.tidcs[1]] = self.task_noise_corr
        # squared rows must sum to one for this to be a correlation matrix
        C = Cfac / Cfac.pow(2).sum(dim=-1, keepdim=True).sqrt()
        return C @ C.transpose(-1, -2)
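
    # A minimal numeric sketch of the construction above (assumed values:
    # num_tasks=2, rank=1), showing that the result has a unit diagonal:
    #
    #     Cfac = torch.tensor([[1.0, 0.0], [0.7, 1.0]])
    #     C = Cfac / Cfac.pow(2).sum(dim=-1, keepdim=True).sqrt()
    #     corr = C @ C.transpose(-1, -2)
    #     # corr.diagonal() == tensor([1., 1.]); corr[0, 1] == corr[1, 0] ~= 0.5735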

    def marginal(
        self, function_dist: MultitaskMultivariateQExponential, *params: Any, **kwargs: Any
    ) -> MultitaskMultivariateQExponential:  # pyre-ignore[14]
        r"""
        If :math:`\text{rank} = 0`, adds the task noises to the diagonal of the
        covariance matrix of the supplied
        :obj:`~qpytorch.distributions.MultivariateQExponential` or
        :obj:`~qpytorch.distributions.MultitaskMultivariateQExponential`. Otherwise,
        adds a rank `rank` covariance matrix to it.

        To accomplish this, we form a new
        :obj:`~linear_operator.operators.KroneckerProductLinearOperator`
        between :math:`I_{n}`, an identity matrix with size equal to the number
        of data points, and a (not necessarily diagonal) matrix containing the
        task noises :math:`D_{t}`.

        We also incorporate a shared `noise` parameter from the base
        :class:`qpytorch.likelihoods.QExponentialLikelihood` that we extend.

        The final covariance matrix after this method is then
        :math:`\mathbf K + \mathbf D_{t} \otimes \mathbf I_{n} + \sigma^{2} \mathbf I_{nt}`.

        :param function_dist: Random variable whose covariance
            matrix is a :obj:`~linear_operator.operators.LinearOperator` we intend to augment.
        :rtype: `qpytorch.distributions.MultitaskMultivariateQExponential`
        :return: A new random variable whose covariance matrix is a
            :obj:`~linear_operator.operators.LinearOperator` with
            :math:`\mathbf D_{t} \otimes \mathbf I_{n}` and :math:`\sigma^{2} \mathbf I_{nt}` added.
        """
        mean, covar, power = function_dist.mean, function_dist.lazy_covariance_matrix, function_dist.power

        # ensure that sumKroneckerLT is actually called
        if isinstance(covar, LazyEvaluatedKernelTensor):
            covar = covar.evaluate_kernel()

        covar_kron_lt = self._shaped_noise_covar(
            mean.shape, add_noise=self.has_global_noise, interleaved=function_dist._interleaved
        )
        covar = covar + covar_kron_lt

        return function_dist.__class__(mean, covar, power, interleaved=function_dist._interleaved)

    def _shaped_noise_covar(
        self, shape: torch.Size, add_noise: Optional[bool] = True, interleaved: bool = True, *params: Any, **kwargs: Any
    ) -> LinearOperator:
        if not self.has_task_noise:
            noise = ConstantDiagLinearOperator(self.noise, diag_shape=shape[-2] * self.num_tasks)
            return noise

        if self.rank == 0:
            task_noises = self.raw_task_noises_constraint.transform(self.raw_task_noises)
            task_var_lt = DiagLinearOperator(task_noises)
            dtype, device = task_noises.dtype, task_noises.device
            ckl_init = KroneckerProductDiagLinearOperator
        else:
            task_noise_covar_factor = self.task_noise_covar_factor
            task_var_lt = RootLinearOperator(task_noise_covar_factor)
            dtype, device = task_noise_covar_factor.dtype, task_noise_covar_factor.device
            ckl_init = KroneckerProductLinearOperator

        eye_lt = ConstantDiagLinearOperator(
            torch.ones(*shape[:-2], 1, dtype=dtype, device=device), diag_shape=shape[-2]
        )
        task_var_lt = task_var_lt.expand(*shape[:-2], *task_var_lt.matrix_shape)  # pyre-ignore[6]

        # to add the latent noise we exploit the fact that
        # I \kron D_T + \sigma^2 I_{NT} = I \kron (D_T + \sigma^2 I)
        # which allows us to move the latent noise inside the task dependent noise
        # thereby allowing exploitation of Kronecker structure in this likelihood.
        if add_noise and self.has_global_noise:
            noise = ConstantDiagLinearOperator(self.noise, diag_shape=task_var_lt.shape[-1])
            task_var_lt = task_var_lt + noise

        if interleaved:
            covar_kron_lt = ckl_init(eye_lt, task_var_lt)
        else:
            covar_kron_lt = ckl_init(task_var_lt, eye_lt)

        return covar_kron_lt
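
    # A quick dense check of the Kronecker identity exploited above
    # (a sketch with assumed sizes num_tasks=2, n=3, sigma^2 = 0.01):
    #
    #     D = torch.diag(torch.tensor([0.1, 0.2]))
    #     lhs = torch.kron(torch.eye(3), D) + 0.01 * torch.eye(6)
    #     rhs = torch.kron(torch.eye(3), D + 0.01 * torch.eye(2))
    #     assert torch.allclose(lhs, rhs)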

    def forward(self, function_samples: Tensor, *params: Any, **kwargs: Any) -> QExponential:
        noise = self._shaped_noise_covar(function_samples.shape, *params, **kwargs).diagonal(dim1=-1, dim2=-2)
        noise = noise.reshape(*noise.shape[:-1], *function_samples.shape[-2:])
        return base_distributions.Independent(QExponential(function_samples, noise.sqrt(), self.power), 1)


class MultitaskQExponentialLikelihood(_MultitaskQExponentialLikelihoodBase):
    r"""
    A convenient extension of the :class:`~qpytorch.likelihoods.QExponentialLikelihood` to the multitask setting that allows
    for a full cross-task covariance structure for the noise. The fitted covariance matrix has rank `rank`.
    If a strictly diagonal task noise covariance matrix is desired, then rank=0 should be set. (This option still
    allows for a different `noise` parameter for each task.)

    Like the Q-Exponential likelihood, this object can be used with exact inference.

    .. note::
        At least one of :attr:`has_global_noise` or :attr:`has_task_noise` should be specified.

    .. note::
        MultitaskQExponentialLikelihood has an analytic marginal distribution.

    :param num_tasks: Number of tasks.
    :param rank: The rank of the task noise covariance matrix to fit. If `rank`
        is set to 0, then a diagonal covariance matrix is fit.
    :param batch_shape: Number of batches.
    :param task_prior: Prior to use over the task noise correlation
        matrix. Only used when :math:`\text{rank} > 0`.
    :param noise_prior: Prior to use over the noise variances.
    :param noise_constraint: Constraint on the noise variances (default: `GreaterThan(1e-4)`).
    :param has_global_noise: Whether to include a :math:`\sigma^2 \mathbf I_{nt}` term in the noise model.
    :param has_task_noise: Whether to include task-specific noise terms, which add
        :math:`\mathbf I_n \otimes \mathbf D_T` into the noise model.
    :param kwargs: power (default: 2.0), miu (default: False).

    :ivar torch.Tensor task_noise_covar: The inter-task noise covariance matrix
    :ivar torch.Tensor task_noises: (Optional) task specific noise variances (added onto the `task_noise_covar`)
    :ivar torch.Tensor noise: (Optional) global noise variance (added onto the `task_noise_covar`)
    """

    def __init__(
        self,
        num_tasks: int,
        rank: int = 0,
        batch_shape: torch.Size = torch.Size(),
        task_prior: Optional[Prior] = None,
        noise_prior: Optional[Prior] = None,
        noise_constraint: Optional[Interval] = None,
        has_global_noise: bool = True,
        has_task_noise: bool = True,
        **kwargs: Any,
    ) -> None:
        super(Likelihood, self).__init__()  # pyre-ignore[20]
        self.power = kwargs.pop('power', torch.tensor(2.0))
        self.miu = kwargs.pop('miu', False)  # marginally identical but uncorrelated
        if noise_constraint is None:
            noise_constraint = GreaterThan(1e-4)

        if not has_task_noise and not has_global_noise:
            raise ValueError(
                "At least one of has_task_noise or has_global_noise must be specified. "
                "Attempting to specify a likelihood that has no noise terms."
            )

        if has_task_noise:
            if rank == 0:
                self.register_parameter(
                    name="raw_task_noises", parameter=torch.nn.Parameter(torch.zeros(*batch_shape, num_tasks))
                )
                self.register_constraint("raw_task_noises", noise_constraint)
                if noise_prior is not None:
                    self.register_prior("raw_task_noises_prior", noise_prior, lambda m: m.task_noises)
                if task_prior is not None:
                    raise RuntimeError("Cannot set a `task_prior` if rank=0")
            else:
                self.register_parameter(
                    name="task_noise_covar_factor",
                    parameter=torch.nn.Parameter(torch.randn(*batch_shape, num_tasks, rank)),
                )
                if task_prior is not None:
                    self.register_prior("MultitaskErrorCovariancePrior", task_prior, lambda m: m._eval_covar_matrix)
        self.num_tasks = num_tasks
        self.rank = rank

        if has_global_noise:
            self.register_parameter(name="raw_noise", parameter=torch.nn.Parameter(torch.zeros(*batch_shape, 1)))
            self.register_constraint("raw_noise", noise_constraint)
            if noise_prior is not None:
                self.register_prior("raw_noise_prior", noise_prior, lambda m: m.noise)

        self.has_global_noise = has_global_noise
        self.has_task_noise = has_task_noise

    @property
    def noise(self) -> Optional[Tensor]:
        return self.raw_noise_constraint.transform(self.raw_noise)

    @noise.setter
    def noise(self, value: Union[float, Tensor]) -> None:
        self._set_noise(value)

    @property
    def task_noises(self) -> Optional[Tensor]:
        if self.rank == 0:
            return self.raw_task_noises_constraint.transform(self.raw_task_noises)
        else:
            raise AttributeError(f"Cannot retrieve diagonal task noises when rank ({self.rank}) > 0")

    @task_noises.setter
    def task_noises(self, value: Union[float, Tensor]) -> None:
        if self.rank == 0:
            self._set_task_noises(value)
        else:
            raise AttributeError(f"Cannot set diagonal task noises when rank ({self.rank}) > 0")

    def _set_noise(self, value: Union[float, Tensor]) -> None:
        self.initialize(raw_noise=self.raw_noise_constraint.inverse_transform(value))

    def _set_task_noises(self, value: Union[float, Tensor]) -> None:
        self.initialize(raw_task_noises=self.raw_task_noises_constraint.inverse_transform(value))

    @property
    def task_noise_covar(self) -> Tensor:
        if self.rank > 0:
            return self.task_noise_covar_factor.matmul(self.task_noise_covar_factor.transpose(-1, -2))
        else:
            raise AttributeError("Cannot retrieve task noises when covariance is diagonal.")

    @task_noise_covar.setter
    def task_noise_covar(self, value: Tensor) -> None:
        # internally uses a pivoted cholesky decomposition to construct a low rank
        # approximation of the covariance
        if self.rank > 0:
            with torch.no_grad():
                self.task_noise_covar_factor.data = to_linear_operator(value).pivoted_cholesky(rank=self.rank)
        else:
            raise AttributeError("Cannot set non-diagonal task noises when covariance is diagonal.")

    def _eval_covar_matrix(self) -> Tensor:
        covar_factor = self.task_noise_covar_factor
        noise = self.noise
        D = noise * torch.eye(self.num_tasks, dtype=noise.dtype, device=noise.device)  # pyre-fixme[16]
        return covar_factor.matmul(covar_factor.transpose(-1, -2)) + D

    def marginal(
        self, function_dist: MultitaskMultivariateQExponential, *args: Any, **kwargs: Any
    ) -> MultitaskMultivariateQExponential:
        r"""
        :return: Analytic marginal :math:`p(\mathbf y)`.
        """
        return super().marginal(function_dist, *args, **kwargs)
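
# A minimal usage sketch for MultitaskQExponentialLikelihood (illustration only:
# `qep_model` is a hypothetical multitask QEP model, and ExactMarginalLogLikelihood
# is assumed to be exported from qpytorch.mlls per the file listing above):
#
#     likelihood = MultitaskQExponentialLikelihood(num_tasks=2, rank=1, power=torch.tensor(1.5))
#     mll = qpytorch.mlls.ExactMarginalLogLikelihood(likelihood, qep_model)
#     output = qep_model(train_x)            # MultitaskMultivariateQExponential
#     loss = -mll(output, train_y)           # train_y: N x num_tasks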


class MultitaskFixedNoiseQExponentialLikelihood(_MultitaskQExponentialLikelihoodBase):
    r"""
    A convenient extension of the :class:`~qpytorch.likelihoods.FixedNoiseQExponentialLikelihood` to the multitask setting
    that assumes fixed heteroskedastic noise. This is useful when you have fixed, known observation
    noise for each training example.

    Note that this likelihood takes an additional argument when you call it, `noise`, that adds a specified amount
    of noise to the passed MultivariateQExponential. This allows for adding known observational noise to test data.

    .. note::
        This likelihood can be used for exact or approximate inference.

    :param num_tasks: Number of tasks.
    :param noise: Known observation noise (variance) for each training example.
    :type noise: torch.Tensor (... x N)
    :param rank: The rank of the task noise covariance matrix to fit. If `rank`
        is set to 0, then a diagonal covariance matrix is fit.
    :param learn_additional_noise: Set to true if you additionally want to
        learn added diagonal noise, similar to QExponentialLikelihood.
    :type learn_additional_noise: bool, optional
    :param batch_shape: The batch shape of the learned noise parameter (default
        []) if :obj:`learn_additional_noise=True`.
    :type batch_shape: torch.Size, optional
    :param kwargs: power (default: 2.0), miu (default: False).

    :ivar torch.Tensor noise: :math:`\sigma^2` parameter (noise)

    .. note::
        MultitaskFixedNoiseQExponentialLikelihood has an analytic marginal distribution.

    Example:
        >>> num_tasks = 2
        >>> train_x = torch.randn(55, 2)
        >>> noises = torch.ones(55) * 0.01
        >>> likelihood = MultitaskFixedNoiseQExponentialLikelihood(num_tasks=num_tasks, noise=noises, learn_additional_noise=True)
        >>> pred_y = likelihood(qep_model(train_x))
        >>>
        >>> test_x = torch.randn(21, 2)
        >>> test_noises = torch.ones(21) * 0.02
        >>> pred_y = likelihood(qep_model(test_x), noise=test_noises)
    """

    def __init__(
        self,
        num_tasks: int,
        noise: Tensor,
        rank: int = 0,
        learn_additional_noise: Optional[bool] = False,
        batch_shape: Optional[torch.Size] = torch.Size(),
        **kwargs: Any,
    ) -> None:
        super().__init__(num_tasks=num_tasks, noise_covar=FixedNoise(noise=noise), rank=rank, batch_shape=batch_shape, **kwargs)

        self.second_noise_covar: Optional[MultitaskHomoskedasticNoise] = None
        if learn_additional_noise:
            noise_prior = kwargs.get("noise_prior", None)
            noise_constraint = kwargs.get("noise_constraint", None)
            self.second_noise_covar = MultitaskHomoskedasticNoise(
                num_tasks=1, noise_prior=noise_prior, noise_constraint=noise_constraint, batch_shape=batch_shape
            )

    @property
    def noise(self) -> Tensor:
        return self.noise_covar.noise + self.second_noise

    @noise.setter
    def noise(self, value: Tensor) -> None:
        self.noise_covar.initialize(noise=value)

    @property
    def second_noise(self) -> Union[float, Tensor]:
        if self.second_noise_covar is None:
            return 0.0
        else:
            return self.second_noise_covar.noise

    @second_noise.setter
    def second_noise(self, value: Tensor) -> None:
        if self.second_noise_covar is None:
            raise RuntimeError(
                "Attempting to set secondary learned noise for MultitaskFixedNoiseQExponentialLikelihood, "
                "but learn_additional_noise must have been False!"
            )
        self.second_noise_covar.initialize(noise=value)

    def get_fantasy_likelihood(self, **kwargs: Any) -> "MultitaskFixedNoiseQExponentialLikelihood":
        if "noise" not in kwargs:
            raise RuntimeError("MultitaskFixedNoiseQExponentialLikelihood.fantasize requires a `noise` kwarg")
        old_noise_covar = self.noise_covar
        self.noise_covar = None  # pyre-fixme[8]
        fantasy_likelihood = deepcopy(self)
        self.noise_covar = old_noise_covar

        old_noise = old_noise_covar.noise
        new_noise = kwargs.get("noise")
        if old_noise.dim() != new_noise.dim():
            old_noise = old_noise.expand(*new_noise.shape[:-1], old_noise.shape[-1])
        fantasy_likelihood.noise_covar = FixedNoise(noise=torch.cat([old_noise, new_noise], -1))
        return fantasy_likelihood

    def _shaped_noise_covar(self, base_shape: torch.Size, *params: Any, **kwargs: Any) -> Union[Tensor, LinearOperator]:
        if len(params) > 0:
            # we can infer the shape from the params
            shape = None
        else:
            # here shape[:-1] is the batch shape requested, and shape[-1] is `n`, the number of points
            shape = base_shape[:-2] + base_shape[-2:][::-1]

        res = self.noise_covar(*params, shape=shape, **kwargs)

        if self.second_noise_covar is not None:
            res = res + self.second_noise_covar(*params, shape=shape, **kwargs)
        elif isinstance(res, ZeroLinearOperator):
            warnings.warn(
                "You have passed data through a FixedNoiseQExponentialLikelihood that did not match the size "
                "of the fixed noise, *and* you did not specify noise. This is treated as a no-op.",
                QEPInputWarning,
            )

        return BlockDiagLinearOperator(res)

    def marginal(self, function_dist: MultitaskMultivariateQExponential, *args: Any, **kwargs: Any) -> MultitaskMultivariateQExponential:
        r"""
        :return: Analytic marginal :math:`p(\mathbf y)`.
        """
        return super().marginal(function_dist, *args, **kwargs)


class MultitaskQExponentialDirichletClassificationLikelihood(MultitaskFixedNoiseQExponentialLikelihood):
    r"""
    A multi-classification likelihood that treats the labels as regression targets with fixed heteroskedastic noise.
    From Milios et al., NeurIPS 2018 [https://arxiv.org/abs/1805.10915].

    .. note::
        This multitask likelihood can be used for exact or approximate inference and in deep models.

    :param targets: (... x N) Classification labels.
    :param alpha_epsilon: Tuning parameter for the scaling of the likelihood targets. We'd suggest 0.01 or setting
        it via cross-validation.
    :param learn_additional_noise: Set to true if you additionally want to
        learn added diagonal noise, similar to QExponentialLikelihood.
    :param batch_shape: The batch shape of the learned noise parameter (default
        []) if :obj:`learn_additional_noise=True`.
    :param kwargs: power (default: 2.0), miu (default: False).

    :ivar torch.Tensor noise: :math:`\sigma^2` parameter (noise)

    .. note::
        MultitaskQExponentialDirichletClassificationLikelihood has an analytic marginal distribution.

    Example:
        >>> train_x = torch.randn(55, 1)
        >>> labels = (train_x > 0).long().squeeze(-1)  # non-negative class ids of shape (N,)
        >>> likelihood = MultitaskQExponentialDirichletClassificationLikelihood(targets=labels, learn_additional_noise=True)
        >>> pred_y = likelihood(qep_model(train_x))
        >>>
        >>> test_x = torch.randn(21, 1)
        >>> test_labels = (test_x > 0).long().squeeze(-1)
        >>> pred_y = likelihood(qep_model(test_x), targets=test_labels)
    """

    def _prepare_targets(
        self, targets: Tensor, num_classes: Optional[int] = None, alpha_epsilon: float = 0.01, dtype: torch.dtype = torch.float
    ) -> Tuple[Tensor, Tensor, int]:
        if num_classes is None:
            num_classes = int(targets.max() + 1)
        # set alpha = \alpha_\epsilon
        alpha = alpha_epsilon * torch.ones(targets.shape[-1], num_classes, device=targets.device, dtype=dtype)

        # alpha[class_labels] = 1 + \alpha_\epsilon
        alpha[torch.arange(len(targets)), targets] = alpha[torch.arange(len(targets)), targets] + 1.0

        # sigma^2 = log(1 / alpha + 1)
        sigma2_i = torch.log(alpha.reciprocal() + 1.0)

        # y = log(alpha) - 0.5 * sigma^2
        transformed_targets = alpha.log() - 0.5 * sigma2_i

        return sigma2_i.transpose(-2, -1).type(dtype), transformed_targets.type(dtype), num_classes
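
    # Worked example of the transformation above, assuming alpha_epsilon=0.01
    # and a single point labeled class 0 out of 2 classes:
    #
    #     alpha  = [1.01,  0.01 ]
    #     sigma2 = log(1/alpha + 1)       ~= [ 0.688,  4.615]
    #     y      = log(alpha) - sigma2/2  ~= [-0.334, -6.913]
    #
    # i.e. the observed class receives the larger regression target with the
    # smaller noise variance, while unobserved classes receive strongly
    # negative targets with large noise.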

    def __init__(
        self,
        targets: Tensor,
        alpha_epsilon: float = 0.01,
        learn_additional_noise: Optional[bool] = False,
        batch_shape: torch.Size = torch.Size(),
        dtype: torch.dtype = torch.float,
        **kwargs: Any,
    ) -> None:
        sigma2_labels, transformed_targets, num_classes = self._prepare_targets(
            targets, alpha_epsilon=alpha_epsilon, dtype=dtype
        )
        super().__init__(
            num_tasks=num_classes,
            noise=sigma2_labels,
            learn_additional_noise=learn_additional_noise,
            batch_shape=torch.Size((num_classes,)),
            **kwargs,
        )
        self.transformed_targets: Tensor = transformed_targets.transpose(-2, -1)
        self.num_classes: int = num_classes
        self.targets: Tensor = targets
        self.alpha_epsilon: float = alpha_epsilon

    def get_fantasy_likelihood(self, **kwargs: Any) -> "MultitaskQExponentialDirichletClassificationLikelihood":
        # we assume that the number of classes does not change.

        if "targets" not in kwargs:
            raise RuntimeError("MultitaskQExponentialDirichletClassificationLikelihood.fantasize requires a `targets` kwarg")

        old_noise_covar = self.noise_covar
        self.noise_covar = None  # pyre-fixme[8]
        fantasy_likelihood = deepcopy(self)
        self.noise_covar = old_noise_covar

        old_noise = old_noise_covar.noise
        new_targets = kwargs.get("targets")
        new_noise, new_targets, _ = fantasy_likelihood._prepare_targets(new_targets, alpha_epsilon=self.alpha_epsilon)
        fantasy_likelihood.targets = torch.cat([fantasy_likelihood.targets, new_targets], -1)

        if old_noise.dim() != new_noise.dim():
            old_noise = old_noise.expand(*new_noise.shape[:-1], old_noise.shape[-1])

        fantasy_likelihood.noise_covar = FixedNoise(noise=torch.cat([old_noise, new_noise], -1))
        return fantasy_likelihood

    def marginal(self, function_dist: MultitaskMultivariateQExponential, *args: Any, **kwargs: Any) -> MultitaskMultivariateQExponential:
        r"""
        :return: Analytic marginal :math:`p(\mathbf y)`.
        """
        return super().marginal(function_dist, *args, **kwargs)

    def __call__(self, input: Union[Tensor, MultitaskMultivariateQExponential], *args: Any, **kwargs: Any) -> Distribution:
        if "targets" in kwargs:
            targets = kwargs.pop("targets")
            dtype = self.transformed_targets.dtype
            new_noise, _, _ = self._prepare_targets(targets, dtype=dtype)
            kwargs["noise"] = new_noise
        return super().__call__(input, *args, **kwargs)
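
For orientation, here is a minimal end-to-end sketch of the Dirichlet classification likelihood defined in this file. This is an illustration only: `qep_model` stands for any multitask QEP regression model fit on `likelihood.transformed_targets`, and the `qpytorch.likelihoods` export path is assumed from the package layout above.

    import torch
    import qpytorch

    train_x = torch.randn(55, 1)
    labels = (train_x > 0).long().squeeze(-1)  # non-negative class ids, shape (N,)

    # noise variances and regression targets are derived from the labels
    likelihood = qpytorch.likelihoods.MultitaskQExponentialDirichletClassificationLikelihood(
        targets=labels, learn_additional_noise=True
    )

    # fit a multitask QEP regression model (hypothetical) against
    # likelihood.transformed_targets (shape: num_classes x N), one output per class:
    #   qep_model = MyMultitaskQEP(train_x, likelihood.transformed_targets, likelihood)

    # at prediction time, fixed noise for new labels can be supplied:
    #   pred = likelihood(qep_model(test_x), targets=test_labels)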