qpytorch-0.1-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of qpytorch might be problematic.
- qpytorch/__init__.py +327 -0
- qpytorch/constraints/__init__.py +3 -0
- qpytorch/distributions/__init__.py +21 -0
- qpytorch/distributions/delta.py +86 -0
- qpytorch/distributions/multitask_multivariate_qexponential.py +435 -0
- qpytorch/distributions/multivariate_qexponential.py +581 -0
- qpytorch/distributions/power.py +113 -0
- qpytorch/distributions/qexponential.py +153 -0
- qpytorch/functions/__init__.py +58 -0
- qpytorch/kernels/__init__.py +80 -0
- qpytorch/kernels/grid_interpolation_kernel.py +213 -0
- qpytorch/kernels/inducing_point_kernel.py +151 -0
- qpytorch/kernels/kernel.py +695 -0
- qpytorch/kernels/matern32_kernel_grad.py +155 -0
- qpytorch/kernels/matern52_kernel_grad.py +194 -0
- qpytorch/kernels/matern52_kernel_gradgrad.py +248 -0
- qpytorch/kernels/polynomial_kernel_grad.py +88 -0
- qpytorch/kernels/qexponential_symmetrized_kl_kernel.py +61 -0
- qpytorch/kernels/rbf_kernel_grad.py +125 -0
- qpytorch/kernels/rbf_kernel_gradgrad.py +186 -0
- qpytorch/kernels/rff_kernel.py +153 -0
- qpytorch/lazy/__init__.py +9 -0
- qpytorch/likelihoods/__init__.py +66 -0
- qpytorch/likelihoods/bernoulli_likelihood.py +75 -0
- qpytorch/likelihoods/beta_likelihood.py +76 -0
- qpytorch/likelihoods/gaussian_likelihood.py +472 -0
- qpytorch/likelihoods/laplace_likelihood.py +59 -0
- qpytorch/likelihoods/likelihood.py +437 -0
- qpytorch/likelihoods/likelihood_list.py +60 -0
- qpytorch/likelihoods/multitask_gaussian_likelihood.py +542 -0
- qpytorch/likelihoods/multitask_qexponential_likelihood.py +545 -0
- qpytorch/likelihoods/noise_models.py +184 -0
- qpytorch/likelihoods/qexponential_likelihood.py +494 -0
- qpytorch/likelihoods/softmax_likelihood.py +97 -0
- qpytorch/likelihoods/student_t_likelihood.py +90 -0
- qpytorch/means/__init__.py +23 -0
- qpytorch/metrics/__init__.py +17 -0
- qpytorch/mlls/__init__.py +53 -0
- qpytorch/mlls/_approximate_mll.py +79 -0
- qpytorch/mlls/deep_approximate_mll.py +30 -0
- qpytorch/mlls/deep_predictive_log_likelihood.py +32 -0
- qpytorch/mlls/exact_marginal_log_likelihood.py +96 -0
- qpytorch/mlls/gamma_robust_variational_elbo.py +106 -0
- qpytorch/mlls/inducing_point_kernel_added_loss_term.py +69 -0
- qpytorch/mlls/kl_qexponential_added_loss_term.py +41 -0
- qpytorch/mlls/leave_one_out_pseudo_likelihood.py +73 -0
- qpytorch/mlls/marginal_log_likelihood.py +48 -0
- qpytorch/mlls/predictive_log_likelihood.py +76 -0
- qpytorch/mlls/sum_marginal_log_likelihood.py +40 -0
- qpytorch/mlls/variational_elbo.py +77 -0
- qpytorch/models/__init__.py +72 -0
- qpytorch/models/approximate_qep.py +115 -0
- qpytorch/models/deep_qeps/__init__.py +22 -0
- qpytorch/models/deep_qeps/deep_qep.py +155 -0
- qpytorch/models/deep_qeps/dspp.py +114 -0
- qpytorch/models/exact_prediction_strategies.py +880 -0
- qpytorch/models/exact_qep.py +349 -0
- qpytorch/models/model_list.py +100 -0
- qpytorch/models/pyro/__init__.py +28 -0
- qpytorch/models/pyro/_pyro_mixin.py +57 -0
- qpytorch/models/pyro/distributions/__init__.py +5 -0
- qpytorch/models/pyro/pyro_qep.py +105 -0
- qpytorch/models/qep.py +7 -0
- qpytorch/models/qeplvm/__init__.py +6 -0
- qpytorch/models/qeplvm/bayesian_qeplvm.py +40 -0
- qpytorch/models/qeplvm/latent_variable.py +102 -0
- qpytorch/module.py +30 -0
- qpytorch/optim/__init__.py +5 -0
- qpytorch/priors/__init__.py +42 -0
- qpytorch/priors/qep_priors.py +81 -0
- qpytorch/test/__init__.py +22 -0
- qpytorch/test/base_likelihood_test_case.py +106 -0
- qpytorch/test/model_test_case.py +150 -0
- qpytorch/test/variational_test_case.py +400 -0
- qpytorch/utils/__init__.py +38 -0
- qpytorch/utils/warnings.py +37 -0
- qpytorch/variational/__init__.py +47 -0
- qpytorch/variational/_variational_distribution.py +61 -0
- qpytorch/variational/_variational_strategy.py +391 -0
- qpytorch/variational/additive_grid_interpolation_variational_strategy.py +90 -0
- qpytorch/variational/batch_decoupled_variational_strategy.py +256 -0
- qpytorch/variational/cholesky_variational_distribution.py +65 -0
- qpytorch/variational/ciq_variational_strategy.py +352 -0
- qpytorch/variational/delta_variational_distribution.py +41 -0
- qpytorch/variational/grid_interpolation_variational_strategy.py +113 -0
- qpytorch/variational/independent_multitask_variational_strategy.py +114 -0
- qpytorch/variational/lmc_variational_strategy.py +248 -0
- qpytorch/variational/mean_field_variational_distribution.py +58 -0
- qpytorch/variational/multitask_variational_strategy.py +317 -0
- qpytorch/variational/natural_variational_distribution.py +152 -0
- qpytorch/variational/nearest_neighbor_variational_strategy.py +487 -0
- qpytorch/variational/orthogonally_decoupled_variational_strategy.py +128 -0
- qpytorch/variational/tril_natural_variational_distribution.py +130 -0
- qpytorch/variational/uncorrelated_multitask_variational_strategy.py +114 -0
- qpytorch/variational/unwhitened_variational_strategy.py +225 -0
- qpytorch/variational/variational_strategy.py +280 -0
- qpytorch/version.py +4 -0
- qpytorch-0.1.dist-info/LICENSE +21 -0
- qpytorch-0.1.dist-info/METADATA +177 -0
- qpytorch-0.1.dist-info/RECORD +102 -0
- qpytorch-0.1.dist-info/WHEEL +5 -0
- qpytorch-0.1.dist-info/top_level.txt +1 -0
qpytorch/likelihoods/noise_models.py
@@ -0,0 +1,184 @@
#!/usr/bin/env python3

import warnings
from typing import Any, Optional, Union

import torch
from linear_operator.operators import ConstantDiagLinearOperator, DiagLinearOperator, LinearOperator, ZeroLinearOperator
from torch import Tensor
from torch.nn import Parameter

from .. import settings
from ..constraints import GreaterThan
from ..distributions import MultivariateNormal, MultivariateQExponential
from ..module import Module
from ..utils.warnings import NumericalWarning


class Noise(Module):
    def __call__(
        self, *params: Any, shape: Optional[torch.Size] = None, **kwargs: Any
    ) -> Union[Tensor, LinearOperator]:
        # For correct typing
        return super().__call__(*params, shape=shape, **kwargs)


class _HomoskedasticNoiseBase(Noise):
    def __init__(self, noise_prior=None, noise_constraint=None, batch_shape=torch.Size(), num_tasks=1):
        super().__init__()
        if noise_constraint is None:
            noise_constraint = GreaterThan(1e-4)

        self.register_parameter(name="raw_noise", parameter=Parameter(torch.zeros(*batch_shape, num_tasks)))
        if noise_prior is not None:
            self.register_prior("noise_prior", noise_prior, self._noise_param, self._noise_closure)

        self.register_constraint("raw_noise", noise_constraint)

    def _noise_param(self, m):
        return m.noise

    def _noise_closure(self, m, v):
        return m._set_noise(v)

    @property
    def noise(self):
        return self.raw_noise_constraint.transform(self.raw_noise)

    @noise.setter
    def noise(self, value: Tensor) -> None:
        self._set_noise(value)

    def _set_noise(self, value: Tensor) -> None:
        if not torch.is_tensor(value):
            value = torch.as_tensor(value).to(self.raw_noise)
        self.initialize(raw_noise=self.raw_noise_constraint.inverse_transform(value))

    def forward(self, *params: Any, shape: Optional[torch.Size] = None, **kwargs: Any) -> DiagLinearOperator:
        """In the homoskedastic case, the parameters are only used to infer the required shape.
        Here are the possible scenarios:
        - non-batched noise, non-batched input, non-MT -> noise_diag shape is `n`
        - non-batched noise, non-batched input, MT -> noise_diag shape is `nt`
        - non-batched noise, batched input, non-MT -> noise_diag shape is `b x n`
        - non-batched noise, batched input, MT -> noise_diag shape is `b x nt`
        - batched noise, non-batched input, non-MT -> noise_diag shape is `b x n`
        - batched noise, non-batched input, MT -> noise_diag shape is `b x nt`
        - batched noise, batched input, non-MT -> noise_diag shape is `b' x n`, with `b'` the broadcasted batch shape
        - batched noise, batched input, MT -> noise_diag shape is `b' x nt`
        where `n` is the number of evaluation points and `t` is the number of tasks (i.e. `num_tasks` of self.noise).
        So basically the shape is always `b' x nt`, with `b'` appropriately broadcast from the noise parameter and
        input batch shapes. `n` and the input batch shape are determined either from the shape arg or from the params
        input. For this it is sufficient to take in a single `shape` arg, with the convention that shape[:-1] is the
        batch shape of the input, and shape[-1] is `n`.

        If a "noise" kwarg (a Tensor) is provided, this noise is used directly.
        """
        if "noise" in kwargs:
            return DiagLinearOperator(kwargs.get("noise"))
        if shape is None:
            p = params[0] if torch.is_tensor(params[0]) else params[0][0]
            shape = p.shape if len(p.shape) == 1 else p.shape[:-1]
        noise = self.noise
        *batch_shape, n = shape
        noise_batch_shape = noise.shape[:-1] if noise.dim() > 1 else torch.Size()
        num_tasks = noise.shape[-1]
        batch_shape = torch.broadcast_shapes(noise_batch_shape, batch_shape)
        noise = noise.unsqueeze(-2)
        noise_diag = noise.expand(*batch_shape, 1, num_tasks).contiguous()
        if num_tasks == 1:
            noise_diag = noise_diag.view(*batch_shape, 1)
        if noise_diag.shape[-1] != 1:
            noise_diag = noise_diag.unsqueeze(-1)
        return ConstantDiagLinearOperator(noise_diag, diag_shape=n)
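
To make the shape rules in the forward() docstring above concrete, here is a minimal illustrative sketch (not from the package source; it assumes the module is importable from qpytorch.likelihoods.noise_models as the file list indicates, and that calling a noise module with only a `shape=` kwarg works as the code above reads):

    import torch
    from qpytorch.likelihoods.noise_models import HomoskedasticNoise

    # non-batched noise, batched input: the input batch shape carries through
    noise = HomoskedasticNoise()
    covar = noise(shape=torch.Size([3, 10]))        # b=3 batches of n=10 points
    print(covar.shape)                              # torch.Size([3, 10, 10])

    # batched noise, non-batched input: the noise batch shape carries through
    batched = HomoskedasticNoise(batch_shape=torch.Size([5]))
    print(batched(shape=torch.Size([10])).shape)    # torch.Size([5, 10, 10])
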
class HomoskedasticNoise(_HomoskedasticNoiseBase):
    def __init__(self, noise_prior=None, noise_constraint=None, batch_shape=torch.Size()):
        super().__init__(
            noise_prior=noise_prior, noise_constraint=noise_constraint, batch_shape=batch_shape, num_tasks=1
        )


class MultitaskHomoskedasticNoise(_HomoskedasticNoiseBase):
    def __init__(self, num_tasks, noise_prior=None, noise_constraint=None, batch_shape=torch.Size()):
        super().__init__(
            noise_prior=noise_prior, noise_constraint=noise_constraint, batch_shape=batch_shape, num_tasks=num_tasks
        )


class HeteroskedasticNoise(Noise):
    def __init__(self, noise_model, noise_indices=None, noise_constraint=None):
        if noise_constraint is None:
            noise_constraint = GreaterThan(1e-4)
        super().__init__()
        self.noise_model = noise_model
        self._noise_constraint = noise_constraint
        self._noise_indices = noise_indices

    def forward(
        self,
        *params: Any,
        batch_shape: Optional[torch.Size] = None,
        shape: Optional[torch.Size] = None,
        noise: Optional[Tensor] = None,
    ) -> DiagLinearOperator:
        if noise is not None:
            return DiagLinearOperator(noise)
        training = self.noise_model.training  # keep track of mode
        try:
            self.noise_model.eval()  # we want the posterior prediction of the noise model
            with settings.detach_test_caches(False), settings.debug(False):
                if len(params) == 1 and not torch.is_tensor(params[0]):
                    output = self.noise_model(*params[0])
                else:
                    output = self.noise_model(*params)
        finally:
            self.noise_model.train(training)
        if not isinstance(output, (MultivariateNormal, MultivariateQExponential)):
            raise NotImplementedError(
                "Currently only noise models that return a MultivariateNormal or MultivariateQExponential are supported"
            )
        # note: this also works with MultitaskMultivariateNormal (MultitaskMultivariateQExponential), where this
        # will return a batched DiagLinearOperator of size n x num_tasks x num_tasks
        noise_diag = output.mean if self._noise_indices is None else output.mean[..., self._noise_indices]
        return DiagLinearOperator(self._noise_constraint.transform(noise_diag))
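
HeteroskedasticNoise above delegates the noise level to a second model: it switches noise_model to eval mode, takes the posterior mean at the input points, and passes it through the GreaterThan constraint. A hedged wiring sketch follows (not from the package source; the toy ConstantNoiseModel is hypothetical and stands in for a trained noise model — any module returning a MultivariateNormal or MultivariateQExponential should work as the code reads):

    import torch
    from qpytorch.distributions import MultivariateNormal
    from qpytorch.likelihoods.noise_models import HeteroskedasticNoise

    class ConstantNoiseModel(torch.nn.Module):
        """Toy stand-in for a trained noise model: posterior mean = -3 everywhere."""
        def forward(self, x):
            mean = torch.full(x.shape[:-1], -3.0)
            return MultivariateNormal(mean, torch.eye(x.shape[-2]) * 1e-4)

    hetero = HeteroskedasticNoise(noise_model=ConstantNoiseModel())
    covar = hetero(torch.randn(10, 2))  # DiagLinearOperator; entries are the
                                        # GreaterThan-transformed posterior mean
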
class FixedNoise(Module):
    def __init__(self, noise: Tensor) -> None:
        super().__init__()
        min_noise = settings.min_fixed_noise.value(noise.dtype)
        if noise.lt(min_noise).any():
            warnings.warn(
                "Very small noise values detected. This will likely "
                "lead to numerical instabilities. Rounding small noise "
                f"values up to {min_noise}.",
                NumericalWarning,
            )
            noise = noise.clamp_min(min_noise)
        self.noise = noise

    def forward(
        self, *params: Any, shape: Optional[torch.Size] = None, noise: Optional[Tensor] = None, **kwargs: Any
    ) -> DiagLinearOperator:
        if shape is None:
            p = params[0] if torch.is_tensor(params[0]) else params[0][0]
            shape = p.shape if len(p.shape) == 1 else p.shape[:-1]

        if noise is not None:
            return DiagLinearOperator(noise)
        elif shape[-1] == self.noise.shape[-1]:
            return DiagLinearOperator(self.noise)
        else:
            return ZeroLinearOperator()

    def _apply(self, fn):
        self.noise = fn(self.noise)
        return super(FixedNoise, self)._apply(fn)

    def __call__(
        self, *params: Any, shape: Optional[torch.Size] = None, **kwargs: Any
    ) -> Union[Tensor, LinearOperator]:
        # For correct typing
        return super().__call__(*params, shape=shape, **kwargs)


class FixedGaussianNoise(FixedNoise):  # alias to a more general name
    pass
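
FixedNoise, at the end of the file above, has three distinct return paths worth keeping in mind: an explicit `noise` kwarg wins, a size match returns the stored noise, and a size mismatch silently yields a ZeroLinearOperator (FixedNoiseQExponentialLikelihood in the next file warns about that last case). A minimal sketch of the three branches, assuming the import path shown in the file list:

    import torch
    from qpytorch.likelihoods.noise_models import FixedNoise

    fixed = FixedNoise(noise=torch.full((10,), 0.01))
    fixed(torch.randn(10, 2))                                # stored noise: 10 x 10 diagonal
    fixed(torch.randn(7, 2), noise=torch.full((7,), 0.02))   # explicit `noise` kwarg wins
    fixed(torch.randn(7, 2))                                 # size mismatch -> ZeroLinearOperator()
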
qpytorch/likelihoods/qexponential_likelihood.py
@@ -0,0 +1,494 @@
#!/usr/bin/env python3
import math
import warnings
from copy import deepcopy
from typing import Any, Optional, Tuple, Union

import torch
from linear_operator.operators import LinearOperator, MaskedLinearOperator, ZeroLinearOperator, DiagLinearOperator
from torch import Tensor
from torch.distributions import Distribution

from .. import settings
from ..constraints import Interval
from ..distributions import base_distributions, QExponential, MultivariateQExponential
from ..priors import Prior
from ..utils.warnings import QEPInputWarning
from .likelihood import Likelihood
from .noise_models import FixedNoise, HomoskedasticNoise, Noise


class _QExponentialLikelihoodBase(Likelihood):
    """Base class for QExponential Likelihoods, supporting general heteroskedastic noise models."""

    has_analytic_marginal = True

    def __init__(self, noise_covar: Union[Noise, FixedNoise], **kwargs: Any) -> None:
        super().__init__()
        param_transform = kwargs.get("param_transform")
        if param_transform is not None:
            warnings.warn(
                "The 'param_transform' argument is now deprecated. If you want to use a different "
                "transformation, specify a different 'noise_constraint' instead.",
                DeprecationWarning,
            )

        self.noise_covar = noise_covar
        self.power = kwargs.pop('power', torch.tensor(2.0))
        self.miu = kwargs.pop('miu', False)  # marginally identical but uncorrelated

    def _shaped_noise_covar(self, base_shape: torch.Size, *params: Any, **kwargs: Any) -> Union[Tensor, LinearOperator]:
        return self.noise_covar(*params, shape=base_shape, **kwargs)

    def expected_log_prob(self, target: Tensor, input: MultivariateQExponential, *params: Any, **kwargs: Any) -> Tensor:
        noise = self._shaped_noise_covar(input.mean.shape, *params, **kwargs).diagonal(dim1=-1, dim2=-2)
        # Potentially reshape the noise to deal with the multitask case
        noise = noise.view(*noise.shape[:-1], *input.event_shape)

        # Handle NaN values if enabled
        nan_policy = settings.observation_nan_policy.value()
        if nan_policy == "mask":
            observed = settings.observation_nan_policy._get_observed(target, input.event_shape)
            input = MultivariateQExponential(
                mean=input.mean[..., observed],
                covariance_matrix=MaskedLinearOperator(
                    input.lazy_covariance_matrix, observed.reshape(-1), observed.reshape(-1)
                ),
                power=input.power,
            )
            noise = noise[..., observed]
            target = target[..., observed]
        elif nan_policy == "fill":
            missing = torch.isnan(target)
            target = settings.observation_nan_policy._fill_tensor(target)

        mean, variance, power = input.mean, input.variance, self.power
        trace_plus_inv_quad_form = ((target - mean).square() + variance) / noise  # <r>
        res = noise.log() + math.log(2 * math.pi)  # + trace_plus_inv_quad_form**(power/2.)
        # res = res.mul(-0.5)
        # if power != 2: res += 0.5 * (power/2. - 1) * trace_plus_inv_quad_form.log() + torch.log(power/2.)  # exact value is intractable; only a lower bound is provided.

        if nan_policy == "fill":
            # res = res * ~missing
            trace_plus_inv_quad_form = trace_plus_inv_quad_form * ~missing

        # Do appropriate summation for multitask QExponential likelihoods
        num_event_dim = len(input.event_shape)
        if num_event_dim > 1 and self.miu:
            res = res.sum(list(range(-1, -num_event_dim, -1)))
            trace_plus_inv_quad_form = trace_plus_inv_quad_form.sum(list(range(-1, -num_event_dim, -1)))

        res = res + trace_plus_inv_quad_form ** (power / 2.)
        res = res.mul(-0.5)
        if power != 2:
            # exact value is intractable; only a lower bound is provided.
            res += 0.5 * (power / 2. - 1) * trace_plus_inv_quad_form.log() * (
                torch.tensor(input.event_shape[-1:-num_event_dim:-1]).prod() if self.miu else 1.0
            ) + torch.log(power / 2.)

        if num_event_dim > 1 and not self.miu:
            res = res.sum(list(range(-1, -num_event_dim, -1)))

        if 'reduction' in kwargs:
            res = eval('res.' + kwargs.pop('reduction'))

        return res

    def forward(self, function_samples: Tensor, *params: Any, **kwargs: Any) -> QExponential:
        noise = self._shaped_noise_covar(function_samples.shape, *params, **kwargs).diagonal(dim1=-1, dim2=-2)
        return QExponential(function_samples, noise.sqrt(), self.power)

    def log_marginal(
        self, observations: Tensor, function_dist: MultivariateQExponential, *params: Any, **kwargs: Any
    ) -> Tensor:
        marginal = self.marginal(function_dist, *params, **kwargs)

        # Handle NaN values if enabled
        nan_policy = settings.observation_nan_policy.value()
        if nan_policy == "mask":
            observed = settings.observation_nan_policy._get_observed(observations, marginal.event_shape)
            marginal = MultivariateQExponential(
                mean=marginal.mean[..., observed],
                covariance_matrix=MaskedLinearOperator(
                    marginal.lazy_covariance_matrix, observed.reshape(-1), observed.reshape(-1)
                ),
                power=marginal.power,
            )
            observations = observations[..., observed]
        elif nan_policy == "fill":
            missing = torch.isnan(observations)
            observations = settings.observation_nan_policy._fill_tensor(observations)

        if self.miu:
            if type(marginal) is MultivariateQExponential:
                marginal = marginal.to_data_uncorrelated_dist()
            else:
                marginal.lazy_covariance_matrix = DiagLinearOperator(
                    marginal.lazy_covariance_matrix.diagonal(dim1=-1, dim2=-2)
                )
            res = marginal.log_prob(observations) / marginal.event_shape[0]
        else:
            # We're making everything conditionally independent
            indep_dist = QExponential(marginal.mean, marginal.variance.clamp_min(1e-8).sqrt(), marginal.power)
            res = indep_dist.log_prob(observations)

        if nan_policy == "fill":
            res = res * ~missing

        # Do appropriate summation for multitask QExponential likelihoods
        num_event_dim = len(marginal.event_shape)
        if num_event_dim > 1 and not self.miu:
            res = res.sum(list(range(-1, -num_event_dim, -1)))
        return res

    def marginal(self, function_dist: MultivariateQExponential, *params: Any, **kwargs: Any) -> MultivariateQExponential:
        mean, covar = function_dist.mean, function_dist.lazy_covariance_matrix
        noise_covar = self._shaped_noise_covar(mean.shape, *params, **kwargs)
        full_covar = covar + noise_covar
        return function_dist.__class__(mean, full_covar, self.power)
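
To make the algebra in expected_log_prob above explicit: per data point, writing q for self.power, sigma^2 for the noise, and mu and Var[f] for the variational mean and variance, the code evaluates

.. math::
    \mathbb{E}\big[\log p(y \mid f)\big] \approx
    -\frac{1}{2}\Big(\log \sigma^2 + \log 2\pi + r^{q/2}\Big)
    + \frac{1}{2}\Big(\frac{q}{2} - 1\Big)\log r + \log\frac{q}{2},
    \qquad
    r = \frac{(y - \mu)^2 + \operatorname{Var}[f]}{\sigma^2}.

At q = 2 the last two terms vanish and this reduces to the familiar Gaussian expected log-likelihood; for q != 2 the exact expectation is intractable, so (per the in-code comment) this expression is only a lower-bound surrogate.
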
class QExponentialLikelihood(_QExponentialLikelihoodBase):
    r"""
    The standard likelihood for regression.
    Assumes a standard homoskedastic noise model:

    .. math::
        y = f + \epsilon, \quad \epsilon \sim \mathcal{QEP}(0, \sigma^2)

    where :math:`\sigma^2` is a noise parameter.

    .. note::
        This likelihood can be used for exact or approximate inference.

    .. note::
        QExponentialLikelihood has an analytic marginal distribution.

    :param noise_prior: Prior for noise parameter :math:`\sigma^2`.
    :param noise_constraint: Constraint for noise parameter :math:`\sigma^2`.
    :param batch_shape: The batch shape of the learned noise parameter (default: []).
    :param kwargs: power (default: 2.0), miu (default: False).

    :ivar torch.Tensor noise: :math:`\sigma^2` parameter (noise)
    """

    def __init__(
        self,
        noise_prior: Optional[Prior] = None,
        noise_constraint: Optional[Interval] = None,
        batch_shape: torch.Size = torch.Size(),
        **kwargs: Any,
    ) -> None:
        noise_covar = HomoskedasticNoise(
            noise_prior=noise_prior, noise_constraint=noise_constraint, batch_shape=batch_shape
        )
        super().__init__(noise_covar=noise_covar, **kwargs)

    @property
    def noise(self) -> Tensor:
        return self.noise_covar.noise

    @noise.setter
    def noise(self, value: Tensor) -> None:
        self.noise_covar.initialize(noise=value)

    @property
    def raw_noise(self) -> Tensor:
        return self.noise_covar.raw_noise

    @raw_noise.setter
    def raw_noise(self, value: Tensor) -> None:
        self.noise_covar.initialize(raw_noise=value)

    def marginal(self, function_dist: MultivariateQExponential, *args: Any, **kwargs: Any) -> MultivariateQExponential:
        r"""
        :return: Analytic marginal :math:`p(\mathbf y)`.
        """
        return super().marginal(function_dist, *args, **kwargs)
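
QExponentialLikelihood plays the role that GaussianLikelihood plays in gpytorch, with the extra `power` and `miu` keywords consumed by the base class above. A hedged usage sketch (not from the package source; it assumes the class is re-exported from qpytorch.likelihoods, and `qep_model` stands in for any model returning a MultivariateQExponential):

    import torch
    from qpytorch.likelihoods import QExponentialLikelihood

    likelihood = QExponentialLikelihood(power=torch.tensor(1.5))
    likelihood.noise = torch.tensor(0.04)   # sigma^2, set through the raw-noise constraint
    # Calling the likelihood on the model's output distribution yields the
    # analytic marginal p(y), i.e. f's covariance plus sigma^2 on the diagonal:
    # observed = likelihood(qep_model(train_x))
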
class QExponentialLikelihoodWithMissingObs(QExponentialLikelihood):
    r"""
    The standard likelihood for regression with support for missing values.
    Assumes a standard homoskedastic noise model:

    .. math::
        y = f + \epsilon, \quad \epsilon \sim \mathcal{QEP}(0, \sigma^2)

    where :math:`\sigma^2` is a noise parameter. Values of y that are nan do
    not impact the likelihood calculation.

    .. note::
        This likelihood can be used for exact or approximate inference.

    .. warning::
        This likelihood is deprecated in favor of :class:`qpytorch.settings.observation_nan_policy`.

    :param noise_prior: Prior for noise parameter :math:`\sigma^2`.
    :type noise_prior: ~qpytorch.priors.Prior, optional
    :param noise_constraint: Constraint for noise parameter :math:`\sigma^2`.
    :type noise_constraint: ~qpytorch.constraints.Interval, optional
    :param batch_shape: The batch shape of the learned noise parameter (default: []).
    :type batch_shape: torch.Size, optional
    :var torch.Tensor noise: :math:`\sigma^2` parameter (noise)

    .. note::
        QExponentialLikelihoodWithMissingObs has an analytic marginal distribution.
    """

    MISSING_VALUE_FILL: float = -999.0

    def __init__(self, **kwargs: Any) -> None:
        warnings.warn(
            "QExponentialLikelihoodWithMissingObs is replaced by qpytorch.settings.observation_nan_policy('fill').",
            DeprecationWarning,
        )
        super().__init__(**kwargs)

    def _get_masked_obs(self, x: Tensor) -> Tuple[Tensor, Tensor]:
        missing_idx = x.isnan()
        x_masked = x.masked_fill(missing_idx, self.MISSING_VALUE_FILL)
        return missing_idx, x_masked

    def expected_log_prob(self, target: Tensor, input: MultivariateQExponential, *params: Any, **kwargs: Any) -> Tensor:
        missing_idx, target = self._get_masked_obs(target)
        res = super().expected_log_prob(target, input, *params, **kwargs)
        return res * ~missing_idx

    def log_marginal(
        self, observations: Tensor, function_dist: MultivariateQExponential, *params: Any, **kwargs: Any
    ) -> Tensor:
        missing_idx, observations = self._get_masked_obs(observations)
        res = super().log_marginal(observations, function_dist, *params, **kwargs)
        return res * ~missing_idx

    def marginal(self, function_dist: MultivariateQExponential, *args: Any, **kwargs: Any) -> MultivariateQExponential:
        r"""
        :return: Analytic marginal :math:`p(\mathbf y)`.
        """
        return super().marginal(function_dist, *args, **kwargs)
class FixedNoiseQExponentialLikelihood(_QExponentialLikelihoodBase):
    r"""
    A Likelihood that assumes fixed heteroscedastic noise. This is useful when you have fixed, known observation
    noise for each training example.

    Note that this likelihood takes an additional argument when you call it, `noise`, that adds a specified amount
    of noise to the passed MultivariateQExponential. This allows for adding known observational noise to test data.

    .. note::
        This likelihood can be used for exact or approximate inference.

    :param noise: Known observation noise (variance) for each training example.
    :type noise: torch.Tensor (... x N)
    :param learn_additional_noise: Set to true if you additionally want to
        learn added diagonal noise, similar to QExponentialLikelihood.
    :type learn_additional_noise: bool, optional
    :param batch_shape: The batch shape of the learned noise parameter (default
        []) if :obj:`learn_additional_noise=True`.
    :type batch_shape: torch.Size, optional

    :var torch.Tensor noise: :math:`\sigma^2` parameter (noise)

    .. note::
        FixedNoiseQExponentialLikelihood has an analytic marginal distribution.

    Example:
        >>> train_x = torch.randn(55, 2)
        >>> noises = torch.ones(55) * 0.01
        >>> likelihood = FixedNoiseQExponentialLikelihood(noise=noises, learn_additional_noise=True)
        >>> pred_y = likelihood(qep_model(train_x))
        >>>
        >>> test_x = torch.randn(21, 2)
        >>> test_noises = torch.ones(21) * 0.02
        >>> pred_y = likelihood(qep_model(test_x), noise=test_noises)
    """

    def __init__(
        self,
        noise: Tensor,
        learn_additional_noise: Optional[bool] = False,
        batch_shape: Optional[torch.Size] = torch.Size(),
        **kwargs: Any,
    ) -> None:
        super().__init__(noise_covar=FixedNoise(noise=noise), **kwargs)

        self.second_noise_covar: Optional[HomoskedasticNoise] = None
        if learn_additional_noise:
            noise_prior = kwargs.get("noise_prior", None)
            noise_constraint = kwargs.get("noise_constraint", None)
            self.second_noise_covar = HomoskedasticNoise(
                noise_prior=noise_prior, noise_constraint=noise_constraint, batch_shape=batch_shape
            )

    @property
    def noise(self) -> Tensor:
        return self.noise_covar.noise + self.second_noise

    @noise.setter
    def noise(self, value: Tensor) -> None:
        self.noise_covar.initialize(noise=value)

    @property
    def second_noise(self) -> Union[float, Tensor]:
        if self.second_noise_covar is None:
            return 0.0
        else:
            return self.second_noise_covar.noise

    @second_noise.setter
    def second_noise(self, value: Tensor) -> None:
        if self.second_noise_covar is None:
            raise RuntimeError(
                "Attempting to set secondary learned noise for FixedNoiseQExponentialLikelihood, "
                "but learn_additional_noise must have been False!"
            )
        self.second_noise_covar.initialize(noise=value)

    def get_fantasy_likelihood(self, **kwargs: Any) -> "FixedNoiseQExponentialLikelihood":
        if "noise" not in kwargs:
            raise RuntimeError("FixedNoiseQExponentialLikelihood.fantasize requires a `noise` kwarg")
        old_noise_covar = self.noise_covar
        self.noise_covar = None  # pyre-fixme[8]
        fantasy_likelihood = deepcopy(self)
        self.noise_covar = old_noise_covar

        old_noise = old_noise_covar.noise
        new_noise = kwargs.get("noise")
        if old_noise.dim() != new_noise.dim():
            old_noise = old_noise.expand(*new_noise.shape[:-1], old_noise.shape[-1])
        fantasy_likelihood.noise_covar = FixedNoise(noise=torch.cat([old_noise, new_noise], -1))
        return fantasy_likelihood

    def _shaped_noise_covar(self, base_shape: torch.Size, *params: Any, **kwargs: Any) -> Union[Tensor, LinearOperator]:
        if len(params) > 0:
            # we can infer the shape from the params
            shape = None
        else:
            # here shape[:-1] is the batch shape requested, and shape[-1] is `n`, the number of points
            shape = base_shape

        res = self.noise_covar(*params, shape=shape, **kwargs)

        if self.second_noise_covar is not None:
            res = res + self.second_noise_covar(*params, shape=shape, **kwargs)
        elif isinstance(res, ZeroLinearOperator):
            warnings.warn(
                "You have passed data through a FixedNoiseQExponentialLikelihood that did not match the size "
                "of the fixed noise, *and* you did not specify noise. This is treated as a no-op.",
                QEPInputWarning,
            )

        return res

    def marginal(self, function_dist: MultivariateQExponential, *args: Any, **kwargs: Any) -> MultivariateQExponential:
        r"""
        :return: Analytic marginal :math:`p(\mathbf y)`.
        """
        return super().marginal(function_dist, *args, **kwargs)
class QExponentialDirichletClassificationLikelihood(FixedNoiseQExponentialLikelihood):
    r"""
    A classification likelihood that treats the labels as regression targets with fixed heteroscedastic noise.
    From Milios et al, NeurIPS, 2018 [https://arxiv.org/abs/1805.10915].

    .. note::
        This likelihood can be used for exact or approximate inference.

    :param targets: (... x N) Classification labels.
    :param alpha_epsilon: Tuning parameter for the scaling of the likelihood targets. We'd suggest 0.01 or setting
        via cross-validation.
    :param learn_additional_noise: Set to true if you additionally want to
        learn added diagonal noise, similar to QExponentialLikelihood.
    :param batch_shape: The batch shape of the learned noise parameter (default
        []) if :obj:`learn_additional_noise=True`.

    :ivar torch.Tensor noise: :math:`\sigma^2` parameter (noise)

    .. note::
        QExponentialDirichletClassificationLikelihood has an analytic marginal distribution.

    Example:
        >>> train_x = torch.randn(55, 1)
        >>> labels = torch.round(train_x).long()
        >>> likelihood = QExponentialDirichletClassificationLikelihood(targets=labels, learn_additional_noise=True)
        >>> pred_y = likelihood(qep_model(train_x))
        >>>
        >>> test_x = torch.randn(21, 1)
        >>> test_labels = torch.round(test_x).long()
        >>> pred_y = likelihood(qep_model(test_x), targets=test_labels)
    """

    def _prepare_targets(
        self, targets: Tensor, num_classes: Optional[int] = None, alpha_epsilon: float = 0.01, dtype: torch.dtype = torch.float
    ) -> Tuple[Tensor, Tensor, int]:
        if num_classes is None:
            num_classes = int(targets.max() + 1)
        # set alpha = \alpha_\epsilon
        alpha = alpha_epsilon * torch.ones(targets.shape[-1], num_classes, device=targets.device, dtype=dtype)

        # alpha[class_labels] = 1 + \alpha_\epsilon
        alpha[torch.arange(len(targets)), targets] = alpha[torch.arange(len(targets)), targets] + 1.0

        # sigma^2 = log(1 / alpha + 1)
        sigma2_i = torch.log(alpha.reciprocal() + 1.0)

        # y = log(alpha) - 0.5 * sigma^2
        transformed_targets = alpha.log() - 0.5 * sigma2_i

        return sigma2_i.transpose(-2, -1).type(dtype), transformed_targets.type(dtype), num_classes

    def __init__(
        self,
        targets: Tensor,
        alpha_epsilon: float = 0.01,
        learn_additional_noise: Optional[bool] = False,
        batch_shape: torch.Size = torch.Size(),
        dtype: torch.dtype = torch.float,
        **kwargs: Any,
    ) -> None:
        sigma2_labels, transformed_targets, num_classes = self._prepare_targets(
            targets, alpha_epsilon=alpha_epsilon, dtype=dtype
        )
        super().__init__(
            noise=sigma2_labels,
            learn_additional_noise=learn_additional_noise,
            batch_shape=torch.Size((num_classes,)),
            **kwargs,
        )
        self.transformed_targets: Tensor = transformed_targets.transpose(-2, -1)
        self.num_classes: int = num_classes
        self.targets: Tensor = targets
        self.alpha_epsilon: float = alpha_epsilon

    def get_fantasy_likelihood(self, **kwargs: Any) -> "QExponentialDirichletClassificationLikelihood":
        # we assume that the number of classes does not change.

        if "targets" not in kwargs:
            raise RuntimeError("QExponentialDirichletClassificationLikelihood.fantasize requires a `targets` kwarg")

        old_noise_covar = self.noise_covar
        self.noise_covar = None  # pyre-fixme[8]
        fantasy_likelihood = deepcopy(self)
        self.noise_covar = old_noise_covar

        old_noise = old_noise_covar.noise
        new_targets = kwargs.get("targets")
        new_noise, new_targets, _ = fantasy_likelihood._prepare_targets(new_targets, alpha_epsilon=self.alpha_epsilon)
        fantasy_likelihood.targets = torch.cat([fantasy_likelihood.targets, new_targets], -1)

        if old_noise.dim() != new_noise.dim():
            old_noise = old_noise.expand(*new_noise.shape[:-1], old_noise.shape[-1])

        fantasy_likelihood.noise_covar = FixedNoise(noise=torch.cat([old_noise, new_noise], -1))
        return fantasy_likelihood

    def marginal(self, function_dist: MultivariateQExponential, *args: Any, **kwargs: Any) -> MultivariateQExponential:
        r"""
        :return: Analytic marginal :math:`p(\mathbf y)`.
        """
        return super().marginal(function_dist, *args, **kwargs)

    def __call__(self, input: Union[Tensor, MultivariateQExponential], *args: Any, **kwargs: Any) -> Distribution:
        if "targets" in kwargs:
            targets = kwargs.pop("targets")
            dtype = self.transformed_targets.dtype
            new_noise, _, _ = self._prepare_targets(targets, dtype=dtype)
            kwargs["noise"] = new_noise
        return super().__call__(input, *args, **kwargs)
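
Finally, the label transformation in _prepare_targets above (Milios et al., 2018) is easy to check by hand. With alpha_epsilon = 0.01 and two classes, the true class gets alpha = 1.01 and the other alpha = 0.01, giving per-class noise sigma^2 = log(1/alpha + 1) (about 0.69 for the true class versus about 4.62 otherwise) and regression target log(alpha) - sigma^2/2. A standalone numeric sketch of just that math (illustrative, mirroring the method body above):

    import torch

    alpha_epsilon = 0.01
    targets = torch.tensor([1, 0, 1])              # three points, two classes
    alpha = alpha_epsilon * torch.ones(3, 2)
    alpha[torch.arange(3), targets] += 1.0         # true class: 1.01, other: 0.01
    sigma2 = torch.log(alpha.reciprocal() + 1.0)   # ~0.688 for the true class, ~4.615 otherwise
    y = alpha.log() - 0.5 * sigma2                 # per-class regression targets fed to the QEP
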