qpytorch-0.1-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

This version of qpytorch might be problematic.
Files changed (102)
  1. qpytorch/__init__.py +327 -0
  2. qpytorch/constraints/__init__.py +3 -0
  3. qpytorch/distributions/__init__.py +21 -0
  4. qpytorch/distributions/delta.py +86 -0
  5. qpytorch/distributions/multitask_multivariate_qexponential.py +435 -0
  6. qpytorch/distributions/multivariate_qexponential.py +581 -0
  7. qpytorch/distributions/power.py +113 -0
  8. qpytorch/distributions/qexponential.py +153 -0
  9. qpytorch/functions/__init__.py +58 -0
  10. qpytorch/kernels/__init__.py +80 -0
  11. qpytorch/kernels/grid_interpolation_kernel.py +213 -0
  12. qpytorch/kernels/inducing_point_kernel.py +151 -0
  13. qpytorch/kernels/kernel.py +695 -0
  14. qpytorch/kernels/matern32_kernel_grad.py +155 -0
  15. qpytorch/kernels/matern52_kernel_grad.py +194 -0
  16. qpytorch/kernels/matern52_kernel_gradgrad.py +248 -0
  17. qpytorch/kernels/polynomial_kernel_grad.py +88 -0
  18. qpytorch/kernels/qexponential_symmetrized_kl_kernel.py +61 -0
  19. qpytorch/kernels/rbf_kernel_grad.py +125 -0
  20. qpytorch/kernels/rbf_kernel_gradgrad.py +186 -0
  21. qpytorch/kernels/rff_kernel.py +153 -0
  22. qpytorch/lazy/__init__.py +9 -0
  23. qpytorch/likelihoods/__init__.py +66 -0
  24. qpytorch/likelihoods/bernoulli_likelihood.py +75 -0
  25. qpytorch/likelihoods/beta_likelihood.py +76 -0
  26. qpytorch/likelihoods/gaussian_likelihood.py +472 -0
  27. qpytorch/likelihoods/laplace_likelihood.py +59 -0
  28. qpytorch/likelihoods/likelihood.py +437 -0
  29. qpytorch/likelihoods/likelihood_list.py +60 -0
  30. qpytorch/likelihoods/multitask_gaussian_likelihood.py +542 -0
  31. qpytorch/likelihoods/multitask_qexponential_likelihood.py +545 -0
  32. qpytorch/likelihoods/noise_models.py +184 -0
  33. qpytorch/likelihoods/qexponential_likelihood.py +494 -0
  34. qpytorch/likelihoods/softmax_likelihood.py +97 -0
  35. qpytorch/likelihoods/student_t_likelihood.py +90 -0
  36. qpytorch/means/__init__.py +23 -0
  37. qpytorch/metrics/__init__.py +17 -0
  38. qpytorch/mlls/__init__.py +53 -0
  39. qpytorch/mlls/_approximate_mll.py +79 -0
  40. qpytorch/mlls/deep_approximate_mll.py +30 -0
  41. qpytorch/mlls/deep_predictive_log_likelihood.py +32 -0
  42. qpytorch/mlls/exact_marginal_log_likelihood.py +96 -0
  43. qpytorch/mlls/gamma_robust_variational_elbo.py +106 -0
  44. qpytorch/mlls/inducing_point_kernel_added_loss_term.py +69 -0
  45. qpytorch/mlls/kl_qexponential_added_loss_term.py +41 -0
  46. qpytorch/mlls/leave_one_out_pseudo_likelihood.py +73 -0
  47. qpytorch/mlls/marginal_log_likelihood.py +48 -0
  48. qpytorch/mlls/predictive_log_likelihood.py +76 -0
  49. qpytorch/mlls/sum_marginal_log_likelihood.py +40 -0
  50. qpytorch/mlls/variational_elbo.py +77 -0
  51. qpytorch/models/__init__.py +72 -0
  52. qpytorch/models/approximate_qep.py +115 -0
  53. qpytorch/models/deep_qeps/__init__.py +22 -0
  54. qpytorch/models/deep_qeps/deep_qep.py +155 -0
  55. qpytorch/models/deep_qeps/dspp.py +114 -0
  56. qpytorch/models/exact_prediction_strategies.py +880 -0
  57. qpytorch/models/exact_qep.py +349 -0
  58. qpytorch/models/model_list.py +100 -0
  59. qpytorch/models/pyro/__init__.py +28 -0
  60. qpytorch/models/pyro/_pyro_mixin.py +57 -0
  61. qpytorch/models/pyro/distributions/__init__.py +5 -0
  62. qpytorch/models/pyro/pyro_qep.py +105 -0
  63. qpytorch/models/qep.py +7 -0
  64. qpytorch/models/qeplvm/__init__.py +6 -0
  65. qpytorch/models/qeplvm/bayesian_qeplvm.py +40 -0
  66. qpytorch/models/qeplvm/latent_variable.py +102 -0
  67. qpytorch/module.py +30 -0
  68. qpytorch/optim/__init__.py +5 -0
  69. qpytorch/priors/__init__.py +42 -0
  70. qpytorch/priors/qep_priors.py +81 -0
  71. qpytorch/test/__init__.py +22 -0
  72. qpytorch/test/base_likelihood_test_case.py +106 -0
  73. qpytorch/test/model_test_case.py +150 -0
  74. qpytorch/test/variational_test_case.py +400 -0
  75. qpytorch/utils/__init__.py +38 -0
  76. qpytorch/utils/warnings.py +37 -0
  77. qpytorch/variational/__init__.py +47 -0
  78. qpytorch/variational/_variational_distribution.py +61 -0
  79. qpytorch/variational/_variational_strategy.py +391 -0
  80. qpytorch/variational/additive_grid_interpolation_variational_strategy.py +90 -0
  81. qpytorch/variational/batch_decoupled_variational_strategy.py +256 -0
  82. qpytorch/variational/cholesky_variational_distribution.py +65 -0
  83. qpytorch/variational/ciq_variational_strategy.py +352 -0
  84. qpytorch/variational/delta_variational_distribution.py +41 -0
  85. qpytorch/variational/grid_interpolation_variational_strategy.py +113 -0
  86. qpytorch/variational/independent_multitask_variational_strategy.py +114 -0
  87. qpytorch/variational/lmc_variational_strategy.py +248 -0
  88. qpytorch/variational/mean_field_variational_distribution.py +58 -0
  89. qpytorch/variational/multitask_variational_strategy.py +317 -0
  90. qpytorch/variational/natural_variational_distribution.py +152 -0
  91. qpytorch/variational/nearest_neighbor_variational_strategy.py +487 -0
  92. qpytorch/variational/orthogonally_decoupled_variational_strategy.py +128 -0
  93. qpytorch/variational/tril_natural_variational_distribution.py +130 -0
  94. qpytorch/variational/uncorrelated_multitask_variational_strategy.py +114 -0
  95. qpytorch/variational/unwhitened_variational_strategy.py +225 -0
  96. qpytorch/variational/variational_strategy.py +280 -0
  97. qpytorch/version.py +4 -0
  98. qpytorch-0.1.dist-info/LICENSE +21 -0
  99. qpytorch-0.1.dist-info/METADATA +177 -0
  100. qpytorch-0.1.dist-info/RECORD +102 -0
  101. qpytorch-0.1.dist-info/WHEEL +5 -0
  102. qpytorch-0.1.dist-info/top_level.txt +1 -0
qpytorch/models/deep_qeps/deep_qep.py
@@ -0,0 +1,155 @@
+ #!/usr/bin/env python3
+
+ import warnings
+
+ import torch
+ from linear_operator.operators import BlockDiagLinearOperator
+
+ from ... import settings
+ from ...distributions import QExponential, MultitaskMultivariateQExponential
+ from ...likelihoods import Likelihood
+ from ..approximate_qep import ApproximateQEP
+ from ..qep import QEP
+
+
+ class _DeepQEPVariationalStrategy(object):
+     def __init__(self, model):
+         self.model = model
+
+     @property
+     def sub_variational_strategies(self):
+         if not hasattr(self, "_sub_variational_strategies_memo"):
+             self._sub_variational_strategies_memo = [
+                 module.variational_strategy for module in self.model.modules() if isinstance(module, ApproximateQEP)
+             ]
+         return self._sub_variational_strategies_memo
+
+     def kl_divergence(self):
+         return sum(strategy.kl_divergence().sum() for strategy in self.sub_variational_strategies)
+
+
+ class DeepQEPLayer(ApproximateQEP):
+     """
+     Represents a layer in a deep QEP where inference is performed via the doubly stochastic method of
+     Salimbeni et al., 2017. Upon calling, instead of returning a variational distribution q(f), returns samples
+     from the variational distribution.
+
+     See the documentation for __call__ below for more details. Note that the behavior of __call__
+     will change to be much more elegant with multiple batch dimensions; however, the interface doesn't really
+     change.
+
+     :param ~gpytorch.variational.VariationalStrategy variational_strategy: Strategy for
+         changing q(u) -> q(f) (see other VI docs)
+     :param int input_dims: Dimensionality of input data expected by each QEP
+     :param int output_dims: (default None) Number of QEPs in this layer, equivalent to
+         output dimensionality. If set to `None`, then the output dimension will be squashed.
+
+     Forward data through this hidden QEP layer. The output is a MultitaskMultivariateQExponential distribution
+     (or a MultivariateQExponential distribution if output_dims=None).
+
+     If the input is a >=2 dimensional Tensor (e.g. `n x d`), we pass the input through each hidden QEP,
+     resulting in a `n x h` multitask Q-Exponential distribution (where all of the `h` tasks represent an
+     output dimension and are independent from one another). We then draw `s` samples from these Q-Exponentials,
+     resulting in a `s x n x h` MultitaskMultivariateQExponential distribution.
+
+     If the input is a >=3 dimensional Tensor, and the `are_samples=True` kwarg is set, then we assume that
+     the outermost batch dimension is a samples dimension. The output will have the same number of samples.
+     For example, a `s x b x n x d` input will result in a `s x b x n x h` MultitaskMultivariateQExponential distribution.
+
+     The goal of these last two points is that if you have a tensor `x` that is `n x d`, then
+
+     >>> hidden_qep2(hidden_qep(x))
+
+     will just work, and return a tensor of size `s x n x h2`, where `h2` is the output dimensionality of
+     hidden_qep2. In this way, hidden QEP layers are easily composable.
+     """
+
+     def __init__(self, variational_strategy, input_dims, output_dims):
+         super(DeepQEPLayer, self).__init__(variational_strategy)
+         self.input_dims = input_dims
+         self.output_dims = output_dims
+
+     def forward(self, x):
+         raise NotImplementedError
+
+     def __call__(self, inputs, are_samples=False, **kwargs):
+         deterministic_inputs = not are_samples
+         if isinstance(inputs, MultitaskMultivariateQExponential):
+             inputs = QExponential(loc=inputs.mean, scale=inputs.variance.sqrt(), power=inputs.power).rsample(rescale=kwargs.pop('rescale', False))
+             deterministic_inputs = False
+
+         if settings.debug.on():
+             if not torch.is_tensor(inputs):
+                 raise ValueError(
+                     "`inputs` should either be a MultitaskMultivariateQExponential or a Tensor, got "
+                     f"{inputs.__class__.__name__}"
+                 )
+
+             if inputs.size(-1) != self.input_dims:
+                 raise RuntimeError(
+                     f"Input shape did not match self.input_dims. Got total feature dims [{inputs.size(-1)}],"
+                     f" expected [{self.input_dims}]"
+                 )
+
+         # Repeat the input for all possible outputs
+         if self.output_dims is not None:
+             inputs = inputs.unsqueeze(-3)
+             inputs = inputs.expand(*inputs.shape[:-3], self.output_dims, *inputs.shape[-2:])
+
+         # Now run samples through the QEP
+         output = ApproximateQEP.__call__(self, inputs, **kwargs)
+         if self.output_dims is not None:
+             mean = output.loc.transpose(-1, -2)
+             covar = BlockDiagLinearOperator(output.lazy_covariance_matrix, block_dim=-3)
+             output = MultitaskMultivariateQExponential(mean, covar, power=output.power, interleaved=False)
+
+         # Maybe expand inputs?
+         if deterministic_inputs:
+             output = output.expand(torch.Size([settings.num_likelihood_samples.value()]) + output.batch_shape)
+
+         return output
+
+
+ class DeepQEP(QEP):
+     """
+     A container module to build a DeepQEP.
+     This module should contain :obj:`~gpytorch.models.deep_qeps.DeepQEPLayer`
+     modules, and can also contain other modules as well.
+     """
+
+     def __init__(self):
+         super().__init__()
+         self.variational_strategy = _DeepQEPVariationalStrategy(self)
+
+     def forward(self, x):
+         raise NotImplementedError
+
+
+ class DeepLikelihood(Likelihood):
+     """
+     A wrapper to make a GPyTorch likelihood compatible with Deep QEPs
+
+     Example:
+         >>> deep_qexponential_likelihood = gpytorch.likelihoods.DeepLikelihood(gpytorch.likelihoods.QExponentialLikelihood)
+     """
+
+     def __init__(self, base_likelihood):
+         super().__init__()
+         warnings.warn(
+             "DeepLikelihood is now deprecated. Use a standard likelihood in conjunction with a "
+             "gpytorch.mlls.DeepApproximateMLL. See the DeepQEP example in our documentation.",
+             DeprecationWarning,
+         )
+         self.base_likelihood = base_likelihood
+
+     def expected_log_prob(self, observations, function_dist, *params, **kwargs):
+         return self.base_likelihood.expected_log_prob(observations, function_dist, *params, **kwargs).mean(dim=0)
+
+     def log_marginal(self, observations, function_dist, *params, **kwargs):
+         return self.base_likelihood.log_marginal(observations, function_dist, *params, **kwargs).mean(dim=0)
+
+     def forward(self, *args, **kwargs):
+         pass
+
+     def __call__(self, *args, **kwargs):
+         return self.base_likelihood.__call__(*args, **kwargs)
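
The composition contract in DeepQEPLayer.__call__ above (each call returns samples whose leading batch dimension is the sample dimension) means a deep QEP is assembled simply by chaining layers inside forward. Below is a minimal sketch of that pattern. It assumes qpytorch re-exports gpytorch-style means, kernels, and variational classes under the module names in the file list above, and that MultivariateQExponential accepts a `power` argument like its multitask counterpart; `ToyHiddenLayer`, `ToyDeepQEP`, and `POWER` are illustrative names, not package API.

    import torch
    import qpytorch
    from qpytorch.models.deep_qeps import DeepQEP, DeepQEPLayer
    from qpytorch.variational import CholeskyVariationalDistribution, VariationalStrategy

    POWER = torch.tensor(1.0)  # assumed q-exponential power parameter


    class ToyHiddenLayer(DeepQEPLayer):
        def __init__(self, input_dims, output_dims, num_inducing=32):
            # One batch of inducing points per output dimension (if any)
            batch_shape = torch.Size([]) if output_dims is None else torch.Size([output_dims])
            inducing_points = torch.randn(*batch_shape, num_inducing, input_dims)
            variational_distribution = CholeskyVariationalDistribution(num_inducing, batch_shape=batch_shape)
            variational_strategy = VariationalStrategy(
                self, inducing_points, variational_distribution, learn_inducing_locations=True
            )
            super().__init__(variational_strategy, input_dims, output_dims)
            self.mean_module = qpytorch.means.ConstantMean(batch_shape=batch_shape)
            self.covar_module = qpytorch.kernels.ScaleKernel(
                qpytorch.kernels.RBFKernel(batch_shape=batch_shape), batch_shape=batch_shape
            )

        def forward(self, x):
            # Constructor signature assumed by analogy with the multitask variant used in __call__ above
            return qpytorch.distributions.MultivariateQExponential(
                self.mean_module(x), self.covar_module(x), power=POWER
            )


    class ToyDeepQEP(DeepQEP):
        def __init__(self, input_dims):
            super().__init__()
            self.hidden = ToyHiddenLayer(input_dims, output_dims=2)
            self.last = ToyHiddenLayer(2, output_dims=None)  # output_dims=None squashes the output dim

        def forward(self, x):
            # hidden(x) yields an `s x n x 2` multitask distribution; last(...) samples from it
            return self.last(self.hidden(x))


    model = ToyDeepQEP(input_dims=3)
    out = model(torch.randn(50, 3))  # MultivariateQExponential with a leading sample batch dim

Training would pair such a model with a standard likelihood and qpytorch.mlls.DeepApproximateMLL wrapped around a VariationalELBO, as the DeepLikelihood deprecation warning above suggests; both MLL modules appear in the file list, though their exact signatures are not shown in this diff.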
qpytorch/models/deep_qeps/dspp.py
@@ -0,0 +1,114 @@
+ #!/usr/bin/env python3
+
+ import torch
+ from linear_operator.operators import BlockDiagLinearOperator
+
+ from ... import settings
+ from ...distributions import MultitaskMultivariateQExponential
+ from ..approximate_qep import ApproximateQEP
+ from .deep_qep import DeepQEP, DeepQEPLayer
+
+
+ class DSPPLayer(DeepQEPLayer):
+     """
+     Represents a layer in a DSPP where inference is performed using the techniques defined in Jankowiak et al., 2020.
+
+     From an end user perspective, the functionality and usage of a DSPPLayer is essentially identical to that of a
+     DeepQEPLayer. It is therefore recommended that you review the documentation for DeepQEPLayer.
+
+     :param ~gpytorch.variational.VariationalStrategy variational_strategy: Strategy for
+         changing q(u) -> q(f) (see other VI docs)
+     :param int input_dims: Dimensionality of input data expected by each QEP
+     :param int output_dims: (default None) Number of QEPs in this layer, equivalent to
+         output dimensionality. If set to `None`, then the output dimension will be squashed.
+     :param int num_quad_sites: Number of quadrature sites to use. Also the number of Q-Exponentials in the mixture
+         output by this layer.
+
+     Again, refer to the documentation for DeepQEPLayer or our example notebooks for full details on what calling a
+     DSPPLayer module does. The high level overview is that if a tensor `x` is `n x d` then
+
+     >>> hidden_qep2(hidden_qep1(x))
+
+     will return a `num_quad_sites` by `output_dims` set of Q-Exponentials, where for each output dim the first batch
+     dim represents a weighted mixture of `num_quad_sites` Q-Exponentials with weights given by DSPP.quad_weights
+     (see DSPP below).
+     """
+
+     def __init__(self, variational_strategy, input_dims, output_dims, num_quad_sites=3, quad_sites=None):
+         super().__init__(variational_strategy, input_dims, output_dims)
+
+         self.num_quad_sites = num_quad_sites
+
+         # Pass in previous_layer.quad_sites if you want to share quad_sites across layers.
+         if quad_sites is not None:
+             self.quad_sites = quad_sites
+         else:
+             self.quad_sites = torch.nn.Parameter(torch.randn(num_quad_sites, input_dims))
+
+     def __call__(self, inputs, **kwargs):
+         if isinstance(inputs, MultitaskMultivariateQExponential):
+             # This is for subsequent layers. We apply quadrature here
+             # Mean, stdv are q x ... x n x t
+             mus, sigmas = inputs.mean, inputs.variance.sqrt()
+             qg = self.quad_sites.view([self.num_quad_sites] + [1] * (mus.dim() - 2) + [self.input_dims])
+             sigmas = sigmas * qg
+             inputs = mus + sigmas  # q^t x n x t
+             deterministic_inputs = False
+         else:
+             deterministic_inputs = True
+
+         if settings.debug.on():
+             if not torch.is_tensor(inputs):
+                 raise ValueError(
+                     "`inputs` should either be a MultitaskMultivariateQExponential or a Tensor, got "
+                     f"{inputs.__class__.__name__}"
+                 )
+
+             if inputs.size(-1) != self.input_dims:
+                 raise RuntimeError(
+                     f"Input shape did not match self.input_dims. Got total feature dims [{inputs.size(-1)}],"
+                     f" expected [{self.input_dims}]"
+                 )
+
+         # Repeat the input for all possible outputs
+         if self.output_dims is not None:
+             inputs = inputs.unsqueeze(-3)
+             inputs = inputs.expand(*inputs.shape[:-3], self.output_dims, *inputs.shape[-2:])
+
+         # Now run samples through the QEP
+         output = ApproximateQEP.__call__(self, inputs, **kwargs)
+
+         # If this is the first layer (deterministic inputs), expand the output
+         # This allows quadrature to be applied to future layers
+         if deterministic_inputs:
+             output = output.expand(torch.Size([self.num_quad_sites]) + output.batch_shape)
+
+         if self.num_quad_sites > 0:
+             if self.output_dims is not None and not isinstance(output, MultitaskMultivariateQExponential):
+                 mean = output.loc.transpose(-1, -2)
+                 covar = BlockDiagLinearOperator(output.lazy_covariance_matrix, block_dim=-3)
+                 output = MultitaskMultivariateQExponential(mean, covar, power=output.power, interleaved=False)
+         else:
+             output = output.loc.transpose(-1, -2)  # this layer provides noiseless kernel interpolation
+
+         return output
+
+
+ class DSPP(DeepQEP):
+     """
+     A container module to build a DSPP.
+     This module should contain :obj:`~gpytorch.models.deep_qeps.DSPPLayer`
+     modules, and can also contain other modules as well.
+
+     This Module contains an additional set of parameters, `raw_quad_weights`, that represent the mixture weights for
+     the output distribution.
+     """
+
+     def __init__(self, num_quad_sites):
+         super().__init__()
+         self.num_quad_sites = num_quad_sites
+         self.register_parameter("raw_quad_weights", torch.nn.Parameter(torch.randn(self.num_quad_sites)))
+
+     @property
+     def quad_weights(self):
+         qwd = self.raw_quad_weights
+         return qwd - qwd.logsumexp(dim=-1)
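
One closing observation on DSPP.quad_weights: subtracting the logsumexp is the log-softmax identity, so the property returns normalized log mixture weights. A small self-contained check in plain PyTorch (no qpytorch API assumed; the `output_mean` tensor below is hypothetical, standing in for a DSPP layer output of shape `num_quad_sites x n x t`):

    import torch

    raw = torch.randn(3)                     # plays the role of raw_quad_weights
    log_w = raw - raw.logsumexp(dim=-1)      # what DSPP.quad_weights computes
    assert torch.allclose(log_w, torch.log_softmax(raw, dim=-1))
    assert torch.allclose(log_w.exp().sum(), torch.tensor(1.0))

    # Collapsing the leading quadrature dimension with these weights gives the
    # mixture's predictive mean: E[f] = sum_q w_q * mu_q
    output_mean = torch.randn(3, 50, 2)      # hypothetical: num_quad_sites x n x t
    weights = log_w.exp().view(-1, 1, 1)
    predictive_mean = (weights * output_mean).sum(dim=0)  # n x t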