qpytorch-0.1-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

Files changed (102)
  1. qpytorch/__init__.py +327 -0
  2. qpytorch/constraints/__init__.py +3 -0
  3. qpytorch/distributions/__init__.py +21 -0
  4. qpytorch/distributions/delta.py +86 -0
  5. qpytorch/distributions/multitask_multivariate_qexponential.py +435 -0
  6. qpytorch/distributions/multivariate_qexponential.py +581 -0
  7. qpytorch/distributions/power.py +113 -0
  8. qpytorch/distributions/qexponential.py +153 -0
  9. qpytorch/functions/__init__.py +58 -0
  10. qpytorch/kernels/__init__.py +80 -0
  11. qpytorch/kernels/grid_interpolation_kernel.py +213 -0
  12. qpytorch/kernels/inducing_point_kernel.py +151 -0
  13. qpytorch/kernels/kernel.py +695 -0
  14. qpytorch/kernels/matern32_kernel_grad.py +155 -0
  15. qpytorch/kernels/matern52_kernel_grad.py +194 -0
  16. qpytorch/kernels/matern52_kernel_gradgrad.py +248 -0
  17. qpytorch/kernels/polynomial_kernel_grad.py +88 -0
  18. qpytorch/kernels/qexponential_symmetrized_kl_kernel.py +61 -0
  19. qpytorch/kernels/rbf_kernel_grad.py +125 -0
  20. qpytorch/kernels/rbf_kernel_gradgrad.py +186 -0
  21. qpytorch/kernels/rff_kernel.py +153 -0
  22. qpytorch/lazy/__init__.py +9 -0
  23. qpytorch/likelihoods/__init__.py +66 -0
  24. qpytorch/likelihoods/bernoulli_likelihood.py +75 -0
  25. qpytorch/likelihoods/beta_likelihood.py +76 -0
  26. qpytorch/likelihoods/gaussian_likelihood.py +472 -0
  27. qpytorch/likelihoods/laplace_likelihood.py +59 -0
  28. qpytorch/likelihoods/likelihood.py +437 -0
  29. qpytorch/likelihoods/likelihood_list.py +60 -0
  30. qpytorch/likelihoods/multitask_gaussian_likelihood.py +542 -0
  31. qpytorch/likelihoods/multitask_qexponential_likelihood.py +545 -0
  32. qpytorch/likelihoods/noise_models.py +184 -0
  33. qpytorch/likelihoods/qexponential_likelihood.py +494 -0
  34. qpytorch/likelihoods/softmax_likelihood.py +97 -0
  35. qpytorch/likelihoods/student_t_likelihood.py +90 -0
  36. qpytorch/means/__init__.py +23 -0
  37. qpytorch/metrics/__init__.py +17 -0
  38. qpytorch/mlls/__init__.py +53 -0
  39. qpytorch/mlls/_approximate_mll.py +79 -0
  40. qpytorch/mlls/deep_approximate_mll.py +30 -0
  41. qpytorch/mlls/deep_predictive_log_likelihood.py +32 -0
  42. qpytorch/mlls/exact_marginal_log_likelihood.py +96 -0
  43. qpytorch/mlls/gamma_robust_variational_elbo.py +106 -0
  44. qpytorch/mlls/inducing_point_kernel_added_loss_term.py +69 -0
  45. qpytorch/mlls/kl_qexponential_added_loss_term.py +41 -0
  46. qpytorch/mlls/leave_one_out_pseudo_likelihood.py +73 -0
  47. qpytorch/mlls/marginal_log_likelihood.py +48 -0
  48. qpytorch/mlls/predictive_log_likelihood.py +76 -0
  49. qpytorch/mlls/sum_marginal_log_likelihood.py +40 -0
  50. qpytorch/mlls/variational_elbo.py +77 -0
  51. qpytorch/models/__init__.py +72 -0
  52. qpytorch/models/approximate_qep.py +115 -0
  53. qpytorch/models/deep_qeps/__init__.py +22 -0
  54. qpytorch/models/deep_qeps/deep_qep.py +155 -0
  55. qpytorch/models/deep_qeps/dspp.py +114 -0
  56. qpytorch/models/exact_prediction_strategies.py +880 -0
  57. qpytorch/models/exact_qep.py +349 -0
  58. qpytorch/models/model_list.py +100 -0
  59. qpytorch/models/pyro/__init__.py +28 -0
  60. qpytorch/models/pyro/_pyro_mixin.py +57 -0
  61. qpytorch/models/pyro/distributions/__init__.py +5 -0
  62. qpytorch/models/pyro/pyro_qep.py +105 -0
  63. qpytorch/models/qep.py +7 -0
  64. qpytorch/models/qeplvm/__init__.py +6 -0
  65. qpytorch/models/qeplvm/bayesian_qeplvm.py +40 -0
  66. qpytorch/models/qeplvm/latent_variable.py +102 -0
  67. qpytorch/module.py +30 -0
  68. qpytorch/optim/__init__.py +5 -0
  69. qpytorch/priors/__init__.py +42 -0
  70. qpytorch/priors/qep_priors.py +81 -0
  71. qpytorch/test/__init__.py +22 -0
  72. qpytorch/test/base_likelihood_test_case.py +106 -0
  73. qpytorch/test/model_test_case.py +150 -0
  74. qpytorch/test/variational_test_case.py +400 -0
  75. qpytorch/utils/__init__.py +38 -0
  76. qpytorch/utils/warnings.py +37 -0
  77. qpytorch/variational/__init__.py +47 -0
  78. qpytorch/variational/_variational_distribution.py +61 -0
  79. qpytorch/variational/_variational_strategy.py +391 -0
  80. qpytorch/variational/additive_grid_interpolation_variational_strategy.py +90 -0
  81. qpytorch/variational/batch_decoupled_variational_strategy.py +256 -0
  82. qpytorch/variational/cholesky_variational_distribution.py +65 -0
  83. qpytorch/variational/ciq_variational_strategy.py +352 -0
  84. qpytorch/variational/delta_variational_distribution.py +41 -0
  85. qpytorch/variational/grid_interpolation_variational_strategy.py +113 -0
  86. qpytorch/variational/independent_multitask_variational_strategy.py +114 -0
  87. qpytorch/variational/lmc_variational_strategy.py +248 -0
  88. qpytorch/variational/mean_field_variational_distribution.py +58 -0
  89. qpytorch/variational/multitask_variational_strategy.py +317 -0
  90. qpytorch/variational/natural_variational_distribution.py +152 -0
  91. qpytorch/variational/nearest_neighbor_variational_strategy.py +487 -0
  92. qpytorch/variational/orthogonally_decoupled_variational_strategy.py +128 -0
  93. qpytorch/variational/tril_natural_variational_distribution.py +130 -0
  94. qpytorch/variational/uncorrelated_multitask_variational_strategy.py +114 -0
  95. qpytorch/variational/unwhitened_variational_strategy.py +225 -0
  96. qpytorch/variational/variational_strategy.py +280 -0
  97. qpytorch/version.py +4 -0
  98. qpytorch-0.1.dist-info/LICENSE +21 -0
  99. qpytorch-0.1.dist-info/METADATA +177 -0
  100. qpytorch-0.1.dist-info/RECORD +102 -0
  101. qpytorch-0.1.dist-info/WHEEL +5 -0
  102. qpytorch-0.1.dist-info/top_level.txt +1 -0
qpytorch/variational/variational_strategy.py ADDED
@@ -0,0 +1,280 @@
+#!/usr/bin/env python3
+
+import warnings
+from typing import Any, Dict, Iterable, Optional, Tuple, Union
+
+import torch
+from linear_operator import to_dense
+from linear_operator.operators import (
+    CholLinearOperator,
+    DiagLinearOperator,
+    LinearOperator,
+    MatmulLinearOperator,
+    RootLinearOperator,
+    SumLinearOperator,
+    TriangularLinearOperator,
+)
+from linear_operator.utils.cholesky import psd_safe_cholesky
+from linear_operator.utils.errors import NotPSDError
+from torch import Tensor
+
+from ._variational_strategy import _VariationalStrategy
+from .cholesky_variational_distribution import CholeskyVariationalDistribution
+
+from ..distributions import MultivariateNormal, MultivariateQExponential
+from ..models import ApproximateGP, ApproximateQEP
+from gpytorch.settings import _linalg_dtype_cholesky, trace_mode
+from gpytorch.utils.errors import CachingError
+from gpytorch.utils.memoize import cached, clear_cache_hook, pop_from_cache_ignore_args
+from ..utils.warnings import OldVersionWarning
+from . import _VariationalDistribution
+
+
+def _ensure_updated_strategy_flag_set(
+    state_dict: Dict[str, Tensor],
+    prefix: str,
+    local_metadata: Dict[str, Any],
+    strict: bool,
+    missing_keys: Iterable[str],
+    unexpected_keys: Iterable[str],
+    error_msgs: Iterable[str],
+):
+    device = state_dict[list(state_dict.keys())[0]].device
+    if prefix + "updated_strategy" not in state_dict:
+        state_dict[prefix + "updated_strategy"] = torch.tensor(False, device=device)
+        warnings.warn(
+            "You have loaded a variational GP (QEP) model (using `VariationalStrategy`) from a previous version of "
+            "GPyTorch. We have updated the parameters of your model to work with the new version of "
+            "`VariationalStrategy` that uses whitened parameters.\nYour model will work as expected, but we "
+            "recommend that you re-save your model.",
+            OldVersionWarning,
+        )
+
+
+class VariationalStrategy(_VariationalStrategy):
+    r"""
+    The standard variational strategy, as defined by `Hensman et al. (2015)`_.
+    This strategy takes a set of :math:`m \ll n` inducing points :math:`\mathbf Z`
+    and applies an approximate distribution :math:`q( \mathbf u)` over their function values.
+    (Here, we use the common notation :math:`\mathbf u = f(\mathbf Z)`.)
+    The approximate function distribution for any arbitrary input :math:`\mathbf X` is given by:
+
+    .. math::
+
+        q( f(\mathbf X) ) = \int p( f(\mathbf X) \mid \mathbf u) q(\mathbf u) \: d\mathbf u
+
+    This variational strategy uses "whitening" to accelerate the optimization of the variational
+    parameters. See `Matthews (2017)`_ for more info.
+
+    :param model: Model this strategy is applied to.
+        Typically passed in when the VariationalStrategy is created in the
+        __init__ method of the user defined model.
+        It should have a ``power`` attribute if a Q-Exponential distribution is involved.
+    :param inducing_points: Tensor containing a set of inducing
+        points to use for variational inference.
+    :param variational_distribution: A
+        VariationalDistribution object that represents the form of the variational distribution :math:`q(\mathbf u)`
+    :param learn_inducing_locations: (Default True): Whether or not
+        the inducing point locations :math:`\mathbf Z` should be learned (i.e. are they
+        parameters of the model).
+    :param jitter_val: Amount of diagonal jitter to add for Cholesky factorization numerical stability
+
+    .. _Hensman et al. (2015):
+        http://proceedings.mlr.press/v38/hensman15.pdf
+    .. _Matthews (2017):
+        https://www.repository.cam.ac.uk/handle/1810/278022
+    """
+
+    def __init__(
+        self,
+        model: Union[ApproximateGP, ApproximateQEP],
+        inducing_points: Tensor,
+        variational_distribution: _VariationalDistribution,
+        learn_inducing_locations: bool = True,
+        jitter_val: Optional[float] = None,
+    ):
+        super().__init__(
+            model, inducing_points, variational_distribution, learn_inducing_locations, jitter_val=jitter_val
+        )
+        self.register_buffer("updated_strategy", torch.tensor(True))
+        self._register_load_state_dict_pre_hook(_ensure_updated_strategy_flag_set)
+        self.has_fantasy_strategy = True
+
+    @cached(name="cholesky_factor", ignore_args=True)
+    def _cholesky_factor(self, induc_induc_covar: LinearOperator) -> TriangularLinearOperator:
+        L = psd_safe_cholesky(to_dense(induc_induc_covar).type(_linalg_dtype_cholesky.value()))
+        return TriangularLinearOperator(L)
+
+    @property
+    @cached(name="prior_distribution_memo")
+    def prior_distribution(self) -> Union[MultivariateNormal, MultivariateQExponential]:
+        zeros = torch.zeros(
+            self._variational_distribution.shape(),
+            dtype=self._variational_distribution.dtype,
+            device=self._variational_distribution.device,
+        )
+        ones = torch.ones_like(zeros)
+        if hasattr(self.model, 'power'):
+            res = MultivariateQExponential(zeros, DiagLinearOperator(ones), power=self.model.power)
+        else:
+            res = MultivariateNormal(zeros, DiagLinearOperator(ones))
+        return res
+
+    @property
+    @cached(name="pseudo_points_memo")
+    def pseudo_points(self) -> Tuple[Tensor, Tensor]:
+        # TODO: have var_mean, var_cov come from a method of _variational_distribution
+        # while having Kmm_root be a root decomposition to enable CIQVariationalDistribution support.
+
+        # retrieve the variational mean, m, and covariance matrix, S.
+        if not isinstance(self._variational_distribution, CholeskyVariationalDistribution):
+            raise NotImplementedError(
+                "Only CholeskyVariationalDistribution has pseudo-point support currently, "
+                "but your _variational_distribution is a "
+                f"{self._variational_distribution.__class__.__name__}."
+            )
+
+        var_cov_root = TriangularLinearOperator(self._variational_distribution.chol_variational_covar)
+        var_cov = CholLinearOperator(var_cov_root)
+        var_mean = self.variational_distribution.mean
+        if var_mean.shape[-1] != 1:
+            var_mean = var_mean.unsqueeze(-1)
+
+        # compute R = I - S
+        cov_diff = var_cov.add_jitter(-1.0)
+        cov_diff = -1.0 * cov_diff
+
+        # K^{1/2}
+        Kmm = self.model.covar_module(self.inducing_points)
+        Kmm_root = Kmm.cholesky()
+
+        # D_a = (S^{-1} - K^{-1})^{-1} = S + S R^{-1} S
+        # note that in the whitened case R = I - S, unwhitened R = K - S
+        # we compute (R R^{T})^{-1} R^T S for stability reasons as R is probably not PSD.
+        eval_var_cov = var_cov.to_dense()
+        eval_rhs = cov_diff.transpose(-1, -2).matmul(eval_var_cov)
+        inner_term = cov_diff.matmul(cov_diff.transpose(-1, -2))
+        # TODO: flag the jitter here
+        inner_solve = inner_term.add_jitter(self.jitter_val).solve(eval_rhs, eval_var_cov.transpose(-1, -2))
+        inducing_covar = var_cov + inner_solve
+
+        inducing_covar = Kmm_root.matmul(inducing_covar).matmul(Kmm_root.transpose(-1, -2))
+
+        # mean term: D_a S^{-1} m
+        # unwhitened: (S - S R^{-1} S) S^{-1} m = (I - S R^{-1}) m
+        rhs = cov_diff.transpose(-1, -2).matmul(var_mean)
+        # TODO: this jitter too
+        inner_rhs_mean_solve = inner_term.add_jitter(self.jitter_val).solve(rhs)
+        pseudo_target_mean = Kmm_root.matmul(inner_rhs_mean_solve)
+
+        # ensure inducing covar is psd
+        # TODO: make this be an explicit root decomposition
+        try:
+            pseudo_target_covar = CholLinearOperator(inducing_covar.add_jitter(self.jitter_val).cholesky()).to_dense()
+        except NotPSDError:
+            from linear_operator.operators import DiagLinearOperator
+
+            evals, evecs = torch.linalg.eigh(inducing_covar)
+            pseudo_target_covar = (
+                evecs.matmul(DiagLinearOperator(evals + self.jitter_val)).matmul(evecs.transpose(-1, -2)).to_dense()
+            )
+
+        return pseudo_target_covar, pseudo_target_mean
+
+    def forward(
+        self,
+        x: Tensor,
+        inducing_points: Tensor,
+        inducing_values: Tensor,
+        variational_inducing_covar: Optional[LinearOperator] = None,
+        **kwargs,
+    ) -> Union[MultivariateNormal, MultivariateQExponential]:
+        # Compute full prior distribution
+        full_inputs = torch.cat([inducing_points, x], dim=-2)
+        full_output = self.model.forward(full_inputs, **kwargs)
+        full_covar = full_output.lazy_covariance_matrix
+
+        # Covariance terms
+        num_induc = inducing_points.size(-2)
+        test_mean = full_output.mean[..., num_induc:]
+        induc_induc_covar = full_covar[..., :num_induc, :num_induc].add_jitter(self.jitter_val)
+        induc_data_covar = full_covar[..., :num_induc, num_induc:].to_dense()
+        data_data_covar = full_covar[..., num_induc:, num_induc:]
+
+        # Compute interpolation terms
+        # K_ZZ^{-1/2} K_ZX
+        # K_ZZ^{-1/2} \mu_Z
+        L = self._cholesky_factor(induc_induc_covar)
+        if L.shape != induc_induc_covar.shape:
+            # Aggressive caching can cause nasty shape incompatibilities when evaluating with different batch shapes
+            # TODO: Use a hook for this
+            try:
+                pop_from_cache_ignore_args(self, "cholesky_factor")
+            except CachingError:
+                pass
+            L = self._cholesky_factor(induc_induc_covar)
+        interp_term = L.solve(induc_data_covar.type(_linalg_dtype_cholesky.value())).to(full_inputs.dtype)
+
+        # Compute the mean of q(f)
+        # k_XZ K_ZZ^{-1/2} (m - K_ZZ^{-1/2} \mu_Z) + \mu_X
+        predictive_mean = (interp_term.transpose(-1, -2) @ inducing_values.unsqueeze(-1)).squeeze(-1) + test_mean
+
+        # Compute the covariance of q(f)
+        # K_XX + k_XZ K_ZZ^{-1/2} (S - I) K_ZZ^{-1/2} k_ZX
+        middle_term = self.prior_distribution.lazy_covariance_matrix.mul(-1)
+        if variational_inducing_covar is not None:
+            middle_term = SumLinearOperator(variational_inducing_covar, middle_term)
+
+        if trace_mode.on():
+            predictive_covar = (
+                data_data_covar.add_jitter(self.jitter_val).to_dense()
+                + interp_term.transpose(-1, -2) @ middle_term.to_dense() @ interp_term
+            )
+        else:
+            predictive_covar = SumLinearOperator(
+                data_data_covar.add_jitter(self.jitter_val),
+                MatmulLinearOperator(interp_term.transpose(-1, -2), middle_term @ interp_term),
+            )
+
+        # Return the distribution
+        if hasattr(self.model, 'power'):
+            return MultivariateQExponential(predictive_mean, predictive_covar, power=self.model.power)
+        else:
+            return MultivariateNormal(predictive_mean, predictive_covar)
+
+    def __call__(self, x: Tensor, prior: bool = False, **kwargs) -> Union[MultivariateNormal, MultivariateQExponential]:
+        if not self.updated_strategy.item() and not prior:
+            with torch.no_grad():
+                # Get unwhitened p(u)
+                prior_function_dist = self(self.inducing_points, prior=True)
+                prior_mean = prior_function_dist.loc
+                L = self._cholesky_factor(prior_function_dist.lazy_covariance_matrix.add_jitter(self.jitter_val))
+
+                # Temporarily turn off noise that's added to the mean
+                orig_mean_init_std = self._variational_distribution.mean_init_std
+                self._variational_distribution.mean_init_std = 0.0
+
+                # Change the variational parameters to be whitened
+                variational_dist = self.variational_distribution
+                if isinstance(variational_dist, (MultivariateNormal, MultivariateQExponential)):
+                    mean_diff = (variational_dist.loc - prior_mean).unsqueeze(-1).type(_linalg_dtype_cholesky.value())
+                    whitened_mean = L.solve(mean_diff).squeeze(-1).to(variational_dist.loc.dtype)
+                    covar_root = variational_dist.lazy_covariance_matrix.root_decomposition().root.to_dense()
+                    covar_root = covar_root.type(_linalg_dtype_cholesky.value())
+                    whitened_covar = RootLinearOperator(L.solve(covar_root).to(variational_dist.loc.dtype))
+                    whitened_variational_distribution = variational_dist.__class__(whitened_mean, whitened_covar)
+                    if isinstance(variational_dist, MultivariateQExponential): whitened_variational_distribution.power = variational_dist.power
+                    self._variational_distribution.initialize_variational_distribution(
+                        whitened_variational_distribution
+                    )
+
+                # Reset the random noise parameter of the model
+                self._variational_distribution.mean_init_std = orig_mean_init_std
+
+            # Reset the cache
+            clear_cache_hook(self)
+
+            # Mark that we have updated the variational strategy
+            self.updated_strategy.fill_(True)
+
+        return super().__call__(x, prior=prior, **kwargs)
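
The strategy above is normally constructed inside a user-defined approximate model. The following is a minimal usage sketch assuming a GPyTorch-style API (the model class name, the mean/kernel module choices, and the `power` handling are illustrative assumptions, not part of this release):

```python
import torch
import qpytorch


class SketchApproximateQEP(qpytorch.models.ApproximateQEP):
    """Hypothetical approximate QEP wired up with the VariationalStrategy above."""

    def __init__(self, inducing_points, power=torch.tensor(1.0)):
        variational_distribution = qpytorch.variational.CholeskyVariationalDistribution(
            inducing_points.size(-2)
        )
        # The strategy receives the model, the inducing points Z, and q(u);
        # learn_inducing_locations=True makes Z a trainable parameter.
        variational_strategy = qpytorch.variational.VariationalStrategy(
            self, inducing_points, variational_distribution, learn_inducing_locations=True
        )
        super().__init__(variational_strategy)
        self.power = power  # picked up via hasattr(self.model, 'power') in the strategy
        self.mean_module = qpytorch.means.ConstantMean()
        self.covar_module = qpytorch.kernels.ScaleKernel(qpytorch.kernels.RBFKernel())

    def forward(self, x):
        # Prior p(f(x)); VariationalStrategy.forward marginalizes it against q(u)
        # to produce q(f(x)) under the whitened parameterization described above.
        return qpytorch.distributions.MultivariateQExponential(
            self.mean_module(x), self.covar_module(x), power=self.power
        )


model = SketchApproximateQEP(inducing_points=torch.randn(20, 1))
output = model(torch.randn(100, 1))  # a MultivariateQExponential q(f(X))
```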
qpytorch/version.py ADDED
@@ -0,0 +1,4 @@
+# file generated by setuptools_scm
+# don't change, don't track in version control
+__version__ = version = '0.1'
+__version_tuple__ = version_tuple = (0, 1)
qpytorch-0.1.dist-info/LICENSE ADDED
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2025 Shiwei Lan
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
qpytorch-0.1.dist-info/METADATA ADDED
@@ -0,0 +1,177 @@
+Metadata-Version: 2.1
+Name: qpytorch
+Version: 0.1
+Summary: An implementation of Q-Exponential Processes in PyTorch based on GPyTorch
+Home-page: https://lanzithinking.github.io/qepytorch/
+Author: Shiwei Lan
+Author-email: lanzithinking@gmail.com
+License: MIT
+Project-URL: Documentation, https://qepytorch.readthedocs.io
+Project-URL: Source, https://github.com/lanzithinking/qepytorch/
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Programming Language :: Python :: 3
+Requires-Python: >=3.10
+Description-Content-Type: text/markdown
+License-File: LICENSE
+Requires-Dist: jaxtyping
+Requires-Dist: mpmath<=1.3,>=0.19
+Requires-Dist: scikit-learn
+Requires-Dist: scipy>=1.6.0
+Requires-Dist: linear-operator>=0.6
+Requires-Dist: gpytorch>=1.13
+Provides-Extra: dev
+Requires-Dist: pre-commit; extra == "dev"
+Requires-Dist: setuptools-scm; extra == "dev"
+Requires-Dist: twine; extra == "dev"
+Requires-Dist: ufmt; extra == "dev"
+Provides-Extra: docs
+Requires-Dist: ipykernel<=6.17.1; extra == "docs"
+Requires-Dist: ipython<=8.6.0; extra == "docs"
+Requires-Dist: m2r2<=0.3.3.post2; extra == "docs"
+Requires-Dist: nbclient<=0.7.3; extra == "docs"
+Requires-Dist: nbformat<=5.8.0; extra == "docs"
+Requires-Dist: nbsphinx<=0.9.1; extra == "docs"
+Requires-Dist: lxml-html-clean; extra == "docs"
+Requires-Dist: pandoc<=3.0.0; extra == "docs"
+Requires-Dist: platformdirs<=3.2.0; extra == "docs"
+Requires-Dist: setuptools-scm<=7.1.0; extra == "docs"
+Requires-Dist: sphinx<=6.2.1; extra == "docs"
+Requires-Dist: sphinx-autodoc-typehints<=1.23.0; extra == "docs"
+Requires-Dist: sphinx-rtd-theme<0.5; extra == "docs"
+Provides-Extra: examples
+Requires-Dist: ipython; extra == "examples"
+Requires-Dist: jupyter; extra == "examples"
+Requires-Dist: matplotlib; extra == "examples"
+Requires-Dist: scipy; extra == "examples"
+Requires-Dist: torchvision; extra == "examples"
+Requires-Dist: tqdm; extra == "examples"
+Provides-Extra: keops
+Requires-Dist: pykeops>=1.1.1; extra == "keops"
+Provides-Extra: pyro
+Requires-Dist: pyro-ppl>=1.8; extra == "pyro"
+Provides-Extra: test
+Requires-Dist: flake8==4.0.1; extra == "test"
+Requires-Dist: flake8-print==4.0.0; extra == "test"
+Requires-Dist: pytest; extra == "test"
+Requires-Dist: nbval; extra == "test"
+
+# Q<sup style="font-size: 0.5em;">&#9428;</sup>PyTorch
+
+---
+[![Test Suite](https://github.com/lanzithinking/qepytorch/actions/workflows/run_test_suite.yml/badge.svg)](https://github.com/lanzithinking/qepytorch/actions/workflows/run_test_suite.yml)
+[![Documentation Status](https://readthedocs.org/projects/qepytorch/badge/?version=latest)](https://qepytorch.readthedocs.io/en/latest/?badge=latest)
+[![License](https://img.shields.io/badge/license-MIT-green.svg)](LICENSE)
+
+[![Python Version](https://img.shields.io/badge/python-3.10+-blue.svg)](https://www.python.org/downloads/)
+[![Conda](https://img.shields.io/conda/v/conda-forge/qpytorch.svg)](https://anaconda.org/conda-forge/qpytorch)
+[![PyPI](https://img.shields.io/pypi/v/qpytorch.svg)](https://pypi.org/project/qpytorch)
+
+Q<sup style="font-size: 0.5em;">&#9428;</sup>PyTorch is a Q-exponential process library implemented in PyTorch and built on [GPyTorch](https://gpytorch.ai). Q<sup style="font-size: 0.5em;">&#9428;</sup>PyTorch is designed for creating scalable, flexible, and modular Q-exponential process models with ease.
+
+Internally, Q<sup style="font-size: 0.5em;">&#9428;</sup>PyTorch differs from many existing approaches to QEP inference by performing most inference operations using numerical linear algebra techniques like preconditioned conjugate gradients.
+Implementing a scalable QEP method is as simple as providing a matrix multiplication routine with the kernel matrix and its derivative via our [LinearOperator](https://github.com/cornellius-gp/linear_operator) interface,
+or by composing many of our already existing `LinearOperators`.
+This allows not only for easy implementation of popular scalable QEP techniques,
+but often also for significantly improved utilization of GPU computing compared to solvers based on the Cholesky decomposition.
+
+Q<sup style="font-size: 0.5em;">&#9428;</sup>PyTorch provides (1) significant GPU acceleration (through MVM based inference);
+(2) state-of-the-art implementations of the latest algorithmic advances for scalability and flexibility ([SKI/KISS-GP](http://proceedings.mlr.press/v37/wilson15.pdf), [stochastic Lanczos expansions](https://arxiv.org/abs/1711.03481), [LOVE](https://arxiv.org/pdf/1803.06058.pdf), [SKIP](https://arxiv.org/pdf/1802.08903.pdf), [stochastic variational](https://arxiv.org/pdf/1611.00336.pdf) [deep kernel learning](http://proceedings.mlr.press/v51/wilson16.pdf), ...);
+(3) easy integration with deep learning frameworks.
+
+
+## Examples, Tutorials, and Documentation
+
+See our [**documentation, examples, tutorials**](https://qepytorch.readthedocs.io/en/stable/) on how to construct all sorts of models in Q<sup style="font-size: 0.5em;">&#9428;</sup>PyTorch.
+
+## Installation
+
+**Requirements**:
+- Python >= 3.10
+- PyTorch >= 2.2
+- GPyTorch >= 1.13
+
+Install Q<sup style="font-size: 0.5em;">&#9428;</sup>PyTorch using pip or conda:
+
+```bash
+pip install qpytorch
+conda install qpytorch -c qpytorch
+```
+
+(To use packages globally but install Q<sup style="font-size: 0.5em;">&#9428;</sup>PyTorch as a user-only package, use `pip install --user` above.)
+
+#### Latest (Unstable) Version
+
+To upgrade to the latest (unstable) version, run
+
+```bash
+pip install --upgrade git+https://github.com/cornellius-gp/linear_operator.git
+pip install --upgrade git+https://github.com/cornellius-gp/gpytorch.git
+pip install --upgrade git+https://github.com/lanzithinking/qepytorch.git
+```
+
+#### Development version
+
+If you are contributing a pull request, it is best to perform a manual installation:
+
+```sh
+git clone https://github.com/lanzithinking/qepytorch.git qpytorch
+cd qpytorch
+pip install -e .[dev,docs,examples,keops,pyro,test]  # keops and pyro are optional
+```
+
+<!--
+#### ArchLinux Package
+**Note**: Experimental AUR package. For most users, we recommend installation by conda or pip.
+-->
+<!--
+Q<sup style="font-size: 0.5em;">&#9428;</sup>PyTorch is also available on the [ArchLinux User Repository](https://wiki.archlinux.org/index.php/Arch_User_Repository) (AUR).
+You can install it with an [AUR helper](https://wiki.archlinux.org/index.php/AUR_helpers), like [`yay`](https://aur.archlinux.org/packages/yay/), as follows:
+-->
+<!--
+```bash
+yay -S python-qpytorch
+```
+To discuss any issues related to this AUR package refer to the comments section of
+[`python-qpytorch`](https://aur.archlinux.org/packages/python-qpytorch/).
+-->
+
+## Citing Us
+
+If you use Q<sup style="font-size: 0.5em;">&#9428;</sup>PyTorch, please cite the following papers:
+> [Li, Shuyi, Michael O'Connor, and Shiwei Lan. "Bayesian Learning via Q-Exponential Process." In Advances in Neural Information Processing Systems (2023).](https://papers.nips.cc/paper_files/paper/2023/hash/e6bfdd58f1326ff821a1b92743963bdf-Abstract-Conference.html)
+```
+@inproceedings{li2023QEP,
+  title={Bayesian Learning via Q-Exponential Process},
+  author={Li, Shuyi and O'Connor, Michael and Lan, Shiwei},
+  booktitle={Advances in Neural Information Processing Systems},
+  year={2023}
+}
+```
+
+## Contributing
+
+See the contributing guidelines [CONTRIBUTING.md](https://github.com/lanzithinking/qepytorch/blob/main/CONTRIBUTING.md)
+for information on submitting issues and pull requests.
+
+
+## The Team
+
+Q<sup style="font-size: 0.5em;">&#9428;</sup>PyTorch is primarily maintained by:
+- [Shiwei Lan](https://math.la.asu.edu/~slan) (Arizona State University)
+
+We would like to thank our other contributors including (but not limited to)
+Shuyi Li,
+Guangting Yu,
+Zhi Chang,
+Chukwudi Paul Obite,
+Keyan Wu,
+and many more!
+
+
+## Acknowledgements
+Development of Q<sup style="font-size: 0.5em;">&#9428;</sup>PyTorch is supported by.
+
+
+## License
+
+Q<sup style="font-size: 0.5em;">&#9428;</sup>PyTorch is [MIT licensed](https://github.com/lanzithinking/qepytorch/blob/main/LICENSE).
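
The README's point about composing `LinearOperator` objects can be made concrete with a small sketch against the `linear-operator>=0.6` dependency pinned above; the kernel choice and noise value here are illustrative, not taken from the package:

```python
import torch
from linear_operator.operators import DenseLinearOperator, DiagLinearOperator, SumLinearOperator

n = 500
x = torch.randn(n, 2)

# Wrap a dense RBF kernel matrix as a LinearOperator (lengthscale fixed at 1).
K = DenseLinearOperator(torch.exp(-0.5 * torch.cdist(x, x).pow(2)))
noise = DiagLinearOperator(0.1 * torch.ones(n))

# The sum K + sigma^2 I is represented lazily; solves can exploit its structure
# (e.g. iterative methods) rather than always forming a dense factorization.
K_hat = SumLinearOperator(K, noise)
alpha = K_hat.solve(torch.randn(n, 1))  # e.g. the weights of a posterior mean
```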
qpytorch-0.1.dist-info/RECORD ADDED
@@ -0,0 +1,102 @@
+qpytorch/__init__.py,sha256=0sD7DfLvEXx3PvqrTyl5bOYprus1Wjzxs1hOzTbQx6s,12392
+qpytorch/module.py,sha256=xjw6X-v3_iVt4cOUEQdQp-cbTKkLCqwaeJoUardRQSQ,1259
+qpytorch/version.py,sha256=Ljuux5HvS-yDzAhpzt_WxHJ6Ct4dLQiCUJg2yT5NuzE,155
+qpytorch/constraints/__init__.py,sha256=JS4bzSGrgAiDtOM_Y4rerzkoMAUt671E2U1SVakhfX4,138
+qpytorch/distributions/__init__.py,sha256=OSVX5Lfb72NKVFWVku0t-D2RyZuVcJAIICndOX9f5EA,973
+qpytorch/distributions/delta.py,sha256=AC-N5fhBQad872jl8q9PMLq46E5qwcQOXkRf1PSW-Cg,3507
+qpytorch/distributions/multitask_multivariate_qexponential.py,sha256=mttRjFp_hx-Sjxi-061sRuGeFmpA_hQFLx6cYgnUBqc,21584
+qpytorch/distributions/multivariate_qexponential.py,sha256=BUBPyEqoXR7E1VY3uW-DbWSr-h-94WXGxLzkKXluR3Q,28885
+qpytorch/distributions/power.py,sha256=a1_MMLJk-P1UcLNRtoquoBcbl1Rpwq_6UvlJR-8RPY0,3777
+qpytorch/distributions/qexponential.py,sha256=9O_uquDARLSamDnEsbaJJ3GZbtY3Mv2PqEiwVMwLU2M,5969
+qpytorch/functions/__init__.py,sha256=rLKoyZQNW0DttCQGAxu5SgKOeZZXsp1wJnABi5WL5kY,1655
+qpytorch/kernels/__init__.py,sha256=2TlJ4Vq8mGg4KE19K8OfQwZf3A5hRiUm53KdX7ZUkCc,3229
+qpytorch/kernels/grid_interpolation_kernel.py,sha256=l0t0pJ0EWHaCaP_rkJo1BL63gEoKAIX3kYc8rQFWjN8,9108
+qpytorch/kernels/inducing_point_kernel.py,sha256=yRp2_w0q8HkOCb4sbuVEebdlGnRhR9cQtYir5cr-6lo,5451
+qpytorch/kernels/kernel.py,sha256=UBBTelSm2nFS1JU2kfaXMzlWCKEdbrhMh6aKqh3_1_w,29415
+qpytorch/kernels/matern32_kernel_grad.py,sha256=W5P5XSWTdeHdw9zrbpInOyIUhCFW_Z86CJ9D1biK6MY,7050
+qpytorch/kernels/matern52_kernel_grad.py,sha256=mBkwuGBiVFudJ9zNfcoMn7_j9mbBgm37w853njZK48o,8638
+qpytorch/kernels/matern52_kernel_gradgrad.py,sha256=1jc012aYCWAT2qNBhW60fM0IKberW5qLkvptAhXq9ys,12846
+qpytorch/kernels/polynomial_kernel_grad.py,sha256=vM8y00lEGz4Z-ceZ3wJ33pL5rD16gehUpYjJ-PVnPbE,3708
+qpytorch/kernels/qexponential_symmetrized_kl_kernel.py,sha256=pMZpjU3phGEBQ_O7K9sRFQgwfX5rMtaowsVteJ9hdr4,2932
+qpytorch/kernels/rbf_kernel_grad.py,sha256=bVUfbkn1k01X4cix2XTTWJZXtezIX4JBtvKpHkB_C5s,5917
+qpytorch/kernels/rbf_kernel_gradgrad.py,sha256=0v17TMsxURKPigOvYLGy7OwHeOleTBXhk5UaE2WUpv8,9392
+qpytorch/kernels/rff_kernel.py,sha256=kmP4U2mJX_OtjhAdlevxQfbBpIfOCNKSPAGcrQJiTSo,5967
+qpytorch/lazy/__init__.py,sha256=9O54rlcJMXrzGs5BWcLCAAuNluQVS2FxiTgKkBRQwCQ,189
+qpytorch/likelihoods/__init__.py,sha256=knFtCZOf5hLgX3XWEBnkf-RfY0NFJ9gs7aEiG0ex3es,2291
+qpytorch/likelihoods/bernoulli_likelihood.py,sha256=TuARyzTBS9CXJ_gTPHbshozH1rUFFSaqD0WD9Vf8J6c,2938
+qpytorch/likelihoods/beta_likelihood.py,sha256=uPixvFYX_ILVm44rl1uNSPoaRf5v83QMN8ta35YY_rA,2688
+qpytorch/likelihoods/gaussian_likelihood.py,sha256=OI4vi-Bo_ta08jJ_fsnJuc2oNP4YsVFYs8RfafsCbaI,19525
+qpytorch/likelihoods/laplace_likelihood.py,sha256=WAWnM-JBn-jKoLS-_294CWe0o4y70E5IxWhP-C2vXCg,2002
+qpytorch/likelihoods/likelihood.py,sha256=4b5CvQrH0JWOXPQnP-qVw9iZnVj-G0WE4bjUVkVOGiI,22659
+qpytorch/likelihoods/likelihood_list.py,sha256=eHmI0Vub8A88OvRCOU2yD__AVFcMWXOuS5HvqT164eE,2119
+qpytorch/likelihoods/multitask_gaussian_likelihood.py,sha256=rkEAW1eRn3IjdL7rFS8gsupzJG7MGXM1LhkaNZ7D-Ho,24779
+qpytorch/likelihoods/multitask_qexponential_likelihood.py,sha256=0u5HNN-oM-nJ-B6k6j5snGNJ-3SQyHSxuXFJC_g8tZg,25238
+qpytorch/likelihoods/noise_models.py,sha256=0ebjtb7A3P1WbXMsXe2XMcSNxyKMpSh85_3DNjMeYdQ,8166
+qpytorch/likelihoods/qexponential_likelihood.py,sha256=mrFWQ84s3fWXziyjtIkS52ndCcc0Xe6q_MW7irXCHrQ,21357
+qpytorch/likelihoods/softmax_likelihood.py,sha256=_Q6K3unAgoOSWkZSGeK84-RqoaIz4k6ihf2DGAWqO1k,4558
+qpytorch/likelihoods/student_t_likelihood.py,sha256=Iv7IGu0K_xkjgC0N6SGaII-Vov8QtQzjtQPmD5WN6vE,3364
+qpytorch/means/__init__.py,sha256=7Gj9e8VijlDYJEWXYU2AKWOgkw-SwzM_qgnODNWifhk,729
+qpytorch/metrics/__init__.py,sha256=BLnN_PXczngBRxvsQEZ6Mw_cf345wzSnlUA5m1y-rV8,428
+qpytorch/mlls/__init__.py,sha256=HnB2efNG-hWbECBprxoqoWvvZPnI2fajNEfHbzV-pNI,2066
+qpytorch/mlls/_approximate_mll.py,sha256=RaEaQAhf5Bkca0t6E3ix7ctz84AmgK3PXx500QxrLUM,3592
+qpytorch/mlls/deep_approximate_mll.py,sha256=TZxH3M-whckfZbL3s9XBsMvpGHeHfGNwT0xTv0MO93I,1236
+qpytorch/mlls/deep_predictive_log_likelihood.py,sha256=3sxp9-fSIWt8V_dQG_ayrnvLgN-Ve9A-4c7JEY8j6A0,1377
+qpytorch/mlls/exact_marginal_log_likelihood.py,sha256=dd7yD6wskbbqrNTlSxoR_mEQ8WmJH6ILAWHeZ1omCcg,4799
+qpytorch/mlls/gamma_robust_variational_elbo.py,sha256=6NB6FtkEy5YL6r75kMqMptkROpGWh0QeV540rlNE2vI,4816
+qpytorch/mlls/inducing_point_kernel_added_loss_term.py,sha256=ZQvNip6k95iV-Bdy6RdhsCHWS3KVdB5jXeg5Zi1IcU4,3824
+qpytorch/mlls/kl_qexponential_added_loss_term.py,sha256=9tT2CWck5uzwmHuFjkm1eHuovLBvlQXGbBctu-GuOik,1541
+qpytorch/mlls/leave_one_out_pseudo_likelihood.py,sha256=ZXQ6Rj-zHiD2Mjqm_rk_mcVTgA5otpiMSVNJtFYq1bI,3989
+qpytorch/mlls/marginal_log_likelihood.py,sha256=BjuM9apQ7b79x6jdjliK_h24O8Yy9zMMIwLlveFdJhs,2187
+qpytorch/mlls/predictive_log_likelihood.py,sha256=tGGkzu29Cs91BeDxCRC_s-l3idhbHuus3XTPni0SlL0,4015
+qpytorch/mlls/sum_marginal_log_likelihood.py,sha256=la3teD1VC_Xh4FUCyIGphWkRkdTzuMtgcj2R1NA2fc4,1641
+qpytorch/mlls/variational_elbo.py,sha256=M9H-nKRsQsfpHewmUihePzcGlKe_fPqnLQWoPl2nrqI,4082
+qpytorch/models/__init__.py,sha256=IybxlfX2XGo8bOoQE7BPlv9G-fqP6EnkVZHnS01nbJ8,2103
+qpytorch/models/approximate_qep.py,sha256=UxLcwzP1ykDKuLKFIZxljanQ3Mw7Gk_K67W341nOJd8,5577
+qpytorch/models/exact_prediction_strategies.py,sha256=gzTjc8rjiHTaXk0STKl-HocrOxzyWGfXV9Iq0DwYJUU,43305
+qpytorch/models/exact_qep.py,sha256=qPSxipLTa0KRJUcBpf85XXOVP2QItYQKnbxw4n9zcnc,16910
+qpytorch/models/model_list.py,sha256=oAol6X6S4-Pd6NHt-El9R3dAL09m4fITHWJFw9Y7qnY,3342
+qpytorch/models/qep.py,sha256=RLoLOs7XwLCRT2zDbief8le2vVExTnOWUIqqIhyB0Po,82
+qpytorch/models/deep_qeps/__init__.py,sha256=GblwsDly1YxZMZVMz91aIxDWJdRL4ndr-PMYNldtDKo,694
+qpytorch/models/deep_qeps/deep_qep.py,sha256=IyW4udmQZtrp8TVf79vU5p6pQxfaJiW7GW4dxSPFVyg,6651
+qpytorch/models/deep_qeps/dspp.py,sha256=xOb63PWU5eYlV6Im36meWXl3S9bSSOHZGMdPW5wvc1E,5155
+qpytorch/models/pyro/__init__.py,sha256=48AjdCGvlLHRjHggbSH6zZtMXBnbRjYjWxn5KyEmWHk,1024
+qpytorch/models/pyro/_pyro_mixin.py,sha256=Lj9wdVDEVp1eAhYqNbykT7Cy3j6tobRkn9CNOFAMMy4,2790
+qpytorch/models/pyro/pyro_qep.py,sha256=RSnuPxVzHhQWnfz61Tno_xsKhVp8otJ143mO4evun18,4655
+qpytorch/models/pyro/distributions/__init__.py,sha256=RiVEFSDwU_4auqTBy6WJlEpB_0nN35OH1p1ps9ng6j8,108
+qpytorch/models/qeplvm/__init__.py,sha256=GtRJb6AknTNT0epZacscrcu9itNW24GXFh0gwFw_QMM,266
+qpytorch/models/qeplvm/bayesian_qeplvm.py,sha256=Czs6yfBxNiD3a4AwAErw6gXA0b16eUIOOS1Kn7pbvxA,1625
+qpytorch/models/qeplvm/latent_variable.py,sha256=Oz0MZNg0_GPsMr0h0K8rrFPjUr76GgfJamuUQAUCFWc,3659
+qpytorch/optim/__init__.py,sha256=CcL6qsxuzrGMYWb4Tj2bhu6oNCMMZoBxfXkl162nskw,78
+qpytorch/priors/__init__.py,sha256=FQlqEd7C9Uib44inp4HUq-2o8wcLcdLUAgrY1GxUSmU,1031
+qpytorch/priors/qep_priors.py,sha256=9KFm62XuceheMAdHtSqOdq3O2xqtzdStaoCYp6giSMk,2840
+qpytorch/test/__init__.py,sha256=HNZWJRqUxAFxegg4etHcYSw4Zu9XBbe1dxIzNbhWkNY,730
+qpytorch/test/base_likelihood_test_case.py,sha256=_XtpflusvDOuhzEB0hElgQdB5QkIMcCaq3SdjXVmg6k,4634
+qpytorch/test/model_test_case.py,sha256=V0H50k6NAl7ikn5M6orFW3ORukIKpLO-2ECn352hqHA,6405
+qpytorch/test/variational_test_case.py,sha256=8WA2Kr5krlWgAsACHwsnOpgfRrUyPPGn-iXrEkNQnTc,17085
+qpytorch/utils/__init__.py,sha256=DlpoQfY0COkeWmW8f0KMZyNsNiJMBEs5Z-nlbs8pdxo,1001
+qpytorch/utils/warnings.py,sha256=HMrlnT1cK6twx4m1dTHnqx2PTBvuzmqxk567YxYk8co,826
+qpytorch/variational/__init__.py,sha256=b8T3yzFTPMhssoPmgygacD-Zh55b7SFqLLUtTB-BIpU,2367
+qpytorch/variational/_variational_distribution.py,sha256=ZFKFbvV0-USC8NAWAdXN-_0HpGdwM9K66sBm7eTXnh0,2014
+qpytorch/variational/_variational_strategy.py,sha256=fCFUSuTOjRrB-tHztC93_4sczQvILTN4WS68Ot1iJUA,18736
+qpytorch/variational/additive_grid_interpolation_variational_strategy.py,sha256=i9qRRCLlv341wGHdcqX8oesBtT10qmMZvn-U_YuhpyI,3984
+qpytorch/variational/batch_decoupled_variational_strategy.py,sha256=kc2Z3NVbEuaWZ_hHxfSX2jKyuYsPzI6QPrVIzHRUxrg,12998
+qpytorch/variational/cholesky_variational_distribution.py,sha256=7RDdUqu3iwr0EQySJ-OHQOQq4_LlU8rC2LioW_dTRD0,3347
+qpytorch/variational/ciq_variational_strategy.py,sha256=UR2_SILO7MunHQX30O_mUiEylZxBhHiWBAE-k7Ps6Pc,15481
+qpytorch/variational/delta_variational_distribution.py,sha256=OBhbMthojkxFKbdArRkrqeHrGrZbfd_8h9PMgwWlJsg,1924
+qpytorch/variational/grid_interpolation_variational_strategy.py,sha256=a_Ow8XVup5FwHXaq2pOC9svKNVwqRMvRRGwRf6fP754,5435
+qpytorch/variational/independent_multitask_variational_strategy.py,sha256=nm7T_FykWdRp0BS2sumy48WRW05YCRmbSECw-4k2hU8,5091
+qpytorch/variational/lmc_variational_strategy.py,sha256=qx_jvmWP0p2Fkf7SP7I5f-_LIs3ti-pOlwu2x74vhcI,12646
+qpytorch/variational/mean_field_variational_distribution.py,sha256=DtH7thw7llrnjpuoOukb4bGTZTpuOyaDik8nPJZzyss,3241
+qpytorch/variational/multitask_variational_strategy.py,sha256=DTMIqlu7WfxOejK4YL5T3OwYddmihorFMwQlOyQl1m8,16165
+qpytorch/variational/natural_variational_distribution.py,sha256=RT1NqZbTjGAbRfiOKp7rgQ2ACwMz3WG1xOX2yR1bzKE,6557
+qpytorch/variational/nearest_neighbor_variational_strategy.py,sha256=RAtEVpZSOeuQk8v-r8GZKl_at-piNScBdhCGjX1gDQY,26072
+qpytorch/variational/orthogonally_decoupled_variational_strategy.py,sha256=I2YL4M5BIzaVdZfGKFYSyhBGPv_9POQkYmvqdbrI7UQ,6024
+qpytorch/variational/tril_natural_variational_distribution.py,sha256=3wPggMUY2935HTO1mW2GH7ceDKOHQsSDaIbt05W72ec,6020
+qpytorch/variational/uncorrelated_multitask_variational_strategy.py,sha256=LdiVacUdQZ-BJDGcdHwlZ_uIVPfRl1WAgEtfs3ahxdg,5176
+qpytorch/variational/unwhitened_variational_strategy.py,sha256=068ScAOHk0VHLfkwakR41vrpASqIDjvvnhOFYIEE9uU,10713
+qpytorch/variational/variational_strategy.py,sha256=TD_rPpQL2n7bK869tICT6cA0cc0YFgqi0AsHfyVo2Zc,13155
+qpytorch-0.1.dist-info/LICENSE,sha256=QcK8fAvGl70vlwIHUqKdi4oV_SvhC6lBGYXTR1znTsY,1067
+qpytorch-0.1.dist-info/METADATA,sha256=pOhOSsJjOL6qc4tSYFILwL-ZUN9dKgS4OhbqfjWfDHs,7986
+qpytorch-0.1.dist-info/WHEEL,sha256=tZoeGjtWxWRfdplE7E3d45VPlLNQnvbKiYnx7gwAy8A,92
+qpytorch-0.1.dist-info/top_level.txt,sha256=WZP9m4PVYtj2RhzbzmW4UqUGOy-sOfumPrjnvNFrv4Q,9
+qpytorch-0.1.dist-info/RECORD,,
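
Each RECORD row has the form `path,sha256=<digest>,size`, where the digest is the unpadded URL-safe base64 encoding of the file's SHA-256 hash, per the wheel spec. A small sketch for checking one row against a locally extracted wheel (the working-directory layout is an assumption):

```python
import base64
import hashlib
from pathlib import Path

def record_digest(path: Path) -> str:
    # RECORD uses unpadded URL-safe base64 of the raw SHA-256 digest.
    digest = hashlib.sha256(path.read_bytes()).digest()
    return base64.urlsafe_b64encode(digest).rstrip(b"=").decode("ascii")

p = Path("qpytorch/version.py")  # relative to the unpacked wheel root
assert record_digest(p) == "Ljuux5HvS-yDzAhpzt_WxHJ6Ct4dLQiCUJg2yT5NuzE"
assert p.stat().st_size == 155
```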
qpytorch-0.1.dist-info/WHEEL ADDED
@@ -0,0 +1,5 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.45.1)
+Root-Is-Purelib: true
+Tag: py3-none-any
+
qpytorch-0.1.dist-info/top_level.txt ADDED
@@ -0,0 +1 @@
+qpytorch