CUQIpy 1.2.0.post0.dev109__tar.gz → 1.2.0.post0.dev245__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of CUQIpy might be problematic.
- {cuqipy-1.2.0.post0.dev109 → cuqipy-1.2.0.post0.dev245}/CUQIpy.egg-info/PKG-INFO +1 -1
- {cuqipy-1.2.0.post0.dev109 → cuqipy-1.2.0.post0.dev245}/CUQIpy.egg-info/SOURCES.txt +1 -0
- {cuqipy-1.2.0.post0.dev109 → cuqipy-1.2.0.post0.dev245}/PKG-INFO +1 -1
- {cuqipy-1.2.0.post0.dev109 → cuqipy-1.2.0.post0.dev245}/cuqi/_version.py +3 -3
- {cuqipy-1.2.0.post0.dev109 → cuqipy-1.2.0.post0.dev245}/cuqi/experimental/mcmc/__init__.py +1 -1
- cuqipy-1.2.0.post0.dev245/cuqi/experimental/mcmc/_langevin_algorithm.py +389 -0
- {cuqipy-1.2.0.post0.dev109 → cuqipy-1.2.0.post0.dev245}/cuqi/implicitprior/__init__.py +2 -0
- cuqipy-1.2.0.post0.dev245/cuqi/implicitprior/_restorator.py +223 -0
- {cuqipy-1.2.0.post0.dev109 → cuqipy-1.2.0.post0.dev245}/tests/test_implicit_priors.py +36 -0
- cuqipy-1.2.0.post0.dev109/cuqi/experimental/mcmc/_langevin_algorithm.py +0 -233
- {cuqipy-1.2.0.post0.dev109 → cuqipy-1.2.0.post0.dev245}/CUQIpy.egg-info/dependency_links.txt +0 -0
- {cuqipy-1.2.0.post0.dev109 → cuqipy-1.2.0.post0.dev245}/CUQIpy.egg-info/requires.txt +0 -0
- {cuqipy-1.2.0.post0.dev109 → cuqipy-1.2.0.post0.dev245}/CUQIpy.egg-info/top_level.txt +0 -0
- {cuqipy-1.2.0.post0.dev109 → cuqipy-1.2.0.post0.dev245}/LICENSE +0 -0
- {cuqipy-1.2.0.post0.dev109 → cuqipy-1.2.0.post0.dev245}/README.md +0 -0
- {cuqipy-1.2.0.post0.dev109 → cuqipy-1.2.0.post0.dev245}/cuqi/__init__.py +0 -0
- {cuqipy-1.2.0.post0.dev109 → cuqipy-1.2.0.post0.dev245}/cuqi/_messages.py +0 -0
- {cuqipy-1.2.0.post0.dev109 → cuqipy-1.2.0.post0.dev245}/cuqi/array/__init__.py +0 -0
- {cuqipy-1.2.0.post0.dev109 → cuqipy-1.2.0.post0.dev245}/cuqi/array/_array.py +0 -0
- {cuqipy-1.2.0.post0.dev109 → cuqipy-1.2.0.post0.dev245}/cuqi/config.py +0 -0
- {cuqipy-1.2.0.post0.dev109 → cuqipy-1.2.0.post0.dev245}/cuqi/data/__init__.py +0 -0
- {cuqipy-1.2.0.post0.dev109 → cuqipy-1.2.0.post0.dev245}/cuqi/data/_data.py +0 -0
- {cuqipy-1.2.0.post0.dev109 → cuqipy-1.2.0.post0.dev245}/cuqi/data/astronaut.npz +0 -0
- {cuqipy-1.2.0.post0.dev109 → cuqipy-1.2.0.post0.dev245}/cuqi/data/camera.npz +0 -0
- {cuqipy-1.2.0.post0.dev109 → cuqipy-1.2.0.post0.dev245}/cuqi/data/cat.npz +0 -0
- {cuqipy-1.2.0.post0.dev109 → cuqipy-1.2.0.post0.dev245}/cuqi/data/cookie.png +0 -0
- {cuqipy-1.2.0.post0.dev109 → cuqipy-1.2.0.post0.dev245}/cuqi/data/satellite.mat +0 -0
- {cuqipy-1.2.0.post0.dev109 → cuqipy-1.2.0.post0.dev245}/cuqi/density/__init__.py +0 -0
- {cuqipy-1.2.0.post0.dev109 → cuqipy-1.2.0.post0.dev245}/cuqi/density/_density.py +0 -0
- {cuqipy-1.2.0.post0.dev109 → cuqipy-1.2.0.post0.dev245}/cuqi/diagnostics.py +0 -0
- {cuqipy-1.2.0.post0.dev109 → cuqipy-1.2.0.post0.dev245}/cuqi/distribution/__init__.py +0 -0
- {cuqipy-1.2.0.post0.dev109 → cuqipy-1.2.0.post0.dev245}/cuqi/distribution/_beta.py +0 -0
- {cuqipy-1.2.0.post0.dev109 → cuqipy-1.2.0.post0.dev245}/cuqi/distribution/_cauchy.py +0 -0
- {cuqipy-1.2.0.post0.dev109 → cuqipy-1.2.0.post0.dev245}/cuqi/distribution/_cmrf.py +0 -0
- {cuqipy-1.2.0.post0.dev109 → cuqipy-1.2.0.post0.dev245}/cuqi/distribution/_custom.py +0 -0
- {cuqipy-1.2.0.post0.dev109 → cuqipy-1.2.0.post0.dev245}/cuqi/distribution/_distribution.py +0 -0
- {cuqipy-1.2.0.post0.dev109 → cuqipy-1.2.0.post0.dev245}/cuqi/distribution/_gamma.py +0 -0
- {cuqipy-1.2.0.post0.dev109 → cuqipy-1.2.0.post0.dev245}/cuqi/distribution/_gaussian.py +0 -0
- {cuqipy-1.2.0.post0.dev109 → cuqipy-1.2.0.post0.dev245}/cuqi/distribution/_gmrf.py +0 -0
- {cuqipy-1.2.0.post0.dev109 → cuqipy-1.2.0.post0.dev245}/cuqi/distribution/_inverse_gamma.py +0 -0
- {cuqipy-1.2.0.post0.dev109 → cuqipy-1.2.0.post0.dev245}/cuqi/distribution/_joint_distribution.py +0 -0
- {cuqipy-1.2.0.post0.dev109 → cuqipy-1.2.0.post0.dev245}/cuqi/distribution/_laplace.py +0 -0
- {cuqipy-1.2.0.post0.dev109 → cuqipy-1.2.0.post0.dev245}/cuqi/distribution/_lmrf.py +0 -0
- {cuqipy-1.2.0.post0.dev109 → cuqipy-1.2.0.post0.dev245}/cuqi/distribution/_lognormal.py +0 -0
- {cuqipy-1.2.0.post0.dev109 → cuqipy-1.2.0.post0.dev245}/cuqi/distribution/_modifiedhalfnormal.py +0 -0
- {cuqipy-1.2.0.post0.dev109 → cuqipy-1.2.0.post0.dev245}/cuqi/distribution/_normal.py +0 -0
- {cuqipy-1.2.0.post0.dev109 → cuqipy-1.2.0.post0.dev245}/cuqi/distribution/_posterior.py +0 -0
- {cuqipy-1.2.0.post0.dev109 → cuqipy-1.2.0.post0.dev245}/cuqi/distribution/_smoothed_laplace.py +0 -0
- {cuqipy-1.2.0.post0.dev109 → cuqipy-1.2.0.post0.dev245}/cuqi/distribution/_truncated_normal.py +0 -0
- {cuqipy-1.2.0.post0.dev109 → cuqipy-1.2.0.post0.dev245}/cuqi/distribution/_uniform.py +0 -0
- {cuqipy-1.2.0.post0.dev109 → cuqipy-1.2.0.post0.dev245}/cuqi/experimental/__init__.py +0 -0
- {cuqipy-1.2.0.post0.dev109 → cuqipy-1.2.0.post0.dev245}/cuqi/experimental/mcmc/_conjugate.py +0 -0
- {cuqipy-1.2.0.post0.dev109 → cuqipy-1.2.0.post0.dev245}/cuqi/experimental/mcmc/_conjugate_approx.py +0 -0
- {cuqipy-1.2.0.post0.dev109 → cuqipy-1.2.0.post0.dev245}/cuqi/experimental/mcmc/_cwmh.py +0 -0
- {cuqipy-1.2.0.post0.dev109 → cuqipy-1.2.0.post0.dev245}/cuqi/experimental/mcmc/_direct.py +0 -0
- {cuqipy-1.2.0.post0.dev109 → cuqipy-1.2.0.post0.dev245}/cuqi/experimental/mcmc/_gibbs.py +0 -0
- {cuqipy-1.2.0.post0.dev109 → cuqipy-1.2.0.post0.dev245}/cuqi/experimental/mcmc/_hmc.py +0 -0
- {cuqipy-1.2.0.post0.dev109 → cuqipy-1.2.0.post0.dev245}/cuqi/experimental/mcmc/_laplace_approximation.py +0 -0
- {cuqipy-1.2.0.post0.dev109 → cuqipy-1.2.0.post0.dev245}/cuqi/experimental/mcmc/_mh.py +0 -0
- {cuqipy-1.2.0.post0.dev109 → cuqipy-1.2.0.post0.dev245}/cuqi/experimental/mcmc/_pcn.py +0 -0
- {cuqipy-1.2.0.post0.dev109 → cuqipy-1.2.0.post0.dev245}/cuqi/experimental/mcmc/_rto.py +0 -0
- {cuqipy-1.2.0.post0.dev109 → cuqipy-1.2.0.post0.dev245}/cuqi/experimental/mcmc/_sampler.py +0 -0
- {cuqipy-1.2.0.post0.dev109 → cuqipy-1.2.0.post0.dev245}/cuqi/experimental/mcmc/_utilities.py +0 -0
- {cuqipy-1.2.0.post0.dev109 → cuqipy-1.2.0.post0.dev245}/cuqi/geometry/__init__.py +0 -0
- {cuqipy-1.2.0.post0.dev109 → cuqipy-1.2.0.post0.dev245}/cuqi/geometry/_geometry.py +0 -0
- {cuqipy-1.2.0.post0.dev109 → cuqipy-1.2.0.post0.dev245}/cuqi/implicitprior/_regularizedGMRF.py +0 -0
- {cuqipy-1.2.0.post0.dev109 → cuqipy-1.2.0.post0.dev245}/cuqi/implicitprior/_regularizedGaussian.py +0 -0
- {cuqipy-1.2.0.post0.dev109 → cuqipy-1.2.0.post0.dev245}/cuqi/implicitprior/_regularizedUnboundedUniform.py +0 -0
- {cuqipy-1.2.0.post0.dev109 → cuqipy-1.2.0.post0.dev245}/cuqi/likelihood/__init__.py +0 -0
- {cuqipy-1.2.0.post0.dev109 → cuqipy-1.2.0.post0.dev245}/cuqi/likelihood/_likelihood.py +0 -0
- {cuqipy-1.2.0.post0.dev109 → cuqipy-1.2.0.post0.dev245}/cuqi/model/__init__.py +0 -0
- {cuqipy-1.2.0.post0.dev109 → cuqipy-1.2.0.post0.dev245}/cuqi/model/_model.py +0 -0
- {cuqipy-1.2.0.post0.dev109 → cuqipy-1.2.0.post0.dev245}/cuqi/operator/__init__.py +0 -0
- {cuqipy-1.2.0.post0.dev109 → cuqipy-1.2.0.post0.dev245}/cuqi/operator/_operator.py +0 -0
- {cuqipy-1.2.0.post0.dev109 → cuqipy-1.2.0.post0.dev245}/cuqi/pde/__init__.py +0 -0
- {cuqipy-1.2.0.post0.dev109 → cuqipy-1.2.0.post0.dev245}/cuqi/pde/_pde.py +0 -0
- {cuqipy-1.2.0.post0.dev109 → cuqipy-1.2.0.post0.dev245}/cuqi/problem/__init__.py +0 -0
- {cuqipy-1.2.0.post0.dev109 → cuqipy-1.2.0.post0.dev245}/cuqi/problem/_problem.py +0 -0
- {cuqipy-1.2.0.post0.dev109 → cuqipy-1.2.0.post0.dev245}/cuqi/sampler/__init__.py +0 -0
- {cuqipy-1.2.0.post0.dev109 → cuqipy-1.2.0.post0.dev245}/cuqi/sampler/_conjugate.py +0 -0
- {cuqipy-1.2.0.post0.dev109 → cuqipy-1.2.0.post0.dev245}/cuqi/sampler/_conjugate_approx.py +0 -0
- {cuqipy-1.2.0.post0.dev109 → cuqipy-1.2.0.post0.dev245}/cuqi/sampler/_cwmh.py +0 -0
- {cuqipy-1.2.0.post0.dev109 → cuqipy-1.2.0.post0.dev245}/cuqi/sampler/_gibbs.py +0 -0
- {cuqipy-1.2.0.post0.dev109 → cuqipy-1.2.0.post0.dev245}/cuqi/sampler/_hmc.py +0 -0
- {cuqipy-1.2.0.post0.dev109 → cuqipy-1.2.0.post0.dev245}/cuqi/sampler/_langevin_algorithm.py +0 -0
- {cuqipy-1.2.0.post0.dev109 → cuqipy-1.2.0.post0.dev245}/cuqi/sampler/_laplace_approximation.py +0 -0
- {cuqipy-1.2.0.post0.dev109 → cuqipy-1.2.0.post0.dev245}/cuqi/sampler/_mh.py +0 -0
- {cuqipy-1.2.0.post0.dev109 → cuqipy-1.2.0.post0.dev245}/cuqi/sampler/_pcn.py +0 -0
- {cuqipy-1.2.0.post0.dev109 → cuqipy-1.2.0.post0.dev245}/cuqi/sampler/_rto.py +0 -0
- {cuqipy-1.2.0.post0.dev109 → cuqipy-1.2.0.post0.dev245}/cuqi/sampler/_sampler.py +0 -0
- {cuqipy-1.2.0.post0.dev109 → cuqipy-1.2.0.post0.dev245}/cuqi/samples/__init__.py +0 -0
- {cuqipy-1.2.0.post0.dev109 → cuqipy-1.2.0.post0.dev245}/cuqi/samples/_samples.py +0 -0
- {cuqipy-1.2.0.post0.dev109 → cuqipy-1.2.0.post0.dev245}/cuqi/solver/__init__.py +0 -0
- {cuqipy-1.2.0.post0.dev109 → cuqipy-1.2.0.post0.dev245}/cuqi/solver/_solver.py +0 -0
- {cuqipy-1.2.0.post0.dev109 → cuqipy-1.2.0.post0.dev245}/cuqi/testproblem/__init__.py +0 -0
- {cuqipy-1.2.0.post0.dev109 → cuqipy-1.2.0.post0.dev245}/cuqi/testproblem/_testproblem.py +0 -0
- {cuqipy-1.2.0.post0.dev109 → cuqipy-1.2.0.post0.dev245}/cuqi/utilities/__init__.py +0 -0
- {cuqipy-1.2.0.post0.dev109 → cuqipy-1.2.0.post0.dev245}/cuqi/utilities/_get_python_variable_name.py +0 -0
- {cuqipy-1.2.0.post0.dev109 → cuqipy-1.2.0.post0.dev245}/cuqi/utilities/_utilities.py +0 -0
- {cuqipy-1.2.0.post0.dev109 → cuqipy-1.2.0.post0.dev245}/pyproject.toml +0 -0
- {cuqipy-1.2.0.post0.dev109 → cuqipy-1.2.0.post0.dev245}/requirements.txt +0 -0
- {cuqipy-1.2.0.post0.dev109 → cuqipy-1.2.0.post0.dev245}/setup.cfg +0 -0
- {cuqipy-1.2.0.post0.dev109 → cuqipy-1.2.0.post0.dev245}/setup.py +0 -0
- {cuqipy-1.2.0.post0.dev109 → cuqipy-1.2.0.post0.dev245}/tests/test_MRFs.py +0 -0
- {cuqipy-1.2.0.post0.dev109 → cuqipy-1.2.0.post0.dev245}/tests/test_abstract_distribution_density.py +0 -0
- {cuqipy-1.2.0.post0.dev109 → cuqipy-1.2.0.post0.dev245}/tests/test_bayesian_inversion.py +0 -0
- {cuqipy-1.2.0.post0.dev109 → cuqipy-1.2.0.post0.dev245}/tests/test_density.py +0 -0
- {cuqipy-1.2.0.post0.dev109 → cuqipy-1.2.0.post0.dev245}/tests/test_distribution.py +0 -0
- {cuqipy-1.2.0.post0.dev109 → cuqipy-1.2.0.post0.dev245}/tests/test_distributions_shape.py +0 -0
- {cuqipy-1.2.0.post0.dev109 → cuqipy-1.2.0.post0.dev245}/tests/test_geometry.py +0 -0
- {cuqipy-1.2.0.post0.dev109 → cuqipy-1.2.0.post0.dev245}/tests/test_joint_distribution.py +0 -0
- {cuqipy-1.2.0.post0.dev109 → cuqipy-1.2.0.post0.dev245}/tests/test_likelihood.py +0 -0
- {cuqipy-1.2.0.post0.dev109 → cuqipy-1.2.0.post0.dev245}/tests/test_model.py +0 -0
- {cuqipy-1.2.0.post0.dev109 → cuqipy-1.2.0.post0.dev245}/tests/test_pde.py +0 -0
- {cuqipy-1.2.0.post0.dev109 → cuqipy-1.2.0.post0.dev245}/tests/test_posterior.py +0 -0
- {cuqipy-1.2.0.post0.dev109 → cuqipy-1.2.0.post0.dev245}/tests/test_problem.py +0 -0
- {cuqipy-1.2.0.post0.dev109 → cuqipy-1.2.0.post0.dev245}/tests/test_sampler.py +0 -0
- {cuqipy-1.2.0.post0.dev109 → cuqipy-1.2.0.post0.dev245}/tests/test_samples.py +0 -0
- {cuqipy-1.2.0.post0.dev109 → cuqipy-1.2.0.post0.dev245}/tests/test_solver.py +0 -0
- {cuqipy-1.2.0.post0.dev109 → cuqipy-1.2.0.post0.dev245}/tests/test_testproblem.py +0 -0
- {cuqipy-1.2.0.post0.dev109 → cuqipy-1.2.0.post0.dev245}/tests/test_utilities.py +0 -0

{cuqipy-1.2.0.post0.dev109 → cuqipy-1.2.0.post0.dev245}/CUQIpy.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: CUQIpy
-Version: 1.2.0.post0.dev109
+Version: 1.2.0.post0.dev245
 Summary: Computational Uncertainty Quantification for Inverse problems in Python
 Maintainer-email: "Nicolai A. B. Riis" <nabr@dtu.dk>, "Jakob S. Jørgensen" <jakj@dtu.dk>, "Amal M. Alghamdi" <amaal@dtu.dk>, Chao Zhang <chaz@dtu.dk>
 License: Apache License

{cuqipy-1.2.0.post0.dev109 → cuqipy-1.2.0.post0.dev245}/CUQIpy.egg-info/SOURCES.txt
@@ -65,6 +65,7 @@ cuqi/implicitprior/__init__.py
 cuqi/implicitprior/_regularizedGMRF.py
 cuqi/implicitprior/_regularizedGaussian.py
 cuqi/implicitprior/_regularizedUnboundedUniform.py
+cuqi/implicitprior/_restorator.py
 cuqi/likelihood/__init__.py
 cuqi/likelihood/_likelihood.py
 cuqi/model/__init__.py

{cuqipy-1.2.0.post0.dev109 → cuqipy-1.2.0.post0.dev245}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: CUQIpy
-Version: 1.2.0.post0.dev109
+Version: 1.2.0.post0.dev245
 Summary: Computational Uncertainty Quantification for Inverse problems in Python
 Maintainer-email: "Nicolai A. B. Riis" <nabr@dtu.dk>, "Jakob S. Jørgensen" <jakj@dtu.dk>, "Amal M. Alghamdi" <amaal@dtu.dk>, Chao Zhang <chaz@dtu.dk>
 License: Apache License

{cuqipy-1.2.0.post0.dev109 → cuqipy-1.2.0.post0.dev245}/cuqi/_version.py
@@ -8,11 +8,11 @@ import json

 version_json = '''
 {
- "date": "2024-11-…",
+ "date": "2024-11-08T12:37:05+0100",
  "dirty": false,
  "error": null,
- "full-revisionid": "…",
- "version": "1.2.0.post0.dev109"
+ "full-revisionid": "113dd1dc30ade5f182e79d003153bcce9aee1894",
+ "version": "1.2.0.post0.dev245"
 }
 ''' # END VERSION_JSON

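For context, cuqi/_version.py is generated by versioneer: the module embeds the release metadata as a JSON string and exposes it through a small accessor, roughly as sketched below (the get_versions helper follows the standard versioneer layout and sits outside this hunk).

import json

version_json = '''
{
 "date": "2024-11-08T12:37:05+0100",
 "dirty": false,
 "error": null,
 "full-revisionid": "113dd1dc30ade5f182e79d003153bcce9aee1894",
 "version": "1.2.0.post0.dev245"
}
'''  # END VERSION_JSON

def get_versions():
    # Parse the embedded JSON blob into a plain dict of version metadata.
    return json.loads(version_json)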

{cuqipy-1.2.0.post0.dev109 → cuqipy-1.2.0.post0.dev245}/cuqi/experimental/mcmc/__init__.py
@@ -109,7 +109,7 @@ Main changes for users


 from ._sampler import Sampler, ProposalBasedSampler
-from ._langevin_algorithm import ULA, MALA
+from ._langevin_algorithm import ULA, MALA, MYULA, PnPULA
 from ._mh import MH
 from ._pcn import PCN
 from ._rto import LinearRTO, RegularizedLinearRTO
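After this change the two new samplers are re-exported next to the existing Langevin samplers, so user code can import all four directly:

# The smoothing-based Langevin samplers added in this release, alongside ULA/MALA.
from cuqi.experimental.mcmc import ULA, MALA, MYULA, PnPULA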

cuqipy-1.2.0.post0.dev245/cuqi/experimental/mcmc/_langevin_algorithm.py (new file)
@@ -0,0 +1,389 @@
+import numpy as np
+import cuqi
+from cuqi.experimental.mcmc import Sampler
+from cuqi.implicitprior import RestorationPrior, MoreauYoshidaPrior
+from cuqi.array import CUQIarray
+from copy import deepcopy
+
+class ULA(Sampler): # Refactor to Proposal-based sampler?
+    """Unadjusted Langevin algorithm (ULA) (Roberts and Tweedie, 1996)
+
+    It approximately samples a distribution given its logpdf gradient based on
+    the Langevin diffusion dL_t = dW_t + 1/2*Nabla target.logd(L_t)dt, where
+    W_t is the `dim`-dimensional standard Brownian motion.
+    ULA results from the Euler-Maruyama discretization of this Langevin stochastic
+    differential equation (SDE).
+
+    For more details see: Roberts, G. O., & Tweedie, R. L. (1996). Exponential convergence
+    of Langevin distributions and their discrete approximations. Bernoulli, 341-363.
+
+    Parameters
+    ----------
+
+    target : `cuqi.distribution.Distribution`
+        The target distribution to sample. Must have logd and gradient method. Custom logpdfs
+        and gradients are supported by using a :class:`cuqi.distribution.UserDefinedDistribution`.
+
+    initial_point : ndarray
+        Initial parameters. *Optional*
+
+    scale : float
+        The Langevin diffusion discretization time step (in practice, scale must
+        be smaller than 1/L, where L is the Lipschitz constant of the gradient
+        of the log target density, logd).
+
+    callback : callable, *Optional*
+        If set this function will be called after every sample.
+        The signature of the callback function is `callback(sample, sample_index)`,
+        where `sample` is the current sample and `sample_index` is the index of the sample.
+        An example is shown in demos/demo31_callback.py.
+
+
+    Example
+    -------
+    .. code-block:: python
+
+        # Parameters
+        dim = 5 # Dimension of distribution
+        mu = np.arange(dim) # Mean of Gaussian
+        std = 1 # standard deviation of Gaussian
+
+        # Logpdf function
+        logpdf_func = lambda x: -1/(std**2)*np.sum((x-mu)**2)
+        gradient_func = lambda x: -2/(std**2)*(x - mu)
+
+        # Define distribution from logpdf and gradient as UserDefinedDistribution
+        target = cuqi.distribution.UserDefinedDistribution(dim=dim, logpdf_func=logpdf_func,
+                                                           gradient_func=gradient_func)
+
+        # Set up sampler
+        sampler = cuqi.experimental.mcmc.ULA(target, scale=1/dim**2)
+
+        # Sample
+        sampler.sample(2000)
+
+    A Deblur example can be found in demos/demo27_ULA.py
+    # TODO: update demo once sampler merged
+    """
+
+    _STATE_KEYS = Sampler._STATE_KEYS.union({'scale', 'current_target_grad'})
+
+    def __init__(self, target=None, scale=1.0, **kwargs):
+
+        super().__init__(target, **kwargs)
+        self.initial_scale = scale
+
+    def _initialize(self):
+        self.scale = self.initial_scale
+        self.current_target_grad = self._eval_target_grad(self.current_point)
+
+    def validate_target(self):
+        try:
+            self._eval_target_grad(np.ones(self.dim))
+            pass
+        except (NotImplementedError, AttributeError):
+            raise ValueError("The target needs to have a gradient method")
+
+    def _eval_target_logd(self, x):
+        return None
+
+    def _eval_target_grad(self, x):
+        return self.target.gradient(x)
+
+    def _accept_or_reject(self, x_star, target_eval_star, target_grad_star):
+        """
+        Accepts the proposed state and updates the sampler's state accordingly, i.e.,
+        current_point, current_target_eval, and current_target_grad_eval.
+
+        Parameters
+        ----------
+        x_star :
+            The proposed state
+
+        target_eval_star:
+            The log likelihood evaluated at x_star
+
+        target_grad_star:
+            The gradient of log likelihood evaluated at x_star
+
+        Returns
+        -------
+        scalar
+            1 (accepted)
+        """
+
+        self.current_point = x_star
+        self.current_target_grad = target_grad_star
+        acc = 1
+
+        return acc
+
+    def step(self):
+        # propose state
+        xi = cuqi.distribution.Normal(mean=np.zeros(self.dim), std=np.sqrt(self.scale)).sample()
+        x_star = self.current_point + 0.5*self.scale*self.current_target_grad + xi
+
+        # evaluate target
+        target_eval_star = self._eval_target_logd(x_star)
+        target_grad_star = self._eval_target_grad(x_star)
+
+        # accept or reject proposal
+        acc = self._accept_or_reject(x_star, target_eval_star, target_grad_star)
+
+        return acc
+
+    def tune(self, skip_len, update_count):
+        pass
+
+
+class MALA(ULA): # Refactor to Proposal-based sampler?
+    """ Metropolis-adjusted Langevin algorithm (MALA) (Roberts and Tweedie, 1996)
+
+    Samples a distribution given its logd and gradient (up to a constant) based on
+    the Langevin diffusion dL_t = dW_t + 1/2*Nabla target.logd(L_t)dt, where
+    W_t is the `dim`-dimensional standard Brownian motion.
+    A sample is first proposed by ULA and then accepted or rejected according
+    to a Metropolis-Hastings step.
+    This accept-reject step allows us to remove the asymptotic bias of ULA.
+
+    For more details see: Roberts, G. O., & Tweedie, R. L. (1996). Exponential convergence
+    of Langevin distributions and their discrete approximations. Bernoulli, 341-363.
+
+    Parameters
+    ----------
+
+    target : `cuqi.distribution.Distribution`
+        The target distribution to sample. Must have logpdf and gradient method. Custom logpdfs
+        and gradients are supported by using a :class:`cuqi.distribution.UserDefinedDistribution`.
+
+    initial_point : ndarray
+        Initial parameters. *Optional*
+
+    scale : float
+        The Langevin diffusion discretization time step (in practice, scale must
+        be smaller than 1/L, where L is the Lipschitz constant of the gradient
+        of the log target density, logd).
+
+    callback : callable, *Optional*
+        If set this function will be called after every sample.
+        The signature of the callback function is `callback(sample, sample_index)`,
+        where `sample` is the current sample and `sample_index` is the index of the sample.
+        An example is shown in demos/demo31_callback.py.
+
+
+    Example
+    -------
+    .. code-block:: python
+
+        # Parameters
+        dim = 5 # Dimension of distribution
+        mu = np.arange(dim) # Mean of Gaussian
+        std = 1 # standard deviation of Gaussian
+
+        # Logpdf function
+        logpdf_func = lambda x: -1/(std**2)*np.sum((x-mu)**2)
+        gradient_func = lambda x: -2/(std**2)*(x-mu)
+
+        # Define distribution from logpdf as UserDefinedDistribution (sample and gradients also supported)
+        target = cuqi.distribution.UserDefinedDistribution(dim=dim, logpdf_func=logpdf_func,
+                                                           gradient_func=gradient_func)
+
+        # Set up sampler
+        sampler = cuqi.experimental.mcmc.MALA(target, scale=1/5**2)
+
+        # Sample
+        sampler.sample(2000)
+
+    A Deblur example can be found in demos/demo28_MALA.py
+    # TODO: update demo once sampler merged
+    """
+
+    _STATE_KEYS = ULA._STATE_KEYS.union({'current_target_logd'})
+
+    def _initialize(self):
+        super()._initialize()
+        self.current_target_logd = self.target.logd(self.current_point)
+
+    def _eval_target_logd(self, x):
+        return self.target.logd(x)
+
+    def _accept_or_reject(self, x_star, target_eval_star, target_grad_star):
+        """
+        Accepts the proposed state according to a Metropolis step and updates
+        the sampler's state accordingly, i.e., current_point, current_target_eval,
+        and current_target_grad_eval.
+
+        Parameters
+        ----------
+        x_star :
+            The proposed state
+
+        target_eval_star:
+            The log likelihood evaluated at x_star
+
+        target_grad_star:
+            The gradient of log likelihood evaluated at x_star
+
+        Returns
+        -------
+        scalar
+            1 if accepted, 0 otherwise
+        """
+        log_target_ratio = target_eval_star - self.current_target_logd
+        log_prop_ratio = self._log_proposal(self.current_point, x_star, target_grad_star) \
+            - self._log_proposal(x_star, self.current_point, self.current_target_grad)
+        log_alpha = min(0, log_target_ratio + log_prop_ratio)
+
+        # accept/reject with Metropolis
+        acc = 0
+        log_u = np.log(np.random.rand())
+        if (log_u <= log_alpha) and \
+           (not np.isnan(target_eval_star)) and \
+           (not np.isinf(target_eval_star)):
+            self.current_point = x_star
+            self.current_target_logd = target_eval_star
+            self.current_target_grad = target_grad_star
+            acc = 1
+        return acc
+
+    def tune(self, skip_len, update_count):
+        pass
+
+    def _log_proposal(self, theta_star, theta_k, g_logpi_k):
+        mu = theta_k + ((self.scale)/2)*g_logpi_k
+        misfit = theta_star - mu
+        return -0.5*((1/(self.scale))*(misfit.T @ misfit))
+
+
+class MYULA(ULA):
+    """Moreau-Yoshida Unadjusted Langevin algorithm (MYULA) (Durmus et al., 2018)
+
+    Samples a smoothed target distribution given its smoothed logpdf gradient.
+    It is based on the Langevin diffusion dL_t = dW_t + 1/2*Nabla target.logd(L_t)dt,
+    where W_t is a `dim`-dimensional standard Brownian motion.
+    It targets a differentiable density (partially) smoothed by the Moreau-Yoshida
+    envelope. The smoothed target density can be made arbitrarily close to the
+    true unsmoothed target density.
+
+    For more details see: Durmus, Alain, Eric Moulines, and Marcelo Pereyra.
+    "Efficient Bayesian
+    computation by proximal Markov chain Monte Carlo: when Langevin meets Moreau."
+    SIAM Journal on Imaging Sciences 11.1 (2018): 473-506.
+
+    Parameters
+    ----------
+
+    target : `cuqi.distribution.Distribution`
+        The target distribution to sample from. The target distribution results from
+        a differentiable likelihood and a prior of type RestorationPrior.
+
+    initial_point : ndarray
+        Initial parameters. *Optional*
+
+    scale : float
+        The Langevin diffusion discretization time step (in practice, scale must
+        be smaller than 1/L, where L is the Lipschitz constant of the gradient
+        of the log target density, logd).
+
+    smoothing_strength : float
+        This parameter controls the smoothing strength of MYULA.
+
+    callback : callable, *Optional*
+        If set this function will be called after every sample.
+        The signature of the callback function is `callback(sample, sample_index)`,
+        where `sample` is the current sample and `sample_index` is the index of
+        the sample.
+        An example is shown in demos/demo31_callback.py.
+
+    A Deblur example can be found in demos/howtos/myula.py
+    # TODO: update demo once sampler merged
+    """
+    def __init__(self, target=None, scale=1.0, smoothing_strength=0.1, **kwargs):
+        self.smoothing_strength = smoothing_strength
+        super().__init__(target=target, scale=scale, **kwargs)
+
+    @Sampler.target.setter
+    def target(self, value):
+        """ Set the target density. Runs validation of the target. """
+        self._target = value
+
+        if self._target is not None:
+            # Create a smoothed target
+            self._smoothed_target = self._create_smoothed_target(value)
+
+            # Validate the target
+            self.validate_target()
+
+    def _create_smoothed_target(self, value):
+        """ Create a smoothed target using a Moreau-Yoshida envelope. """
+        copied_value = deepcopy(value)
+        if isinstance(copied_value.prior, RestorationPrior):
+            copied_value.prior = MoreauYoshidaPrior(
+                copied_value.prior,
+                self.smoothing_strength)
+        return copied_value
+
+    def validate_target(self):
+        # Call ULA target validation
+        super().validate_target()
+
+        # Additional validation for MYULA target
+        if isinstance(self.target.prior, MoreauYoshidaPrior):
+            raise ValueError(("The prior is already smoothed, apply"
+                              " ULA when using a MoreauYoshidaPrior."))
+        if not hasattr(self.target.prior, "restore"):
+            raise NotImplementedError(
+                ("Using MYULA with a prior that does not have a restore method"
+                 " is not supported.")
+            )
+
+    def _eval_target_grad(self, x):
+        return self._smoothed_target.gradient(x)
+
+class PnPULA(MYULA):
+    """Plug-and-Play Unadjusted Langevin algorithm (PnP-ULA)
+    (Laumont et al., 2022)
+
+    Samples a smoothed target distribution given its smoothed logpdf gradient based on
+    the Langevin diffusion dL_t = dW_t + 1/2*Nabla target.logd(L_t)dt, where W_t is
+    a `dim`-dimensional standard Brownian motion.
+    It targets a differentiable density (partially) smoothed by a convolution
+    with a Gaussian kernel with zero mean and variance smoothing_strength. The
+    smoothed target density can be made arbitrarily close to the
+    true unsmoothed target density.
+
+    For more details see: Laumont, R., Bortoli, V. D., Almansa, A., Delon, J.,
+    Durmus, A., & Pereyra, M. (2022). Bayesian imaging using plug & play priors:
+    when Langevin meets Tweedie. SIAM Journal on Imaging Sciences, 15(2), 701-737.
+
+    Parameters
+    ----------
+
+    target : `cuqi.distribution.Distribution`
+        The target distribution to sample. The target distribution results from
+        a differentiable likelihood and a prior of type RestorationPrior.
+
+    initial_point : ndarray
+        Initial parameters. *Optional*
+
+    scale : float
+        The Langevin diffusion discretization time step (in practice, a scale of
+        1/L, where L is the Lipschitz constant of the gradient of the log target
+        density, is recommended but not guaranteed to be the optimal choice).
+
+    smoothing_strength : float
+        This parameter controls the smoothing strength of PnP-ULA.
+
+
+    callback : callable, *Optional*
+        If set this function will be called after every sample.
+        The signature of the callback function is `callback(sample, sample_index)`,
+        where `sample` is the current sample and `sample_index` is the index of
+        the sample.
+        An example is shown in demos/demo31_callback.py.
+
+    # TODO: update demo once sampler merged
+    """
+    def __init__(self, target=None, scale=1.0, smoothing_strength=0.1, **kwargs):
+        super().__init__(target=target, scale=scale,
+                         smoothing_strength=smoothing_strength, **kwargs)
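To make the Euler-Maruyama discretization described in the docstrings above concrete, here is a minimal standalone sketch of one ULA step and its Metropolis-adjusted (MALA) variant in plain NumPy. It deliberately avoids the cuqi API: grad_logpi, logpi and scale stand in for target.gradient, target.logd and the samplers' scale parameter.

import numpy as np

def ula_step(x, grad_logpi, scale, rng):
    # Euler-Maruyama discretization of dL_t = dW_t + 1/2*grad logpi(L_t) dt
    xi = np.sqrt(scale) * rng.standard_normal(x.shape)
    return x + 0.5 * scale * grad_logpi(x) + xi

def mala_step(x, logpi, grad_logpi, scale, rng):
    # Propose with ULA, then correct with a Metropolis-Hastings step using the
    # Gaussian proposal q(x'|x) = N(x + scale/2 * grad_logpi(x), scale*I).
    x_star = ula_step(x, grad_logpi, scale, rng)
    def log_q(a, b):  # log q(a | b), up to an additive constant
        mu = b + 0.5 * scale * grad_logpi(b)
        return -0.5 * np.sum((a - mu) ** 2) / scale
    log_alpha = min(0.0, logpi(x_star) - logpi(x)
                    + log_q(x, x_star) - log_q(x_star, x))
    return x_star if np.log(rng.random()) <= log_alpha else x

# Usage on the 5-dimensional Gaussian from the docstring examples above:
rng = np.random.default_rng(0)
dim, std = 5, 1.0
mu = np.arange(dim)
logpi = lambda x: -np.sum((x - mu) ** 2) / std**2
grad_logpi = lambda x: -2 * (x - mu) / std**2
x = np.zeros(dim)
for _ in range(2000):
    x = mala_step(x, logpi, grad_logpi, 1.0 / dim**2, rng)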

{cuqipy-1.2.0.post0.dev109 → cuqipy-1.2.0.post0.dev245}/cuqi/implicitprior/__init__.py
@@ -1,3 +1,5 @@
 from ._regularizedGaussian import RegularizedGaussian, ConstrainedGaussian, NonnegativeGaussian
 from ._regularizedGMRF import RegularizedGMRF, ConstrainedGMRF, NonnegativeGMRF
 from ._regularizedUnboundedUniform import RegularizedUnboundedUniform
+from ._restorator import RestorationPrior, MoreauYoshidaPrior
+

cuqipy-1.2.0.post0.dev245/cuqi/implicitprior/_restorator.py (new file)
@@ -0,0 +1,223 @@
+from abc import ABC, abstractmethod
+from cuqi.distribution import Distribution
+import numpy as np
+
+class RestorationPrior(Distribution):
+    """
+    This class defines an implicit distribution associated with a restoration operator
+    (e.g. a denoiser). There are several works relating restoration operators with
+    priors, see
+    - Laumont et al. https://arxiv.org/abs/2103.04715
+    - Hu et al. https://openreview.net/pdf?id=x7d1qXEn1e
+    We cannot sample from this distribution, nor compute its logpdf except in
+    some cases. It allows us to apply algorithms such as MYULA and PnPULA.
+
+    Parameters
+    ----------
+    restorator : callable f(x, restoration_strength)
+        Function f that accepts input x to be restored and returns the
+        restored version of x and information about the restoration operation.
+
+    restorator_kwargs : dictionary
+        Dictionary containing information about the restorator.
+        It contains keyword argument parameters that will be passed to the
+        restorator f. An example could be algorithm parameters such as the number
+        of iterations or the stopping criteria.
+
+    potential : callable function, optional
+        The potential corresponds to the negative logpdf when it is accessible.
+        This function is a mapping from the parameter domain to the real set.
+        It can be provided if the user knows how to relate it to the restorator.
+        Ex: restorator is the proximal operator of the total variation (TV), then
+        potential is the TV function.
+    """
+    def __init__(self, restorator, restorator_kwargs=None,
+                 potential=None, **kwargs):
+        if restorator_kwargs is None:
+            restorator_kwargs = {}
+        self.restorator = restorator
+        self.restorator_kwargs = restorator_kwargs
+        self.potential = potential
+        super().__init__(**kwargs)
+
+    def restore(self, x, restoration_strength):
+        """This function allows us to restore the input x and returns the
+        restored version of x.
+
+        Parameters
+        ----------
+        x : ndarray
+            parameter we want to restore.
+
+        restoration_strength : positive float
+            Strength of the restoration operation. In the case where the
+            restorator is a denoiser, this parameter might correspond to the
+            noise level.
+        """
+        solution, info = self.restorator(x, restoration_strength=restoration_strength,
+                                         **self.restorator_kwargs)
+        self.info = info
+        return solution
+
+    def logpdf(self, x):
+        """The logpdf function. Returns nan when no potential is available,
+        since the logpdf of the implicit prior is then unknown."""
+        if self.potential is None:
+            return np.nan
+        else:
+            return -self.potential(x)
+
+    def _sample(self, N, rng=None):
+        raise NotImplementedError("The sample method is not implemented for the"
+                                  + " RestorationPrior class.")
+
+    @property
+    def _mutable_vars(self):
+        """ Returns the mutable variables of the distribution. """
+        # Currently mutable variables are not supported for user-defined
+        # distributions.
+        return []
+
+    def get_conditioning_variables(self):
+        """ Returns the conditioning variables of the distribution. """
+        # Currently conditioning variables are not supported for user-defined
+        # distributions.
+        return []
+
+
+class MoreauYoshidaPrior(Distribution):
+    """
+    This class defines (implicit) smoothed priors for which we can apply
+    gradient-based algorithms. The smoothing is performed using
+    the Moreau-Yoshida envelope of the target prior potential.
+
+    In the following we give a detailed explanation of the
+    Moreau-Yoshida smoothing.
+
+    We consider a density such that -\log\pi(x) = g(x) with g convex, lsc,
+    proper but not differentiable. Consequently, we cannot apply any
+    algorithm requiring the gradient of g.
+    Idea:
+    We consider the Moreau envelope of g defined as
+
+    g_{smoothing_strength}(x) = inf_z 0.5*\| x-z \|_2^2/smoothing_strength + g(z).
+
+    g_{smoothing_strength} has some nice properties:
+    - g_{smoothing_strength}(x) --> g(x) as smoothing_strength --> 0, for all x
+    - \nabla g_{smoothing_strength} is 1/smoothing_strength-Lipschitz
+    - \nabla g_{smoothing_strength}(x) = (x - prox_g^{smoothing_strength}(x))/smoothing_strength for all x, with
+
+    prox_g^{smoothing_strength}(x) = argmin_z 0.5*\| x-z \|_2^2/smoothing_strength + g(z).
+
+    Consequently, we can apply any gradient-based algorithm with
+    g_{smoothing_strength} in lieu of g. These algorithms do not require the
+    full knowledge of g_{smoothing_strength} but only its gradient. The gradient
+    of g_{smoothing_strength} is fully determined by prox_g^{smoothing_strength}
+    and smoothing_strength.
+    This is important as, although there exists an explicit formula for
+    g_{smoothing_strength}, it is rarely used in practice, as it would require
+    us to solve an optimization problem each time we want to
+    estimate g_{smoothing_strength}. Furthermore, there exist cases where we don't
+    know the regularization g with which the mapping prox_g^{smoothing_strength} is
+    associated.
+
+    Remark (Proximal operators are denoisers):
+    We consider the denoising inverse problem x = u + n, with
+    n \sim \mathcal{N}(0, smoothing_strength I).
+    A mapping solving a denoising inverse problem is called a denoiser. It takes
+    the noisy observation x as an input and returns a less noisy version of x,
+    which is an estimate of u.
+    We assume a prior density \pi(u) \propto exp(-g(u)).
+    Then the MAP estimate is given by
+    x_MAP = argmin_z 0.5*\| x-z \|_2^2/smoothing_strength + g(z) = prox_g^{smoothing_strength}(x).
+    Hence proximal operators are denoisers.
+
+    Remark (Denoisers are not necessarily proximal operators): Data-driven
+    denoisers are not necessarily proximal operators
+    (see https://arxiv.org/pdf/2201.13256).
+
+    Parameters
+    ----------
+    prior : RestorationPrior
+        Prior of the RestorationPrior type. In order to stay within the MYULA
+        framework the restorator of RestorationPrior must be a proximal operator.
+
+    smoothing_strength : float
+        Smoothing strength of the Moreau-Yoshida envelope of the prior potential.
+    """
+
+    def __init__(self, prior: RestorationPrior, smoothing_strength=0.1,
+                 **kwargs):
+        self.prior = prior
+        self.smoothing_strength = smoothing_strength
+
+        # if kwargs does not contain the geometry,
+        # we set it to the geometry of the prior, if it exists
+        if "geometry" in kwargs:
+            raise ValueError(
+                "The geometry parameter is not supported for the"
+                + " MoreauYoshidaPrior class. The geometry is"
+                + " automatically set to the geometry of the prior.")
+        try:
+            geometry = prior.geometry
+        except AttributeError:
+            geometry = None
+
+        super().__init__(geometry=geometry, **kwargs)
+
+    @property
+    def geometry(self):
+        return self.prior.geometry
+
+    @geometry.setter
+    def geometry(self, value):
+        self.prior.geometry = value
+
+    @property
+    def smoothing_strength(self):
+        """ smoothing_strength of the distribution """
+        return self._smoothing_strength
+
+    @smoothing_strength.setter
+    def smoothing_strength(self, value):
+        self._smoothing_strength = value
+
+    @property
+    def prior(self):
+        """Getter for the underlying RestorationPrior."""
+        return self._prior
+
+    @prior.setter
+    def prior(self, value):
+        self._prior = value
+
+    def gradient(self, x):
+        """Gradient of the log density of the smoothed prior, i.e. the negative
+        gradient of the Moreau-Yoshida envelope of the potential."""
+        return -(x - self.prior.restore(x, self.smoothing_strength))/self.smoothing_strength
+
+    def logpdf(self, x):
+        """The logpdf function. Returns nan when the potential of the underlying
+        prior is unknown; otherwise evaluates the smoothed logpdf (up to scaling)."""
+        if self.prior.potential is None:
+            return np.nan
+        else:
+            return -(self.prior.potential(self.prior.restore(x, self.smoothing_strength))*self.smoothing_strength +
+                     0.5*((x - self.prior.restore(x, self.smoothing_strength))**2).sum())
+
+    def _sample(self, N, rng=None):
+        raise NotImplementedError("The sample method is not implemented for the "
+                                  + f"{self.__class__.__name__} class.")
+
+    @property
+    def _mutable_vars(self):
+        """ Returns the mutable variables of the distribution. """
+        # Currently mutable variables are not supported for user-defined
+        # distributions.
+        return []
+
+    def get_conditioning_variables(self):
+        """ Returns the conditioning variables of the distribution. """
+        # Currently conditioning variables are not supported for user-defined
+        # distributions.
+        return []