CUQIpy-1.1.1.post0.dev36-py3-none-any.whl → cuqipy-1.4.1.post0.dev124-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their respective public registries.

Potentially problematic release: this version of CUQIpy might be problematic.

Files changed (92)
  1. cuqi/__init__.py +2 -0
  2. cuqi/_version.py +3 -3
  3. cuqi/algebra/__init__.py +2 -0
  4. cuqi/algebra/_abstract_syntax_tree.py +358 -0
  5. cuqi/algebra/_ordered_set.py +82 -0
  6. cuqi/algebra/_random_variable.py +457 -0
  7. cuqi/array/_array.py +4 -13
  8. cuqi/config.py +7 -0
  9. cuqi/density/_density.py +9 -1
  10. cuqi/distribution/__init__.py +3 -2
  11. cuqi/distribution/_beta.py +7 -11
  12. cuqi/distribution/_cauchy.py +2 -2
  13. cuqi/distribution/_custom.py +0 -6
  14. cuqi/distribution/_distribution.py +31 -45
  15. cuqi/distribution/_gamma.py +7 -3
  16. cuqi/distribution/_gaussian.py +2 -12
  17. cuqi/distribution/_inverse_gamma.py +4 -10
  18. cuqi/distribution/_joint_distribution.py +112 -15
  19. cuqi/distribution/_lognormal.py +0 -7
  20. cuqi/distribution/{_modifiedhalfnormal.py → _modified_half_normal.py} +23 -23
  21. cuqi/distribution/_normal.py +34 -7
  22. cuqi/distribution/_posterior.py +9 -0
  23. cuqi/distribution/_truncated_normal.py +129 -0
  24. cuqi/distribution/_uniform.py +47 -1
  25. cuqi/experimental/__init__.py +2 -2
  26. cuqi/experimental/_recommender.py +216 -0
  27. cuqi/geometry/__init__.py +2 -0
  28. cuqi/geometry/_geometry.py +15 -1
  29. cuqi/geometry/_product_geometry.py +181 -0
  30. cuqi/implicitprior/__init__.py +5 -3
  31. cuqi/implicitprior/_regularized_gaussian.py +483 -0
  32. cuqi/implicitprior/{_regularizedGMRF.py → _regularized_gmrf.py} +4 -2
  33. cuqi/implicitprior/{_regularizedUnboundedUniform.py → _regularized_unbounded_uniform.py} +3 -2
  34. cuqi/implicitprior/_restorator.py +269 -0
  35. cuqi/legacy/__init__.py +2 -0
  36. cuqi/{experimental/mcmc → legacy/sampler}/__init__.py +7 -11
  37. cuqi/legacy/sampler/_conjugate.py +55 -0
  38. cuqi/legacy/sampler/_conjugate_approx.py +52 -0
  39. cuqi/legacy/sampler/_cwmh.py +196 -0
  40. cuqi/legacy/sampler/_gibbs.py +231 -0
  41. cuqi/legacy/sampler/_hmc.py +335 -0
  42. cuqi/{experimental/mcmc → legacy/sampler}/_langevin_algorithm.py +82 -111
  43. cuqi/legacy/sampler/_laplace_approximation.py +184 -0
  44. cuqi/legacy/sampler/_mh.py +190 -0
  45. cuqi/legacy/sampler/_pcn.py +244 -0
  46. cuqi/{experimental/mcmc → legacy/sampler}/_rto.py +132 -90
  47. cuqi/legacy/sampler/_sampler.py +182 -0
  48. cuqi/likelihood/_likelihood.py +9 -1
  49. cuqi/model/__init__.py +1 -1
  50. cuqi/model/_model.py +1361 -359
  51. cuqi/pde/__init__.py +4 -0
  52. cuqi/pde/_observation_map.py +36 -0
  53. cuqi/pde/_pde.py +134 -33
  54. cuqi/problem/_problem.py +93 -87
  55. cuqi/sampler/__init__.py +120 -8
  56. cuqi/sampler/_conjugate.py +376 -35
  57. cuqi/sampler/_conjugate_approx.py +40 -16
  58. cuqi/sampler/_cwmh.py +132 -138
  59. cuqi/{experimental/mcmc → sampler}/_direct.py +1 -1
  60. cuqi/sampler/_gibbs.py +288 -130
  61. cuqi/sampler/_hmc.py +328 -201
  62. cuqi/sampler/_langevin_algorithm.py +284 -100
  63. cuqi/sampler/_laplace_approximation.py +87 -117
  64. cuqi/sampler/_mh.py +47 -157
  65. cuqi/sampler/_pcn.py +65 -213
  66. cuqi/sampler/_rto.py +211 -142
  67. cuqi/sampler/_sampler.py +553 -136
  68. cuqi/samples/__init__.py +1 -1
  69. cuqi/samples/_samples.py +24 -18
  70. cuqi/solver/__init__.py +6 -4
  71. cuqi/solver/_solver.py +230 -26
  72. cuqi/testproblem/_testproblem.py +2 -3
  73. cuqi/utilities/__init__.py +6 -1
  74. cuqi/utilities/_get_python_variable_name.py +2 -2
  75. cuqi/utilities/_utilities.py +182 -2
  76. {CUQIpy-1.1.1.post0.dev36.dist-info → cuqipy-1.4.1.post0.dev124.dist-info}/METADATA +10 -6
  77. cuqipy-1.4.1.post0.dev124.dist-info/RECORD +101 -0
  78. {CUQIpy-1.1.1.post0.dev36.dist-info → cuqipy-1.4.1.post0.dev124.dist-info}/WHEEL +1 -1
  79. CUQIpy-1.1.1.post0.dev36.dist-info/RECORD +0 -92
  80. cuqi/experimental/mcmc/_conjugate.py +0 -197
  81. cuqi/experimental/mcmc/_conjugate_approx.py +0 -81
  82. cuqi/experimental/mcmc/_cwmh.py +0 -191
  83. cuqi/experimental/mcmc/_gibbs.py +0 -268
  84. cuqi/experimental/mcmc/_hmc.py +0 -470
  85. cuqi/experimental/mcmc/_laplace_approximation.py +0 -156
  86. cuqi/experimental/mcmc/_mh.py +0 -78
  87. cuqi/experimental/mcmc/_pcn.py +0 -89
  88. cuqi/experimental/mcmc/_sampler.py +0 -561
  89. cuqi/experimental/mcmc/_utilities.py +0 -17
  90. cuqi/implicitprior/_regularizedGaussian.py +0 -323
  91. {CUQIpy-1.1.1.post0.dev36.dist-info → cuqipy-1.4.1.post0.dev124.dist-info/licenses}/LICENSE +0 -0
  92. {CUQIpy-1.1.1.post0.dev36.dist-info → cuqipy-1.4.1.post0.dev124.dist-info}/top_level.txt +0 -0
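Taken together, the file moves above reorganize the sampler modules: samplers that lived under cuqi.experimental.mcmc now appear under cuqi.sampler, while the previous cuqi.sampler implementations are retained under cuqi.legacy.sampler (the two file diffs below show examples of the latter). The following is a minimal sketch of what this implies for imports, assuming only the class and constructor names visible in this diff; the toy problem itself is illustrative and not taken from the package.

import numpy as np
import cuqi
from cuqi.sampler import LinearRTO                            # new home (was cuqi.experimental.mcmc)
from cuqi.legacy.sampler import LinearRTO as LegacyLinearRTO  # pre-reorganization implementation

# Illustrative toy posterior: identity forward model, Gaussian likelihood and prior
# (construction mirrors the docstring examples further down in this diff).
A = np.eye(5)
model = cuqi.model.LinearModel(A)
y_data = A @ np.ones(5) + 0.01 * np.random.randn(5)

likelihood = cuqi.distribution.Gaussian(mean=model, cov=0.01**2).to_likelihood(y_data)
prior = cuqi.distribution.Gaussian(mean=np.zeros(5), cov=1)
posterior = cuqi.distribution.Posterior(likelihood, prior)

# Same target, two namespaces; the sampling interfaces are not identical,
# so consult the respective modules before swapping one for the other.
new_sampler = LinearRTO(posterior)
legacy_sampler = LegacyLinearRTO(posterior)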
cuqi/legacy/sampler/_pcn.py
@@ -0,0 +1,244 @@
+ import numpy as np
+ import cuqi
+ from cuqi.legacy.sampler import Sampler
+
+ class pCN(Sampler):
+     #Samples target*proposal
+     #TODO. Check proposal, needs to be Gaussian and zero mean.
+     """Preconditioned Crank-Nicolson sampler
+
+     Parameters
+     ----------
+     target : `cuqi.distribution.Posterior` or tuple of likelihood and prior objects
+         If target is of type cuqi.distribution.Posterior, it represents the posterior distribution.
+         If target is a tuple of (cuqi.likelihood.Likelihood, cuqi.distribution.Distribution) objects,
+         the first element is considered the likelihood and the second is considered the prior.
+
+     scale : int
+
+     x0 : `np.ndarray`
+         Initial point for the sampler
+
+     callback : callable, *Optional*
+         If set this function will be called after every sample.
+         The signature of the callback function is `callback(sample, sample_index)`,
+         where `sample` is the current sample and `sample_index` is the index of the sample.
+         An example is shown in demos/demo31_callback.py.
+
+     Example
+     -------
+
+     This uses a custom logpdf and sample function.
+
+     .. code-block:: python
+
+         # Parameters
+         dim = 5 # Dimension of distribution
+         mu = np.arange(dim) # Mean of Gaussian
+         std = 1 # standard deviation of Gaussian
+
+         # Logpdf function of likelihood
+         logpdf_func = lambda x: -1/(std**2)*np.sum((x-mu)**2)
+
+         # sample function of prior N(0,I)
+         sample_func = lambda : 0 + 1*np.random.randn(dim,1)
+
+         # Define as UserDefinedDistributions
+         likelihood = cuqi.likelihood.UserDefinedLikelihood(dim=dim, logpdf_func=logpdf_func)
+         prior = cuqi.distribution.UserDefinedDistribution(dim=dim, sample_func=sample_func)
+
+         # Set up sampler
+         sampler = cuqi.legacy.sampler.pCN((likelihood,prior), scale = 0.1)
+
+         # Sample
+         samples = sampler.sample(5000)
+
+     Example
+     -------
+
+     This uses CUQIpy distributions.
+
+     .. code-block:: python
+
+         # Parameters
+         dim = 5 # Dimension of distribution
+         mu = np.arange(dim) # Mean of Gaussian
+         std = 1 # standard deviation of Gaussian
+
+         # Define as UserDefinedDistributions
+         model = cuqi.model.Model(lambda x: x, range_geometry=dim, domain_geometry=dim)
+         likelihood = cuqi.distribution.Gaussian(mean=model, cov=np.ones(dim)).to_likelihood(mu)
+         prior = cuqi.distribution.Gaussian(mean=np.zeros(dim), cov=1)
+
+         target = cuqi.distribution.Posterior(likelihood, prior)
+
+         # Set up sampler
+         sampler = cuqi.legacy.sampler.pCN(target, scale = 0.1)
+
+         # Sample
+         samples = sampler.sample(5000)
+
+     """
+     def __init__(self, target, scale=None, x0=None, **kwargs):
+         super().__init__(target, x0=x0, dim=None, **kwargs)
+         self.scale = scale
+
+     @property
+     def prior(self):
+         if isinstance(self.target, cuqi.distribution.Posterior):
+             return self.target.prior
+         elif isinstance(self.target,tuple) and len(self.target)==2:
+             return self.target[1]
+
+     @property
+     def likelihood(self):
+         if isinstance(self.target, cuqi.distribution.Posterior):
+             return self.target.likelihood
+         elif isinstance(self.target,tuple) and len(self.target)==2:
+             return self.target[0]
+
+
+     @Sampler.target.setter
+     def target(self, value):
+         if isinstance(value, cuqi.distribution.Posterior):
+             self._target = value
+             self._loglikelihood = lambda x : self.likelihood.logd(x)
+         elif isinstance(value,tuple) and len(value)==2 and \
+             (isinstance(value[0], cuqi.likelihood.Likelihood) or isinstance(value[0], cuqi.likelihood.UserDefinedLikelihood)) and \
+             isinstance(value[1], cuqi.distribution.Distribution):
+             self._target = value
+             self._loglikelihood = lambda x : self.likelihood.logd(x)
+         else:
+             raise ValueError(f"To initialize an object of type {self.__class__}, 'target' need to be of type 'cuqi.distribution.Posterior'.")
+
+         #TODO:
+         #if not isinstance(self.prior,(cuqi.distribution.Gaussian, cuqi.distribution.Normal)):
+         #    raise ValueError("The prior distribution of the target need to be Gaussian")
+
+     @property
+     def dim(self):
+         if hasattr(self,'target') and hasattr(self.target,'dim'):
+             self._dim = self.target.dim
+         elif hasattr(self,'target') and isinstance(self.target,tuple) and len(self.target)==2:
+             self._dim = self.target[0].dim
+         return self._dim
+
+     def _sample(self, N, Nb):
+         if self.scale is None:
+             raise ValueError("Scale must be set to sample without adaptation. Consider using sample_adapt instead.")
+
+         Ns = N+Nb # number of simulations
+
+         # allocation
+         samples = np.empty((self.dim, Ns))
+         loglike_eval = np.empty(Ns)
+         acc = np.zeros(Ns, dtype=int)
+
+         # initial state
+         samples[:, 0] = self.x0
+         loglike_eval[0] = self._loglikelihood(self.x0)
+         acc[0] = 1
+
+         # run MCMC
+         for s in range(Ns-1):
+             # run component by component
+             samples[:, s+1], loglike_eval[s+1], acc[s+1] = self.single_update(samples[:, s], loglike_eval[s])
+
+             self._print_progress(s+2,Ns) #s+2 is the sample number, s+1 is index assuming x0 is the first sample
+             self._call_callback(samples[:, s+1], s+1)
+
+         # remove burn-in
+         samples = samples[:, Nb:]
+         loglike_eval = loglike_eval[Nb:]
+         accave = acc[Nb:].mean()
+         print('\nAverage acceptance rate:', accave, '\n')
+         #
+         return samples, loglike_eval, accave
+
+     def _sample_adapt(self, N, Nb):
+         # Set intial scale if not set
+         if self.scale is None:
+             self.scale = 0.1
+
+         Ns = N+Nb # number of simulations
+
+         # allocation
+         samples = np.empty((self.dim, Ns))
+         loglike_eval = np.empty(Ns)
+         acc = np.zeros(Ns)
+
+         # initial state
+         samples[:, 0] = self.x0
+         loglike_eval[0] = self._loglikelihood(self.x0)
+         acc[0] = 1
+
+         # initial adaptation params
+         Na = int(0.1*N) # iterations to adapt
+         hat_acc = np.empty(int(np.floor(Ns/Na))) # average acceptance rate of the chains
+         lambd = self.scale
+         star_acc = 0.44 # target acceptance rate RW
+         i, idx = 0, 0
+
+         # run MCMC
+         for s in range(Ns-1):
+             # run component by component
+             samples[:, s+1], loglike_eval[s+1], acc[s+1] = self.single_update(samples[:, s], loglike_eval[s])
+
+             # adapt prop spread using acc of past samples
+             if ((s+1) % Na == 0):
+                 # evaluate average acceptance rate
+                 hat_acc[i] = np.mean(acc[idx:idx+Na])
+
+                 # d. compute new scaling parameter
+                 zeta = 1/np.sqrt(i+1) # ensures that the variation of lambda(i) vanishes
+                 lambd = np.exp(np.log(lambd) + zeta*(hat_acc[i]-star_acc))
+
+                 # update parameters
+                 self.scale = min(lambd, 1)
+
+                 # update counters
+                 i += 1
+                 idx += Na
+
+             # display iterations
+             if ((s+1) % (max(Ns//100,1))) == 0 or (s+1) == Ns-1:
+                 print("\r",'Sample', s+1, '/', Ns, end="")
+
+             self._call_callback(samples[:, s+1], s+1)
+
+         print("\r",'Sample', s+2, '/', Ns)
+
+         # remove burn-in
+         samples = samples[:, Nb:]
+         loglike_eval = loglike_eval[Nb:]
+         accave = acc[Nb:].mean()
+         print('\nAverage acceptance rate:', accave, 'MCMC scale:', self.scale, '\n')
+
+         return samples, loglike_eval, accave
+
+     def single_update(self, x_t, loglike_eval_t):
+         # propose state
+         xi = self.prior.sample(1).flatten() # sample from the prior
+         x_star = np.sqrt(1-self.scale**2)*x_t + self.scale*xi # pCN proposal
+
+         # evaluate target
+         loglike_eval_star = self._loglikelihood(x_star)
+
+         # ratio and acceptance probability
+         ratio = loglike_eval_star - loglike_eval_t # proposal is symmetric
+         alpha = min(0, ratio)
+
+         # accept/reject
+         u_theta = np.log(np.random.rand())
+         if (u_theta <= alpha):
+             x_next = x_star
+             loglike_eval_next = loglike_eval_star
+             acc = 1
+         else:
+             x_next = x_t
+             loglike_eval_next = loglike_eval_t
+             acc = 0
+
+         return x_next, loglike_eval_next, acc
+
+
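For reference, the move implemented in single_update above is the standard preconditioned Crank-Nicolson proposal. Writing beta for the scale parameter and ell for the log-likelihood evaluated by self._loglikelihood, the update summarized from the code (not additional documentation from the package) is

    x^{\star} = \sqrt{1-\beta^{2}}\, x_t + \beta\, \xi, \qquad \xi \sim \pi_{\mathrm{prior}},
    \qquad
    \alpha(x_t, x^{\star}) = \min\!\bigl(1,\ \exp\bigl(\ell(x^{\star}) - \ell(x_t)\bigr)\bigr).

Because the proposal is reversible with respect to the prior, the prior density cancels from the Metropolis-Hastings ratio and only the likelihood remains, which is why the code accepts whenever log u <= min(0, ell(x*) - ell(x_t)).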
cuqi/{experimental/mcmc → legacy/sampler}/_rto.py
@@ -4,14 +4,14 @@ from scipy.sparse.linalg import LinearOperator as scipyLinearOperator
  import numpy as np
  import cuqi
  from cuqi.solver import CGLS, FISTA
- from cuqi.experimental.mcmc import Sampler
+ from cuqi.legacy.sampler import Sampler


  class LinearRTO(Sampler):
      """
      Linear RTO (Randomize-Then-Optimize) sampler.

-     Samples posterior related to the inverse problem with Gaussian likelihood and prior, and where the forward model is Linear.
+     Samples posterior related to the inverse problem with Gaussian likelihood and prior, and where the forward model is linear or more generally affine.

      Parameters
      ------------
@@ -22,12 +22,12 @@ class LinearRTO(Sampler):

      Here:
      data: is a m-dimensional numpy array containing the measured data.
-     model: is a m by n dimensional matrix or LinearModel representing the forward model.
+     model: is a m by n dimensional matrix, AffineModel or LinearModel representing the forward model.
      L_sqrtprec: is the squareroot of the precision matrix of the Gaussian likelihood.
      P_mean: is the prior mean.
      P_sqrtprec: is the squareroot of the precision matrix of the Gaussian mean.

-     initial_point : `np.ndarray`
+     x0 : `np.ndarray`
          Initial point for the sampler. *Optional*.

      maxit : int
@@ -43,48 +43,59 @@ class LinearRTO(Sampler):
          An example is shown in demos/demo31_callback.py.

      """
-     def __init__(self, target=None, initial_point=None, maxit=10, tol=1e-6, **kwargs):
-
-         super().__init__(target=target, initial_point=initial_point, **kwargs)
+     def __init__(self, target, x0=None, maxit=10, tol=1e-6, shift=0, **kwargs):
+
+         # Accept tuple of inputs and construct posterior
+         if isinstance(target, tuple) and len(target) == 5:
+             # Structure (data, model, L_sqrtprec, P_mean, P_sqrtprec)
+             data = target[0]
+             model = target[1]
+             L_sqrtprec = target[2]
+             P_mean = target[3]
+             P_sqrtprec = target[4]
+
+             # If numpy matrix convert to CUQI model
+             if isinstance(model, np.ndarray) and len(model.shape) == 2:
+                 model = cuqi.model.LinearModel(model)
+
+             # Check model input
+             if not isinstance(model, cuqi.model.AffineModel):
+                 raise TypeError("Model needs to be cuqi.model.AffineModel or matrix")
+
+             # Likelihood
+             L = cuqi.distribution.Gaussian(model, sqrtprec=L_sqrtprec).to_likelihood(data)
+
+             # Prior TODO: allow multiple priors stacked
+             #if isinstance(P_mean, list) and isinstance(P_sqrtprec, list):
+             #    P = cuqi.distribution.JointGaussianSqrtPrec(P_mean, P_sqrtprec)
+             #else:
+             P = cuqi.distribution.Gaussian(P_mean, sqrtprec=P_sqrtprec)
+
+             # Construct posterior
+             target = cuqi.distribution.Posterior(L, P)
+
+         super().__init__(target, x0=x0, **kwargs)
+
+         self._check_posterior()
+
+         # Modify initial guess
+         if x0 is not None:
+             self.x0 = x0
+         else:
+             self.x0 = np.zeros(self.prior.dim)

          # Other parameters
          self.maxit = maxit
-         self.tol = tol
-
-     def _initialize(self):
-         self._precompute()
-
-     @property
-     def prior(self):
-         return self.target.prior
-
-     @property
-     def likelihood(self):
-         return self.target.likelihood
-
-     @property
-     def likelihoods(self):
-         if isinstance(self.target, cuqi.distribution.Posterior):
-             return [self.target.likelihood]
-         elif isinstance(self.target, cuqi.distribution.MultipleLikelihoodPosterior):
-             return self.target.likelihoods
-
-     @property
-     def model(self):
-         return self.target.model
-
-     @property
-     def data(self):
-         return self.target.data
-
-     def _precompute(self):
+         self.tol = tol
+         self.shift = 0
+
          L1 = [likelihood.distribution.sqrtprec for likelihood in self.likelihoods]
          L2 = self.prior.sqrtprec
          L2mu = self.prior.sqrtprecTimesMean

          # pre-computations
-         self.n = self.prior.dim
-         self.b_tild = np.hstack([L@likelihood.data for (L, likelihood) in zip(L1, self.likelihoods)]+ [L2mu])
+         self.n = len(self.x0)
+         self.b_tild = np.hstack([L@(likelihood.data - model._shift) for (L, likelihood, model) in zip(L1, self.likelihoods, self.models)]+ [L2mu])

          callability = [callable(likelihood.model) for likelihood in self.likelihoods]
          notcallability = [not c for c in callability]
@@ -94,7 +105,7 @@ class LinearRTO(Sampler):
              # in this case, model is a function doing forward and backward operations
              def M(x, flag):
                  if flag == 1:
-                     out1 = [L @ likelihood.model.forward(x) for (L, likelihood) in zip(L1, self.likelihoods)]
+                     out1 = [L @ likelihood.model._forward_func_no_shift(x) for (L, likelihood) in zip(L1, self.likelihoods)] # Use forward function which excludes shift
                      out2 = L2 @ x
                      out = np.hstack(out1 + [out2])
                  elif flag == 2:
@@ -103,34 +114,72 @@ class LinearRTO(Sampler):
                      out1 = np.zeros(self.n)
                      for likelihood in self.likelihoods:
                          idx_end += len(likelihood.data)
-                         out1 += likelihood.model.adjoint(likelihood.distribution.sqrtprec.T@x[idx_start:idx_end])
+                         out1 += likelihood.model._adjoint_func_no_shift(likelihood.distribution.sqrtprec.T@x[idx_start:idx_end]) # Use adjoint function which excludes shift
                          idx_start = idx_end
                      out2 = L2.T @ x[idx_end:]
                      out = out1 + out2
                  return out
              self.M = M
          else:
-             raise TypeError("All likelihoods need to be callable or none need to be callable.")
+             raise TypeError("All likelihoods need to be callable or none need to be callable.")

-     def step(self):
-         y = self.b_tild + np.random.randn(len(self.b_tild))
-         sim = CGLS(self.M, y, self.current_point, self.maxit, self.tol)
-         self.current_point, _ = sim.solve()
-         acc = 1
-         return acc
+     @property
+     def prior(self):
+         return self.target.prior

-     def tune(self, skip_len, update_count):
-         pass
+     @property
+     def likelihood(self):
+         return self.target.likelihood

-     def validate_target(self):
+     @property
+     def likelihoods(self):
+         if isinstance(self.target, cuqi.distribution.Posterior):
+             return [self.target.likelihood]
+         elif isinstance(self.target, cuqi.distribution.MultipleLikelihoodPosterior):
+             return self.target.likelihoods
+
+     @property
+     def model(self):
+         return self.target.model
+
+     @property
+     def models(self):
+         if isinstance(self.target, cuqi.distribution.Posterior):
+             return [self.target.model]
+         elif isinstance(self.target, cuqi.distribution.MultipleLikelihoodPosterior):
+             return self.target.models
+
+     def _sample(self, N, Nb):
+         Ns = N+Nb # number of simulations
+         samples = np.empty((self.n, Ns))
+
+         # initial state
+         samples[:, 0] = self.x0
+         for s in range(Ns-1):
+             y = self.b_tild + np.random.randn(len(self.b_tild))
+             sim = CGLS(self.M, y, samples[:, s], self.maxit, self.tol, self.shift)
+             samples[:, s+1], _ = sim.solve()
+
+             self._print_progress(s+2,Ns) #s+2 is the sample number, s+1 is index assuming x0 is the first sample
+             self._call_callback(samples[:, s+1], s+1)
+
+         # remove burn-in
+         samples = samples[:, Nb:]
+
+         return samples, None, None
+
+     def _sample_adapt(self, N, Nb):
+         return self._sample(N,Nb)
+
+     def _check_posterior(self):
          # Check target type
          if not isinstance(self.target, (cuqi.distribution.Posterior, cuqi.distribution.MultipleLikelihoodPosterior)):
              raise ValueError(f"To initialize an object of type {self.__class__}, 'target' need to be of type 'cuqi.distribution.Posterior' or 'cuqi.distribution.MultipleLikelihoodPosterior'.")

          # Check Linear model and Gaussian likelihood(s)
          if isinstance(self.target, cuqi.distribution.Posterior):
-             if not isinstance(self.model, cuqi.model.LinearModel):
-                 raise TypeError("Model needs to be linear")
+             if not isinstance(self.model, cuqi.model.AffineModel):
+                 raise TypeError("Model needs to be linear or affine")

              if not hasattr(self.likelihood.distribution, "sqrtprec"):
                  raise TypeError("Distribution in Likelihood must contain a sqrtprec attribute")
@@ -149,10 +198,7 @@ class LinearRTO(Sampler):

          if not hasattr(self.prior, "sqrtprecTimesMean"):
              raise TypeError("Prior must contain a sqrtprecTimesMean attribute")
-
-     def _get_default_initial_point(self, dim):
-         """ Get the default initial point for the sampler. Defaults to an array of zeros. """
-         return np.zeros(dim)
+

  class RegularizedLinearRTO(LinearRTO):
      """
@@ -163,9 +209,9 @@ class RegularizedLinearRTO(LinearRTO):
      Parameters
      ------------
      target : `cuqi.distribution.Posterior`
-         See `cuqi.sampler.LinearRTO`
+         See `cuqi.legacy.sampler.LinearRTO`

-     initial_point : `np.ndarray`
+     x0 : `np.ndarray`
          Initial point for the sampler. *Optional*.

      maxit : int
@@ -177,9 +223,6 @@ class RegularizedLinearRTO(LinearRTO):

      abstol : float
          Absolute tolerance of the inner FISTA solver. *Optional*.
-
-     adaptive : bool
-         If True, FISTA is used as inner solver, otherwise ISTA is used. *Optional*.

      callback : callable, *Optional*
          If set this function will be called after every sample.
@@ -188,32 +231,27 @@ class RegularizedLinearRTO(LinearRTO):
          An example is shown in demos/demo31_callback.py.

      """
-     def __init__(self, target=None, initial_point=None, maxit=100, stepsize="automatic", abstol=1e-10, adaptive=True, **kwargs):
+     def __init__(self, target, x0=None, maxit=100, stepsize = "automatic", abstol=1e-10, adaptive = True, **kwargs):
+
+         if not callable(target.prior.proximal):
+             raise TypeError("Projector needs to be callable")

-         super().__init__(target=target, initial_point=initial_point, **kwargs)
+         super().__init__(target, x0=x0, maxit=100, **kwargs)

          # Other parameters
          self.stepsize = stepsize
          self.abstol = abstol
          self.adaptive = adaptive
-         self.maxit = maxit
-
-     def _initialize(self):
-         super()._initialize()
-         self._stepsize = self._choose_stepsize()
+         self.proximal = target.prior.proximal

      @property
-     def proximal(self):
-         return self.target.prior.proximal
-
-     def validate_target(self):
-         super().validate_target()
-         if not isinstance(self.target.prior, (cuqi.implicitprior.RegularizedGaussian, cuqi.implicitprior.RegularizedGMRF)):
-             raise TypeError("Prior needs to be RegularizedGaussian or RegularizedGMRF")
-         if not callable(self.proximal):
-             raise TypeError("Proximal needs to be callable")
-
-     def _choose_stepsize(self):
+     def prior(self):
+         return self.target.prior.gaussian
+
+     def _sample(self, N, Nb):
+         Ns = N+Nb # number of simulations
+         samples = np.empty((self.n, Ns))
+
          if isinstance(self.stepsize, str):
              if self.stepsize in ["automatic"]:
                  if not callable(self.M):
@@ -227,16 +265,20 @@ class RegularizedLinearRTO(LinearRTO):
                  raise ValueError("Stepsize choice not supported")
          else:
              _stepsize = self.stepsize
-         return _stepsize
+
+         # initial state
+         samples[:, 0] = self.x0
+         for s in range(Ns-1):
+             y = self.b_tild + np.random.randn(len(self.b_tild))
+             sim = FISTA(self.M, y, self.proximal,
+                         samples[:, s], maxit = self.maxit, stepsize = _stepsize, abstol = self.abstol, adaptive = self.adaptive)
+             samples[:, s+1], _ = sim.solve()
+
+             self._print_progress(s+2,Ns) #s+2 is the sample number, s+1 is index assuming x0 is the first sample
+             self._call_callback(samples[:, s+1], s+1)
+         # remove burn-in
+         samples = samples[:, Nb:]
+
+         return samples, None, None

-     @property
-     def prior(self):
-         return self.target.prior.gaussian

-     def step(self):
-         y = self.b_tild + np.random.randn(len(self.b_tild))
-         sim = FISTA(self.M, y, self.current_point, self.proximal,
-                     maxit = self.maxit, stepsize = self._stepsize, abstol = self.abstol, adaptive = self.adaptive)
-         self.current_point, _ = sim.solve()
-         acc = 1
-         return acc
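As a summary of the sampling step coded in LinearRTO._sample above (notation introduced here only for illustration): let A be the affine forward model with shift s, L1 the likelihood square-root precision, L2 the prior square-root precision, mu the prior mean and y the data. The precomputed stacked operator M and right-hand side b-tilde correspond to self.M and self.b_tild, and each new sample solves a randomly perturbed least-squares problem with CGLS (FISTA combined with the prior's proximal operator in RegularizedLinearRTO):

    M = \begin{pmatrix} L_{1} A \\ L_{2} \end{pmatrix}, \qquad
    \tilde{b} = \begin{pmatrix} L_{1}\,(y - s) \\ L_{2}\,\mu \end{pmatrix}, \qquad
    x^{(k+1)} = \arg\min_{x} \bigl\| M x - (\tilde{b} + \varepsilon) \bigr\|_{2}^{2}, \quad \varepsilon \sim \mathcal{N}(0, I).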