CUQIpy 0.5.0.post0.dev5__py3-none-any.whl → 0.5.0.post0.dev19__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of CUQIpy might be problematic; review the changes listed below for details.

@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: CUQIpy
3
- Version: 0.5.0.post0.dev5
3
+ Version: 0.5.0.post0.dev19
4
4
  Summary: Computational Uncertainty Quantification for Inverse problems in Python
5
5
  Maintainer-email: "Nicolai A. B. Riis" <nabr@dtu.dk>, "Jakob S. Jørgensen" <jakj@dtu.dk>, "Amal M. Alghamdi" <amaal@dtu.dk>
6
6
  License: Apache License
@@ -1,6 +1,6 @@
1
1
  cuqi/__init__.py,sha256=K0ss2HNqoLUX7wGpSZdaPKxIaKdRS452fcJm4D0pcEs,433
2
2
  cuqi/_messages.py,sha256=fzEBrZT2kbmfecBBPm7spVu7yHdxGARQB4QzXhJbCJ0,415
3
- cuqi/_version.py,sha256=WVjSXW13o8zy07cCEoHRqZbhu68pBwi9VKjaFhBJ6kk,508
3
+ cuqi/_version.py,sha256=r_FphDLbYCelc5DxEAgEScmvtxrW3L7f79dMosuC-jU,509
4
4
  cuqi/config.py,sha256=wcYvz19wkeKW2EKCGIKJiTpWt5kdaxyt4imyRkvtTRA,526
5
5
  cuqi/diagnostics.py,sha256=5OrbJeqpynqRXOe5MtOKKhe7EAVdOEpHIqHnlMW9G_c,3029
6
6
  cuqi/array/__init__.py,sha256=-EeiaiWGNsE3twRS4dD814BIlfxEsNkTCZUc5gjOXb0,30
@@ -31,7 +31,7 @@ cuqi/distribution/_normal.py,sha256=UeoTtGDT7YSf4ZNo2amlVF9K-YQpYbf8q76jcRJTVFw,
31
31
  cuqi/distribution/_posterior.py,sha256=zAfL0GECxekZ2lBt1W6_LN0U_xskMwK4VNce5xAF7ig,5018
32
32
  cuqi/distribution/_uniform.py,sha256=7xJmCZH_LPhuGkwEDGh-_CTtzcWKrXMOxtTJUFb7Ydo,1607
33
33
  cuqi/geometry/__init__.py,sha256=Tz1WGzZBY-QGH3c0GiyKm9XHN8MGGcnU6TUHLZkzB3o,842
34
- cuqi/geometry/_geometry.py,sha256=2a_J_TASNe1W5v7zYWI-Zw_tNvYBBmc5en--Gy3RcP8,43683
34
+ cuqi/geometry/_geometry.py,sha256=WYFC-4_VBTW73b2ldsnfGYKvdSiCE8plr89xTSmkadg,46804
35
35
  cuqi/likelihood/__init__.py,sha256=ZUuc7ysBoFvx-L_IKetlKgqht9k8x4kMA_iQA0yuQFM,1774
36
36
  cuqi/likelihood/_likelihood.py,sha256=z3AXAbIrv_DjOYh4jy3iDHemuIFUUJu6wdvJ5e2dgW0,6913
37
37
  cuqi/model/__init__.py,sha256=IcN4aZCnyp9o-8TNIoZ8vew99QQgi0EmZvnsIuR6qYI,49
@@ -47,7 +47,7 @@ cuqi/sampler/_conjugate.py,sha256=Ip3HM12j8Bq9T0925N007_o-2xrKsn6q-KMf6vqYPVs,15
47
47
  cuqi/sampler/_conjugate_approx.py,sha256=xX-X71EgxGnZooOY6CIBhuJTs3dhcKfoLnoFxX3CO2g,1938
48
48
  cuqi/sampler/_cwmh.py,sha256=VlAVT1SXQU0yD5ZeR-_ckWvX-ifJrMweFFdFbxdfB_k,7775
49
49
  cuqi/sampler/_gibbs.py,sha256=4q4PGqLpbGTe5-n8nwzCNEoCKYO43T4lxkFJ5H-eQNQ,8626
50
- cuqi/sampler/_hmc.py,sha256=S83pTw25oHk9R7o1dwXX-pymrZQpn6t-3Tf2wP1hSTw,12140
50
+ cuqi/sampler/_hmc.py,sha256=76nPkvNU0wLSg4qvm-1s048MzQasl5Qk94sHpyeJ5hM,14819
51
51
  cuqi/sampler/_langevin_algorithm.py,sha256=Xk6BkhzwJesKMZEIZMYtbc-2KU7Essdk6abLvthMkRY,7860
52
52
  cuqi/sampler/_laplace_approximation.py,sha256=TyvigbxB1L2ClPhk5xL2WEwv12wMvS6hRV5NMDsyek8,6456
53
53
  cuqi/sampler/_mh.py,sha256=V5tIdn-KdfWo4J_Nbf-AH6XwKWblWUyc4BeuSikUHsE,7062
@@ -63,8 +63,8 @@ cuqi/testproblem/_testproblem.py,sha256=1a52jc92NqsLRGyM5DLySNW0lVVOC6tkyKM_QV4K
63
63
  cuqi/utilities/__init__.py,sha256=EfxHLdsyDNugbmbzs43nV_AeKcycM9sVBjG9WZydagA,351
64
64
  cuqi/utilities/_get_python_variable_name.py,sha256=QwlBVj2koJRA8s8pWd554p7-ElcI7HUwY32HknaR92E,1827
65
65
  cuqi/utilities/_utilities.py,sha256=rjycaxDWExdskIfYXV1z5ZlB0JTlqv3tCmKf08i6U5c,7973
66
- CUQIpy-0.5.0.post0.dev5.dist-info/LICENSE,sha256=kJWRPrtRoQoZGXyyvu50Uc91X6_0XRaVfT0YZssicys,10799
67
- CUQIpy-0.5.0.post0.dev5.dist-info/METADATA,sha256=MQZGzMO78uAG0Y7GTwpZdTPFqcHHlTld_ysBXszItiU,16420
68
- CUQIpy-0.5.0.post0.dev5.dist-info/WHEEL,sha256=yQN5g4mg4AybRjkgi-9yy4iQEFibGQmlz78Pik5Or-A,92
69
- CUQIpy-0.5.0.post0.dev5.dist-info/top_level.txt,sha256=AgmgMc6TKfPPqbjV0kvAoCBN334i_Lwwojc7HE3ZwD0,5
70
- CUQIpy-0.5.0.post0.dev5.dist-info/RECORD,,
66
+ CUQIpy-0.5.0.post0.dev19.dist-info/LICENSE,sha256=kJWRPrtRoQoZGXyyvu50Uc91X6_0XRaVfT0YZssicys,10799
67
+ CUQIpy-0.5.0.post0.dev19.dist-info/METADATA,sha256=u37W-6k-lsUTpDsDlcpO7ZckEDLP50aL8c-7IeBqu0g,16421
68
+ CUQIpy-0.5.0.post0.dev19.dist-info/WHEEL,sha256=yQN5g4mg4AybRjkgi-9yy4iQEFibGQmlz78Pik5Or-A,92
69
+ CUQIpy-0.5.0.post0.dev19.dist-info/top_level.txt,sha256=AgmgMc6TKfPPqbjV0kvAoCBN334i_Lwwojc7HE3ZwD0,5
70
+ CUQIpy-0.5.0.post0.dev19.dist-info/RECORD,,
cuqi/_version.py CHANGED
@@ -8,11 +8,11 @@ import json
8
8
 
9
9
  version_json = '''
10
10
  {
11
- "date": "2023-09-14T11:43:13+0200",
11
+ "date": "2023-09-20T22:14:47+0200",
12
12
  "dirty": false,
13
13
  "error": null,
14
- "full-revisionid": "10df75286cece2b7b0876697501f4d2354b36f5f",
15
- "version": "0.5.0.post0.dev5"
14
+ "full-revisionid": "85066baa356c88fbab83075272f0337134b6d832",
15
+ "version": "0.5.0.post0.dev19"
16
16
  }
17
17
  ''' # END VERSION_JSON
18
18
 
@@ -331,6 +331,36 @@ class Continuous(Geometry, ABC):
331
331
 
332
332
  def fun2par(self,funvals):
333
333
  return funvals
334
+
335
+ def _reshape_par2fun_input(self, pars):
336
+ """Make sure that the parameter last dimension reflects the number of
337
+ parameter vectors: 1 for a single parameter vector and n for n parameter
338
+ vectors."""
339
+ # Ensure pars shape is correct (either squeezed single vector or
340
+ # multiple vectors with the correct shape).
341
+ if pars.shape != self.par_shape and\
342
+ pars.shape[:-1] != self.par_shape:
343
+ raise ValueError(
344
+ f"pars must have shape {self.par_shape} or {self.par_shape}"
345
+ + "+(n,) where n is the number of parameter vectors")
346
+
347
+ # Add dim to pars if pars is a single parameter vector.
348
+ return pars.reshape(self.par_shape + (-1,))
349
+
350
+ def _reshape_fun2par_input(self, funvals):
351
+ """Make sure that the function value last dimension reflects the number
352
+ of functions: 1 for a single function and n for n functions."""
353
+ # Ensure funvals shape is correct (either squeezed single function or
354
+ # multiple functions with the correct shape).
355
+ if funvals.shape != self.fun_shape and\
356
+ funvals.shape[:-1] != self.fun_shape:
357
+ raise ValueError(
358
+ f"funvals must have shape {self.fun_shape} or {self.fun_shape}"
359
+ + "+(n,) where n is the number of functions")
360
+
361
+ # Add dim to funvals if funvals is a single function.
362
+ return funvals.reshape(self.fun_shape + (-1,))
363
+
334
364
 
335
365
  class Continuous1D(Continuous):
336
366
  """A class that represents a continuous 1D geometry.
@@ -769,6 +799,7 @@ class KLExpansion(Continuous1D):
769
799
  self._normalizer = normalizer # normalizer factor
770
800
  self._num_modes = num_modes # number of modes
771
801
  self._coefs = None
802
+ self._coefs_inverse = None
772
803
 
773
804
  @property
774
805
  def par_shape(self):
@@ -796,10 +827,22 @@ class KLExpansion(Continuous1D):
796
827
  # If the coefficients are not computed, compute them.
797
828
  if self._coefs is None or len(self._coefs) != self.num_modes:
798
829
  eigvals = np.array(range(1, self.par_dim+1)) # KL eigvals
799
- self._coefs = 1/np.float_power(eigvals, self.decay_rate)
830
+ self._coefs = np.diag(1/np.float_power(eigvals, self.decay_rate))
800
831
 
801
832
  # Return the coefficients.
802
833
  return self._coefs
834
+
835
+ @property
836
+ def coefs_inverse(self):
837
+ """Computes the inverse of the coefficients diagonal matrix."""
838
+
839
+ # If the matrix is not computed, compute it.
840
+ if self._coefs_inverse is None or\
841
+ len(self._coefs_inverse) != self.num_modes:
842
+ self._coefs_inverse = np.diag(np.float_power(np.diag(self.coefs), -1))
843
+
844
+ # Return the inverse coefficients matrix.
845
+ return self._coefs_inverse
803
846
 
804
847
  @property
805
848
  def num_modes(self):
@@ -816,19 +859,22 @@ class KLExpansion(Continuous1D):
816
859
 
817
860
  # computes the real function out of expansion coefs
818
861
  def par2fun(self, p):
819
- # Check that the input is of the correct shape
820
- if len(p) != self.par_dim:
821
- raise ValueError(
822
- "Input array p must have length {}".format(self.par_dim))
823
862
 
824
- modes = p*self.coefs/self.normalizer
863
+ # Reshape the parameter vector
864
+ p = self._reshape_par2fun_input(p)
825
865
 
826
- # pad the remaining modes with zeros
827
- modes = np.pad(modes, (0, self.fun_dim-self.par_dim),
866
+ modes = self.coefs@p/self.normalizer
867
+
868
+ # pad the remaining modes with zeros (for single or multiple parameter
869
+ # parameter vectors)
870
+ modes = np.pad(modes, ((0, self.fun_dim-self.par_dim), (0, 0)),
828
871
  'constant', constant_values=0)
829
872
 
830
- real = idst(modes)/2
831
- return real
873
+ real = idst(modes.T).T/2
874
+
875
+ # squeeze to return single function value if only one parameter vector
876
+ # was given
877
+ return real.squeeze()
832
878
 
833
879
  def fun2par(self, funvals):
834
880
  """The function to parameter map used to map function values back to
@@ -837,11 +883,9 @@ class KLExpansion(Continuous1D):
837
883
  always the inverse of `par2fun` but it is the closest estimation of the
838
884
  function on the KL expansion coefficient space."""
839
885
 
840
- # Check that the input is of the correct shape
841
- if len(funvals) != self.fun_dim:
842
- raise ValueError(
843
- "Input array funvals must have length {}".format(self.fun_dim))
844
-
886
+ # Reshape the function values
887
+ funvals = self._reshape_fun2par_input(funvals)
888
+
845
889
  warnings.warn(
846
890
  f"fun2par for {self.__class__} is a projection on "
847
891
  + "the KL expansion coefficients space where only "
@@ -865,10 +909,14 @@ class KLExpansion(Continuous1D):
865
909
  # However, if we use, for example, scipy.fft instead of scipy.fftpack,
866
910
  # then this scaling is not needed.
867
911
 
868
- p = dst(funvals*2)[:self.par_dim]\
869
- *self.normalizer/(self.coefs*2*self.fun_dim)
912
+ # Transform (single or multiple functions) to expansion coefficients
913
+ p_temp = dst(funvals.T*2).T[:self.par_dim,:]
914
+ p = self.coefs_inverse@p_temp*self.normalizer/(2*self.fun_dim)
915
+
916
+ # squeeze to return single parameter vector if only one function value
917
+ # was given
918
+ return p.squeeze()
870
919
 
871
- return p
872
920
 
873
921
  class KLExpansion_Full(Continuous1D):
874
922
  '''
@@ -1138,24 +1186,55 @@ class StepExpansion(Continuous1D):
1138
1186
  return self._n_steps
1139
1187
 
1140
1188
  def par2fun(self, p):
1141
- real = np.zeros(self.grid.shape)
1189
+
1190
+ # Reshape the parameter vector
1191
+ p = self._reshape_par2fun_input(p)
1192
+
1193
+ # Extended fun_shape to include multiple functions.
1194
+ ext_fun_shape = self.fun_shape + (p.shape[-1],)
1195
+
1196
+ # Initialize fun to zeros.
1197
+ fun = np.zeros(ext_fun_shape)
1198
+
1199
+ # Fill fun with the step function values.
1142
1200
  for i in range(self._n_steps):
1143
- real[self._indices[i]] = p[i]
1201
+ fun[self._indices[i],:] = p[i,:]
1202
+
1203
+ # Squeeze to return single evaluated function if only one parameter
1204
+ # vector was given.
1205
+ return fun.squeeze()
1144
1206
 
1145
- return real
1146
1207
 
1147
1208
  def fun2par(self,f):
1148
- val = np.zeros(self._n_steps)
1209
+
1210
+ # Reshape the function values
1211
+ f = self._reshape_fun2par_input(f)
1212
+
1213
+ # Extended par_shape to include multiple parameter vectors.
1214
+ ext_par_shape = self.par_shape + (f.shape[-1],)
1215
+
1216
+ # Initialize par to zeros.
1217
+ par = np.zeros(ext_par_shape)
1218
+
1219
+ # Fill par with the projection of the function values.
1149
1220
  for i in range(self._n_steps):
1150
1221
  if self._fun2par_projection.lower() == 'mean':
1151
- val[i] = np.mean(f[self._indices[i]])
1222
+ projection_method = np.mean
1152
1223
  elif self._fun2par_projection.lower() == 'max':
1153
- val[i] = np.max(f[self._indices[i]])
1224
+ projection_method = np.max
1154
1225
  elif self._fun2par_projection.lower() == 'min':
1155
- val[i] = np.min(f[self._indices[i]])
1226
+ projection_method = np.min
1156
1227
  else:
1157
1228
  raise ValueError("Invalid projection option.")
1158
- return val
1229
+
1230
+ # Apply projection method to the function values in the ith
1231
+ # interval.
1232
+ par[i,:] = projection_method(f[self._indices[i],:], axis=0)
1233
+
1234
+ # Squeeze to return single parameter vector if only one function value
1235
+ # was given.
1236
+ return par.squeeze()
1237
+
1159
1238
 
1160
1239
  def _check_grid_setup(self):
1161
1240
 
cuqi/sampler/_hmc.py CHANGED
@@ -58,20 +58,75 @@ class NUTS(Sampler):
58
58
  # Plot samples
59
59
  samples.plot_pair()
60
60
 
61
+ After running the NUTS sampler, run diagnostics can be accessed via the
62
+ following attributes:
63
+
64
+ .. code-block:: python
65
+
66
+ # Number of tree nodes created each NUTS iteration
67
+ sampler.num_tree_node_list
68
+
69
+ # Step size used in each NUTS iteration
70
+ sampler.epsilon_list
71
+
72
+ # Suggested step size during adaptation (the value of this step size is
73
+ # only used after adaptation). The suggested step size is None if
74
+ # adaptation is not requested.
75
+ sampler.epsilon_bar_list
76
+
77
+ # Additionally, iterations' number can be accessed via
78
+ sampler.iteration_list
79
+
61
80
  """
62
81
  def __init__(self, target, x0=None, max_depth=15, adapt_step_size=True, opt_acc_rate=0.6, **kwargs):
63
82
  super().__init__(target, x0=x0, **kwargs)
64
83
  self.max_depth = max_depth
65
84
  self.adapt_step_size = adapt_step_size
66
85
  self.opt_acc_rate = opt_acc_rate
67
-
86
+
87
+ # NUTS run diagnostic
88
+ # number of tree nodes created each NUTS iteration
89
+ self._num_tree_node = 0
90
+ # Create lists to store NUTS run diagnostics
91
+ self._create_run_diagnostic_attributes()
92
+
93
+ def _create_run_diagnostic_attributes(self):
94
+ """A method to create attributes to store NUTS run diagnostic."""
95
+ self._reset_run_diagnostic_attributes()
96
+
97
+ def _reset_run_diagnostic_attributes(self):
98
+ """A method to reset attributes to store NUTS run diagnostic."""
99
+ # NUTS iterations
100
+ self.iteration_list = []
101
+ # List to store number of tree nodes created each NUTS iteration
102
+ self.num_tree_node_list = []
103
+ # List of step size used in each NUTS iteration
104
+ self.epsilon_list = []
105
+ # List of burn-in step size suggestion during adaptation
106
+ # only used when adaptation is done
107
+ # remains fixed after adaptation (after burn-in)
108
+ self.epsilon_bar_list = []
109
+
110
+ def _update_run_diagnostic_attributes(self, k, n_tree, eps, eps_bar):
111
+ """A method to update attributes to store NUTS run diagnostic."""
112
+ # Store the current iteration number k
113
+ self.iteration_list.append(k)
114
+ # Store the number of tree nodes created in iteration k
115
+ self.num_tree_node_list.append(n_tree)
116
+ # Store the step size used in iteration k
117
+ self.epsilon_list.append(eps)
118
+ # Store the step size suggestion during adaptation in iteration k
119
+ self.epsilon_bar_list.append(eps_bar)
120
+
68
121
  def _nuts_target(self, x): # returns logposterior tuple evaluation-gradient
69
122
  return self.target.logd(x), self.target.gradient(x)
70
123
 
71
124
  def _sample_adapt(self, N, Nb):
72
125
  return self._sample(N, Nb)
73
-
126
+
74
127
  def _sample(self, N, Nb):
128
+ # Reset run diagnostic attributes
129
+ self._reset_run_diagnostic_attributes()
75
130
 
76
131
  if self.adapt_step_size is True and Nb == 0:
77
132
  raise ValueError("Adaptive step size is True but number of burn-in steps is 0. Please set Nb > 0.")
@@ -86,6 +141,9 @@ class NUTS(Sampler):
86
141
  theta[:, 0] = self.x0
87
142
  joint_eval[0], grad = self._nuts_target(self.x0)
88
143
 
144
+ # Step size variables
145
+ epsilon, epsilon_bar = None, None
146
+
89
147
  # parameters dual averaging
90
148
  if (self.adapt_step_size == True):
91
149
  epsilon = self._FindGoodEpsilon(theta[:, 0], joint_eval[0], grad)
@@ -101,6 +159,9 @@ class NUTS(Sampler):
101
159
 
102
160
  # run NUTS
103
161
  for k in range(1, Ns):
162
+ # reset number of tree nodes for each iteration
163
+ self._num_tree_node = 0
164
+
104
165
  theta_k, joint_k = theta[:, k-1], joint_eval[k-1] # initial position (parameters)
105
166
  r_k = self._Kfun(1, 'sample') # resample momentum vector
106
167
  Ham = joint_k - self._Kfun(r_k, 'eval') # Hamiltonian
@@ -143,6 +204,10 @@ class NUTS(Sampler):
143
204
  s = s_prime * int((dtheta @ r_minus.T) >= 0) * int((dtheta @ r_plus.T) >= 0)
144
205
  j += 1
145
206
 
207
+ # update run diagnostic attributes
208
+ self._update_run_diagnostic_attributes(
209
+ k, self._num_tree_node, epsilon, epsilon_bar)
210
+
146
211
  # adapt epsilon during burn-in using dual averaging
147
212
  if (k <= Nb) and (self.adapt_step_size == True):
148
213
  eta1 = 1/(k + t_0)
@@ -160,7 +225,7 @@ class NUTS(Sampler):
160
225
 
161
226
  if np.isnan(joint_eval[k]):
162
227
  raise NameError('NaN potential func')
163
-
228
+
164
229
  # apply burn-in
165
230
  theta = theta[:, Nb:]
166
231
  joint_eval = joint_eval[Nb:]
@@ -211,6 +276,9 @@ class NUTS(Sampler):
211
276
  #=========================================================================
212
277
  # @functools.lru_cache(maxsize=128)
213
278
  def _BuildTree(self, theta, r, grad, Ham, log_u, v, j, epsilon, Delta_max=1000):
279
+ # Increment the number of tree nodes counter
280
+ self._num_tree_node += 1
281
+
214
282
  if (j == 0): # base case
215
283
  # single leapfrog step in the direction v
216
284
  theta_prime, r_prime, joint_prime, grad_prime = self._Leapfrog(theta, r, grad, v*epsilon)