CUQIpy-1.2.0.post0.dev30-py3-none-any.whl → CUQIpy-1.2.0.post0.dev42-py3-none-any.whl

This diff compares the contents of two publicly released versions of the package as they appear in their public registry, and is provided for informational purposes only.

Potentially problematic release: this version of CUQIpy might be problematic.

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: CUQIpy
-Version: 1.2.0.post0.dev30
+Version: 1.2.0.post0.dev42
 Summary: Computational Uncertainty Quantification for Inverse problems in Python
 Maintainer-email: "Nicolai A. B. Riis" <nabr@dtu.dk>, "Jakob S. Jørgensen" <jakj@dtu.dk>, "Amal M. Alghamdi" <amaal@dtu.dk>, Chao Zhang <chaz@dtu.dk>
 License: Apache License
@@ -1,6 +1,6 @@
 cuqi/__init__.py,sha256=LsGilhl-hBLEn6Glt8S_l0OJzAA1sKit_rui8h-D-p0,488
 cuqi/_messages.py,sha256=fzEBrZT2kbmfecBBPm7spVu7yHdxGARQB4QzXhJbCJ0,415
-cuqi/_version.py,sha256=QJEhKLIZcwiFhDOxL5fW3HoGva9HIMdu-ceQvUCHsjs,509
+cuqi/_version.py,sha256=8yG6yaTIxWU2I_4MDAqgTgzlSR5m8uT0oqard7PIDds,509
 cuqi/config.py,sha256=wcYvz19wkeKW2EKCGIKJiTpWt5kdaxyt4imyRkvtTRA,526
 cuqi/diagnostics.py,sha256=5OrbJeqpynqRXOe5MtOKKhe7EAVdOEpHIqHnlMW9G_c,3029
 cuqi/array/__init__.py,sha256=-EeiaiWGNsE3twRS4dD814BIlfxEsNkTCZUc5gjOXb0,30
@@ -39,14 +39,14 @@ cuqi/experimental/mcmc/_conjugate.py,sha256=VNPQkGity0mposcqxrx4UIeXm35EvJvZED4p
 cuqi/experimental/mcmc/_conjugate_approx.py,sha256=uEnY2ea9su5ivcNagyRAwpQP2gBY98sXU7N0y5hTADo,3653
 cuqi/experimental/mcmc/_cwmh.py,sha256=50v3uZaWhlVnfrEB5-lB_7pn8QoUVBe-xWxKGKbmNHg,7234
 cuqi/experimental/mcmc/_direct.py,sha256=9pQS_2Qk2-ybt6m8WTfPoKetcxQ00WaTRN85-Z0FrBY,777
-cuqi/experimental/mcmc/_gibbs.py,sha256=a_Zr007F233R6A3CJ_0zlQppsQrVuX8-oWCbNJWJxmA,12745
-cuqi/experimental/mcmc/_hmc.py,sha256=AR8ucnjPe9Q78em3IMoihLoifdBxu9oAjrQ51AYPHH4,19390
+cuqi/experimental/mcmc/_gibbs.py,sha256=evgxf2tLFLlKB3hN0qz9a9NcZQSES8wdacnn3uNWocQ,12005
+cuqi/experimental/mcmc/_hmc.py,sha256=8p4QxZBRpFLzwamH-DWHSdZE0aXX3FqonBzczz_XkDw,19340
 cuqi/experimental/mcmc/_langevin_algorithm.py,sha256=yNO7ABxmkixzcLG-lv57GOTyeTr7HwFs2DrrhuZW9OI,8398
 cuqi/experimental/mcmc/_laplace_approximation.py,sha256=rdiE3cMQFq6FLQcOQwPpuGIxrTAp3aoGPxMDSdeopV0,5688
 cuqi/experimental/mcmc/_mh.py,sha256=MXo0ahXP4KGFkaY4HtvcBE-TMQzsMlTmLKzSvpz7drU,2941
 cuqi/experimental/mcmc/_pcn.py,sha256=wqJBZLuRFSwxihaI53tumAg6AWVuceLMOmXssTetd1A,3374
 cuqi/experimental/mcmc/_rto.py,sha256=OtzgiYCxDoTdXp7y4mkLa2upj74qadesoqHYpr11ZCg,10061
-cuqi/experimental/mcmc/_sampler.py,sha256=lWb0ORdkYcVkZ6G0ypHfmrWXZPfaXnsUj1wAncKxa7g,20339
+cuqi/experimental/mcmc/_sampler.py,sha256=xtoT70T8xe3Ye7yYdIFQD_kivjXlqUImyV3bMt406nk,20106
 cuqi/experimental/mcmc/_utilities.py,sha256=kUzHbhIS3HYZRbneNBK41IogUYX5dS_bJxqEGm7TQBI,525
 cuqi/geometry/__init__.py,sha256=Tz1WGzZBY-QGH3c0GiyKm9XHN8MGGcnU6TUHLZkzB3o,842
 cuqi/geometry/_geometry.py,sha256=SDRZdiN2CIuS591lXxqgFoPWPIpwY-MHk75116QvdYY,46901
@@ -85,8 +85,8 @@ cuqi/testproblem/_testproblem.py,sha256=x769LwwRdJdzIiZkcQUGb_5-vynNTNALXWKato7s
 cuqi/utilities/__init__.py,sha256=H7xpJe2UinjZftKvE2JuXtTi4DqtkR6uIezStAXwfGg,428
 cuqi/utilities/_get_python_variable_name.py,sha256=QwlBVj2koJRA8s8pWd554p7-ElcI7HUwY32HknaR92E,1827
 cuqi/utilities/_utilities.py,sha256=Jc4knn80vLoA7kgw9FzXwKVFGaNBOXiA9kgvltZU3Ao,11777
-CUQIpy-1.2.0.post0.dev30.dist-info/LICENSE,sha256=kJWRPrtRoQoZGXyyvu50Uc91X6_0XRaVfT0YZssicys,10799
-CUQIpy-1.2.0.post0.dev30.dist-info/METADATA,sha256=lPVBe_Ad6ghqiE2CkcVDyABxmJ8Hb610VL0cZSxmVeo,18495
-CUQIpy-1.2.0.post0.dev30.dist-info/WHEEL,sha256=GV9aMThwP_4oNCtvEC2ec3qUYutgWeAzklro_0m4WJQ,91
-CUQIpy-1.2.0.post0.dev30.dist-info/top_level.txt,sha256=AgmgMc6TKfPPqbjV0kvAoCBN334i_Lwwojc7HE3ZwD0,5
-CUQIpy-1.2.0.post0.dev30.dist-info/RECORD,,
+CUQIpy-1.2.0.post0.dev42.dist-info/LICENSE,sha256=kJWRPrtRoQoZGXyyvu50Uc91X6_0XRaVfT0YZssicys,10799
+CUQIpy-1.2.0.post0.dev42.dist-info/METADATA,sha256=HICsNy9FiNUfy2rnCMBDZxqvBsM__s0hbLpGAOccHG0,18495
+CUQIpy-1.2.0.post0.dev42.dist-info/WHEEL,sha256=GV9aMThwP_4oNCtvEC2ec3qUYutgWeAzklro_0m4WJQ,91
+CUQIpy-1.2.0.post0.dev42.dist-info/top_level.txt,sha256=AgmgMc6TKfPPqbjV0kvAoCBN334i_Lwwojc7HE3ZwD0,5
+CUQIpy-1.2.0.post0.dev42.dist-info/RECORD,,
cuqi/_version.py CHANGED
@@ -8,11 +8,11 @@ import json
 
 version_json = '''
 {
- "date": "2024-10-11T09:05:14+0200",
+ "date": "2024-10-11T13:31:57+0300",
  "dirty": false,
  "error": null,
- "full-revisionid": "e46192df9743f4e53f2ad126462ed1fd95a97dd0",
- "version": "1.2.0.post0.dev30"
+ "full-revisionid": "85645ceb5eccff9ab309005d62defa609b9f53b2",
+ "version": "1.2.0.post0.dev42"
 }
 ''' # END VERSION_JSON
 
cuqi/experimental/mcmc/_gibbs.py CHANGED
@@ -136,13 +136,7 @@ class HybridGibbs:
         self._set_targets()
 
         # Initialize the samplers
-        self._initialize_samplers()
-
-        # Run over pre-sample methods for samplers that have it
-        # TODO. Some samplers (NUTS) seem to require to run _pre_warmup before _pre_sample
-        # This is not ideal and should be fixed in the future
-        for sampler in self.samplers.values():
-            self._pre_warmup_and_pre_sample_sampler(sampler)
+        self._initialize_samplers()
 
         # Validate all targets for samplers.
         self.validate_targets()
@@ -239,10 +233,6 @@ class HybridGibbs:
             sampler.set_state(sampler_state)
             sampler.set_history(sampler_history)
 
-            # Run pre_warmup and pre_sample methods for sampler
-            # TODO. Some samplers (NUTS) seem to require to run _pre_warmup before _pre_sample
-            self._pre_warmup_and_pre_sample_sampler(sampler)
-
             # Allow for multiple sampling steps in each Gibbs step
             for _ in range(self.num_sampling_steps[par_name]):
                 # Sampling step
@@ -291,10 +281,6 @@ class HybridGibbs:
             self.num_sampling_steps[par_name] = 1
 
 
-    def _pre_warmup_and_pre_sample_sampler(self, sampler):
-        if hasattr(sampler, '_pre_warmup'): sampler._pre_warmup()
-        if hasattr(sampler, '_pre_sample'): sampler._pre_sample()
-
     def _set_targets(self):
         """ Set targets for all samplers using the current samples """
         par_names = self.par_names
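
Note: the removed _pre_warmup_and_pre_sample_sampler helper only probed each sampler for optional _pre_warmup/_pre_sample hooks and called them when present. With NUTS now initializing its step-size state lazily (see the _hmc.py hunks below), HybridGibbs no longer needs this dispatch and simply calls self._initialize_samplers(). For reference, a reconstruction of the deleted helper, taken from the removed lines above:

    # No longer part of HybridGibbs; shown only to make the refactor explicit.
    def _pre_warmup_and_pre_sample_sampler(sampler):
        if hasattr(sampler, '_pre_warmup'):
            sampler._pre_warmup()   # optional hook: prepare tuning state
        if hasattr(sampler, '_pre_sample'):
            sampler._pre_sample()   # optional hook: prepare sampling state
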
cuqi/experimental/mcmc/_hmc.py CHANGED
@@ -199,6 +199,9 @@ class NUTS(Sampler):
         self._reset_run_diagnostic_attributes()
 
     def step(self):
+        if isinstance(self._epsilon_bar, str) and self._epsilon_bar == "unset":
+            self._epsilon_bar = self._epsilon
+
         # Convert current_point, logd, and grad to numpy arrays
         # if they are CUQIarray objects
         if isinstance(self.current_point, CUQIarray):
@@ -212,9 +215,9 @@
         self._num_tree_node = 0
 
         # copy current point, logd, and grad in local variables
-        point_k = self.current_point.copy() # initial position (parameters)
+        point_k = self.current_point # initial position (parameters)
         logd_k = self.current_target_logd
-        grad_k = self.current_target_grad.copy() # initial gradient
+        grad_k = self.current_target_grad # initial gradient
 
         # compute r_k and Hamiltonian
         r_k = self._Kfun(1, 'sample') # resample momentum vector
@@ -225,9 +228,9 @@
 
         # initialization
         j, s, n = 0, 1, 1
-        point_minus, point_plus = np.copy(point_k), np.copy(point_k)
-        grad_minus, grad_plus = np.copy(grad_k), np.copy(grad_k)
-        r_minus, r_plus = np.copy(r_k), np.copy(r_k)
+        point_minus, point_plus = point_k.copy(), point_k.copy()
+        grad_minus, grad_plus = grad_k.copy(), grad_k.copy()
+        r_minus, r_plus = r_k.copy(), r_k.copy()
 
         # run NUTS
         acc = 0
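
Note: one plausible reason for switching from np.copy(x) to x.copy() (not stated in the diff itself) is subclass preservation: np.copy defaults to subok=False and returns a plain numpy.ndarray, whereas the .copy() method keeps the array's own type, e.g. cuqi.array.CUQIarray. A small self-contained illustration with a stand-in subclass:

    import numpy as np

    class MyArray(np.ndarray):
        """Stand-in for an ndarray subclass such as cuqi.array.CUQIarray."""

    a = np.arange(3).view(MyArray)
    print(type(np.copy(a)))              # numpy.ndarray (np.copy uses subok=False by default)
    print(type(np.copy(a, subok=True)))  # MyArray
    print(type(a.copy()))                # MyArray (the method preserves the subclass)
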
@@ -255,9 +258,14 @@
                 (np.random.rand() <= alpha2) and \
                 (not np.isnan(logd_prime)) and \
                 (not np.isinf(logd_prime)):
-                self.current_point = point_prime
-                self.current_target_logd = logd_prime
-                self.current_target_grad = np.copy(grad_prime)
+                self.current_point = point_prime.copy()
+                # copy if array, else assign if scalar
+                self.current_target_logd = (
+                    logd_prime.copy()
+                    if isinstance(logd_prime, np.ndarray)
+                    else logd_prime
+                )
+                self.current_target_grad = grad_prime.copy()
                 acc = 1
 
 
@@ -281,6 +289,9 @@
 
     def tune(self, skip_len, update_count):
         """ adapt epsilon during burn-in using dual averaging"""
+        if isinstance(self._epsilon_bar, str) and self._epsilon_bar == "unset":
+            self._epsilon_bar = 1
+
         k = update_count+1
 
         # Fixed parameters that do not change during the run
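
Note: step() and tune() now resolve the "unset" sentinel lazily the first time they run: tune() starts the dual-averaging estimate at 1, while step() falls back to the current _epsilon when sampling begins without a warmup phase. This replaces the _pre_warmup()/_pre_sample() hooks removed in the next hunk. A minimal sketch of the pattern (attribute and method names follow this diff; the surrounding NUTS logic is elided and the _epsilon default below is a placeholder):

    class _LazyStepSizeInitSketch:
        """Sentinel-based lazy initialization of the dual-averaging state."""

        _epsilon_bar = "unset"  # sentinel until warmup or sampling starts
        _epsilon = 0.1          # placeholder; the real value is set by NUTS itself

        def tune(self, skip_len, update_count):
            if isinstance(self._epsilon_bar, str) and self._epsilon_bar == "unset":
                self._epsilon_bar = 1              # first warmup call: start dual averaging at 1
            # ... dual-averaging update of self._epsilon and self._epsilon_bar ...

        def step(self):
            if isinstance(self._epsilon_bar, str) and self._epsilon_bar == "unset":
                self._epsilon_bar = self._epsilon  # sampling without warmup: keep current epsilon
            # ... one NUTS transition ...
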
@@ -294,26 +305,6 @@
         self._epsilon_bar =\
             np.exp(eta*np.log(self._epsilon) +(1-eta)*np.log(self._epsilon_bar))
 
-    def _pre_warmup(self):
-
-        # Set up tuning parameters (only first time tuning is called)
-        # Note:
-        # Parameters changes during the tune run
-        # self._epsilon_bar
-        # self._H_bar
-        # self._epsilon
-        # Parameters that does not change during the run
-        # self._mu
-        self._ensure_initialized()
-        if self._epsilon_bar == "unset": # Initial value of epsilon_bar for tuning
-            self._epsilon_bar = 1
-
-    def _pre_sample(self):
-        self._ensure_initialized()
-        if self._epsilon_bar == "unset": # Initial value of epsilon_bar for sampling
-            self._epsilon_bar = self._epsilon
-
-
     #=========================================================================
     def _nuts_target(self, x): # returns logposterior tuple evaluation-gradient
         return self.target.logd(x), self.target.gradient(x)
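
Note: the comments deleted with _pre_warmup() list the dual-averaging state (_epsilon_bar, _H_bar, _epsilon, and the fixed _mu), and the surviving context line shows the smoothing update epsilon_bar = exp(eta*log(epsilon) + (1-eta)*log(epsilon_bar)). For reference, a sketch of that smoothing step in isolation, following the standard dual-averaging scheme of Hoffman & Gelman (2014); the weight eta = k**(-kappa) with kappa = 0.75 is the textbook choice and is an assumption here, since this hunk does not show how CUQIpy computes eta:

    import numpy as np

    def smooth_epsilon_bar(epsilon, epsilon_bar, k, kappa=0.75):
        """One dual-averaging smoothing step: a geometric average of step sizes.

        k is the 1-based tuning iteration; eta = k**(-kappa) down-weights later
        iterations so epsilon_bar settles while epsilon keeps adapting.
        """
        eta = k ** (-kappa)
        return np.exp(eta * np.log(epsilon) + (1 - eta) * np.log(epsilon_bar))
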
@@ -423,9 +414,14 @@
             # Metropolis step
             alpha2 = n_2prime / max(1, (n_prime + n_2prime))
             if (np.random.rand() <= alpha2):
-                point_prime = np.copy(point_2prime)
-                logd_prime = np.copy(logd_2prime)
-                grad_prime = np.copy(grad_2prime)
+                point_prime = point_2prime.copy()
+                # copy if array, else assign if scalar
+                logd_prime = (
+                    logd_2prime.copy()
+                    if isinstance(logd_2prime, np.ndarray)
+                    else logd_2prime
+                )
+                grad_prime = grad_2prime.copy()
 
             # update number of particles and stopping criterion
             alpha_prime += alpha_2prime
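
Note: in this hunk and in the earlier one in step(), the log-density value is only copied when it is an np.ndarray; a plain scalar (e.g. a Python float returned by the target's logd) has no .copy() method and, being immutable, needs no defensive copy. The comment "copy if array, else assign if scalar" is CUQIpy's own; the helper below simply restates the guard in isolation:

    import numpy as np

    def copy_if_array(value):
        """Defensively copy ndarray values; pass scalars through unchanged."""
        return value.copy() if isinstance(value, np.ndarray) else value

    # copy_if_array(np.array([1.0, 2.0])) -> independent array copy
    # copy_if_array(3.5)                  -> 3.5 (immutable scalar, no copy needed)
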
@@ -465,4 +461,4 @@
         # Store the step size used in iteration k
         self.epsilon_list.append(eps)
         # Store the step size suggestion during adaptation in iteration k
-        self.epsilon_bar_list.append(eps_bar)
+        self.epsilon_bar_list.append(eps_bar)
cuqi/experimental/mcmc/_sampler.py CHANGED
@@ -216,9 +216,6 @@ class Sampler(ABC):
         if batch_size > 0:
             batch_handler = _BatchHandler(batch_size, sample_path)
 
-        # Any code that needs to be run before sampling
-        if hasattr(self, "_pre_sample"): self._pre_sample()
-
         # Draw samples
         pbar = tqdm(range(Ns), "Sample: ")
         for idx in pbar:
@@ -260,9 +257,6 @@
 
         tune_interval = max(int(tune_freq * Nb), 1)
 
-        # Any code that needs to be run before warmup
-        if hasattr(self, "_pre_warmup"): self._pre_warmup()
-
         # Draw warmup samples with tuning
         pbar = tqdm(range(Nb), "Warmup: ")
         for idx in pbar:
@@ -566,4 +560,4 @@ class _BatchHandler:
 
     def finalize(self):
         """ Finalize the batch handler. Flush any remaining samples to disk. """
-        self.flush()
+        self.flush()
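
Note: with the hooks gone, the flow of the experimental samplers is unchanged: the warmup loop drives tune() and the sampling loop drives step(), and the NUTS step-size state now initializes itself on first use. A hedged usage sketch; the Gaussian target, its constructor arguments, the warmup/sample method names and get_samples() are assumptions about the CUQIpy API and are not shown in this diff:

    import numpy as np
    import cuqi
    from cuqi.experimental.mcmc import NUTS

    # Any differentiable target works; a standard Gaussian is used purely for illustration.
    target = cuqi.distribution.Gaussian(np.zeros(2), 1.0)

    sampler = NUTS(target)
    sampler.warmup(200)    # drives tune(); _epsilon_bar is lazily set to 1 on the first call
    sampler.sample(1000)   # drives step(); falls back to _epsilon if _epsilon_bar is still "unset"
    samples = sampler.get_samples()
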