CUQIpy 1.1.1.post0.dev7__py3-none-any.whl → 1.1.1.post0.dev36__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of CUQIpy might be problematic.

CUQIpy-1.1.1.post0.dev7.dist-info/METADATA → CUQIpy-1.1.1.post0.dev36.dist-info/METADATA
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: CUQIpy
- Version: 1.1.1.post0.dev7
+ Version: 1.1.1.post0.dev36
  Summary: Computational Uncertainty Quantification for Inverse problems in Python
  Maintainer-email: "Nicolai A. B. Riis" <nabr@dtu.dk>, "Jakob S. Jørgensen" <jakj@dtu.dk>, "Amal M. Alghamdi" <amaal@dtu.dk>, Chao Zhang <chaz@dtu.dk>
  License: Apache License
CUQIpy-1.1.1.post0.dev7.dist-info/RECORD → CUQIpy-1.1.1.post0.dev36.dist-info/RECORD
@@ -1,6 +1,6 @@
  cuqi/__init__.py,sha256=LsGilhl-hBLEn6Glt8S_l0OJzAA1sKit_rui8h-D-p0,488
  cuqi/_messages.py,sha256=fzEBrZT2kbmfecBBPm7spVu7yHdxGARQB4QzXhJbCJ0,415
- cuqi/_version.py,sha256=uVRNuD5Hnb14kKCLTMa_Wb7QhBn4Tavn4bdESiBfPz4,508
+ cuqi/_version.py,sha256=PxeXIVclRTk3kiFawV6Oz6g5wmG2chZXbA1vtHlEXMY,509
  cuqi/config.py,sha256=wcYvz19wkeKW2EKCGIKJiTpWt5kdaxyt4imyRkvtTRA,526
  cuqi/diagnostics.py,sha256=5OrbJeqpynqRXOe5MtOKKhe7EAVdOEpHIqHnlMW9G_c,3029
  cuqi/array/__init__.py,sha256=-EeiaiWGNsE3twRS4dD814BIlfxEsNkTCZUc5gjOXb0,30
@@ -49,7 +49,7 @@ cuqi/experimental/mcmc/_rto.py,sha256=OtzgiYCxDoTdXp7y4mkLa2upj74qadesoqHYpr11ZC
  cuqi/experimental/mcmc/_sampler.py,sha256=yzrbtBlqiajIHH151vocbD9SrHoCHj29wLP-IyrG3Mw,20017
  cuqi/experimental/mcmc/_utilities.py,sha256=kUzHbhIS3HYZRbneNBK41IogUYX5dS_bJxqEGm7TQBI,525
  cuqi/geometry/__init__.py,sha256=Tz1WGzZBY-QGH3c0GiyKm9XHN8MGGcnU6TUHLZkzB3o,842
- cuqi/geometry/_geometry.py,sha256=WYFC-4_VBTW73b2ldsnfGYKvdSiCE8plr89xTSmkadg,46804
+ cuqi/geometry/_geometry.py,sha256=SDRZdiN2CIuS591lXxqgFoPWPIpwY-MHk75116QvdYY,46901
  cuqi/implicitprior/__init__.py,sha256=CaDQGYtmeFzN37vf3QUmKhcN9-H5lO66ZbK035k4qUw,246
  cuqi/implicitprior/_regularizedGMRF.py,sha256=IR9tKzNMoz-b0RKu6ahVgMx_lDNB3jZHVWFMQm6QqZk,6259
  cuqi/implicitprior/_regularizedGaussian.py,sha256=cQtrgzyJU2pwoK4ORGl1erKLE9VY5NqwZTiqiViDswA,12371
@@ -63,7 +63,7 @@ cuqi/operator/_operator.py,sha256=yNwPTh7jR07AiKMbMQQ5_54EgirlKFsbq9JN1EODaQI,88
  cuqi/pde/__init__.py,sha256=NyS_ZYruCvy-Yg24qKlwm3ZIX058kLNQX9bqs-xg4ZM,99
  cuqi/pde/_pde.py,sha256=WRkOYyIdT_T3aZepRh0aS9C5nBbUZUcHaA80iSRvgoo,12572
  cuqi/problem/__init__.py,sha256=JxJty4JqHTOqSG6NeTGiXRQ7OLxiRK9jvVq3lXLeIRw,38
- cuqi/problem/_problem.py,sha256=XvNbo7BXcnDZvj3n36f879QknTYg3_-jnKhkVvqUQto,31944
+ cuqi/problem/_problem.py,sha256=t8y8NNQGKHYIbXN45FWuuwuU7pMgDdD8MS6az_1Tx8k,38160
  cuqi/sampler/__init__.py,sha256=D-dYa0gFgIwQukP8_VKhPGmlGKXbvVo7YqaET4SdAeQ,382
  cuqi/sampler/_conjugate.py,sha256=ztmUR3V3qZk9zelKx48ULnmMs_zKTDUfohc256VOIe8,2753
  cuqi/sampler/_conjugate_approx.py,sha256=xX-X71EgxGnZooOY6CIBhuJTs3dhcKfoLnoFxX3CO2g,1938
@@ -85,8 +85,8 @@ cuqi/testproblem/_testproblem.py,sha256=x769LwwRdJdzIiZkcQUGb_5-vynNTNALXWKato7s
  cuqi/utilities/__init__.py,sha256=H7xpJe2UinjZftKvE2JuXtTi4DqtkR6uIezStAXwfGg,428
  cuqi/utilities/_get_python_variable_name.py,sha256=QwlBVj2koJRA8s8pWd554p7-ElcI7HUwY32HknaR92E,1827
  cuqi/utilities/_utilities.py,sha256=Jc4knn80vLoA7kgw9FzXwKVFGaNBOXiA9kgvltZU3Ao,11777
- CUQIpy-1.1.1.post0.dev7.dist-info/LICENSE,sha256=kJWRPrtRoQoZGXyyvu50Uc91X6_0XRaVfT0YZssicys,10799
- CUQIpy-1.1.1.post0.dev7.dist-info/METADATA,sha256=8qP47Cs2y6ki9HTzz2bpn2PN9ll2WRno_-pKXrR0TRA,18389
- CUQIpy-1.1.1.post0.dev7.dist-info/WHEEL,sha256=ixB2d4u7mugx_bCBycvM9OzZ5yD7NmPXFRtKlORZS2Y,91
- CUQIpy-1.1.1.post0.dev7.dist-info/top_level.txt,sha256=AgmgMc6TKfPPqbjV0kvAoCBN334i_Lwwojc7HE3ZwD0,5
- CUQIpy-1.1.1.post0.dev7.dist-info/RECORD,,
+ CUQIpy-1.1.1.post0.dev36.dist-info/LICENSE,sha256=kJWRPrtRoQoZGXyyvu50Uc91X6_0XRaVfT0YZssicys,10799
+ CUQIpy-1.1.1.post0.dev36.dist-info/METADATA,sha256=rFoEwQu_k2FhhCV3x8SEdJp_j-wVLNtnP198njDqVWc,18390
+ CUQIpy-1.1.1.post0.dev36.dist-info/WHEEL,sha256=uCRv0ZEik_232NlR4YDw4Pv3Ajt5bKvMH13NUU7hFuI,91
+ CUQIpy-1.1.1.post0.dev36.dist-info/top_level.txt,sha256=AgmgMc6TKfPPqbjV0kvAoCBN334i_Lwwojc7HE3ZwD0,5
+ CUQIpy-1.1.1.post0.dev36.dist-info/RECORD,,
CUQIpy-1.1.1.post0.dev7.dist-info/WHEEL → CUQIpy-1.1.1.post0.dev36.dist-info/WHEEL
@@ -1,5 +1,5 @@
  Wheel-Version: 1.0
- Generator: setuptools (74.1.0)
+ Generator: setuptools (74.1.1)
  Root-Is-Purelib: true
  Tag: py3-none-any
 
cuqi/_version.py CHANGED
@@ -8,11 +8,11 @@ import json
 
  version_json = '''
  {
- "date": "2024-09-03T13:01:53+0200",
+ "date": "2024-09-04T13:31:10+0200",
  "dirty": false,
  "error": null,
- "full-revisionid": "6731d720ebd90ec077628febef51db45ce1738a4",
- "version": "1.1.1.post0.dev7"
+ "full-revisionid": "7852699550f6a8cb8bd6ad61a579e2eecb7cc964",
+ "version": "1.1.1.post0.dev36"
  }
  ''' # END VERSION_JSON
 
cuqi/geometry/_geometry.py CHANGED
@@ -395,6 +395,8 @@ class Continuous1D(Continuous):
  self._grid = self._create_dimension(value)
 
  def _plot(self, values, *args, **kwargs):
+ if self.par_dim==1 and "marker" not in kwargs.keys():
+ kwargs["marker"] = "o"
  p = plt.plot(self.grid, values, *args, **kwargs)
  self._plot_config()
  return p
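
The change above only affects one-node grids: when a Continuous1D geometry has par_dim == 1, _plot now injects marker="o" so the single value is visible instead of an empty line plot. A minimal sketch of the new behaviour, assuming Continuous1D can be constructed from a node count and that the public plot method forwards keyword arguments to _plot:

import numpy as np
import cuqi

# A Continuous1D geometry with a single grid node (par_dim == 1).
geom = cuqi.geometry.Continuous1D(1)

# The marker "o" is now added automatically so the lone point shows up;
# an explicitly supplied marker is left untouched.
geom.plot(np.array([1.5]))
geom.plot(np.array([1.5]), marker="x")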
cuqi/problem/_problem.py CHANGED
@@ -218,10 +218,10 @@ class BayesianProblem(object):
  """
  if disp:
  # Print warning to user about the automatic solver selection
- print("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
- print("!!! Automatic solver selection is experimental. !!!")
- print("!!! Always validate the computed results. !!!")
- print("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
+ print("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
+ print("!!! Automatic solver selection is a work-in-progress !!!")
+ print("!!! Always validate the computed results. !!!")
+ print("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
  print("")
 
  x_ML, solver_info = self._solve_max_point(self.likelihood, disp=disp, x0=x0)
@@ -254,10 +254,10 @@ class BayesianProblem(object):
 
  if disp:
  # Print warning to user about the automatic solver selection
- print("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
- print("!!! Automatic solver selection is experimental. !!!")
- print("!!! Always validate the computed results. !!!")
- print("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
+ print("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
+ print("!!! Automatic solver selection is a work-in-progress !!!")
+ print("!!! Always validate the computed results. !!!")
+ print("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
  print("")
 
  if self._check_posterior(self, Gaussian, Gaussian, LinearModel, max_dim=config.MAX_DIM_INV):
@@ -288,7 +288,7 @@ class BayesianProblem(object):
  x_MAP.info = solver_info
  return x_MAP
 
- def sample_posterior(self, Ns, Nb=None, callback=None) -> cuqi.samples.Samples:
+ def sample_posterior(self, Ns, Nb=None, callback=None, experimental=False) -> cuqi.samples.Samples:
  """Sample the posterior. Sampler choice and tuning is handled automatically.
 
  Parameters
@@ -305,6 +305,9 @@ class BayesianProblem(object):
  where `sample` is the current sample and `sample_index` is the index of the sample.
  An example is shown in demos/demo31_callback.py.
 
+ experimental : bool, *Optional*
+ If set to True, the sampler selection will use the samplers from the :mod:`cuqi.experimental.mcmc` module.
+
  Returns
  -------
  samples : cuqi.samples.Samples
@@ -313,12 +316,18 @@ class BayesianProblem(object):
  """
 
  # Print warning to user about the automatic sampler selection
- print("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
- print("!!! Automatic sampler selection is experimental. !!!")
- print("!!! Always validate the computed results. !!!")
- print("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
+ print("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
+ print("!!! Automatic sampler selection is a work-in-progress. !!!")
+ print("!!! Always validate the computed results. !!!")
+ print("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
  print("")
 
+ if experimental:
+ print("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
+ print("!!! Using samplers from cuqi.experimental.mcmc !!!")
+ print("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
+ print("")
+
  # Set up burn-in if not provided
  if Nb is None:
  Nb = int(0.2*Ns)
@@ -326,7 +335,7 @@ class BayesianProblem(object):
  # If target is a joint distribution, try Gibbs sampling
  # This is still very experimental!
  if isinstance(self._target, JointDistribution):
- return self._sampleGibbs(Ns, Nb, callback=callback)
+ return self._sampleGibbs(Ns, Nb, callback=callback, experimental=experimental)
 
  # For Gaussian small-scale we can use direct sampling
  if self._check_posterior(self, Gaussian, Gaussian, LinearModel, config.MAX_DIM_INV) and not self._check_posterior(self, GMRF):
@@ -334,24 +343,24 @@ class BayesianProblem(object):
 
  # For larger-scale Gaussian we use Linear RTO. TODO: Improve checking once we have a common Gaussian class.
  elif hasattr(self.prior,"sqrtprecTimesMean") and hasattr(self.likelihood.distribution,"sqrtprec") and isinstance(self.model,LinearModel):
- return self._sampleLinearRTO(Ns, Nb, callback)
+ return self._sampleLinearRTO(Ns, Nb, callback, experimental=experimental)
 
  # For LMRF we use our awesome unadjusted Laplace approximation!
  elif self._check_posterior(self, LMRF, Gaussian):
- return self._sampleUGLA(Ns, Nb, callback)
+ return self._sampleUGLA(Ns, Nb, callback, experimental=experimental)
 
  # If we have gradients, use NUTS!
  # TODO: Fix cases where we have gradients but NUTS fails (see checks)
  elif self._check_posterior(self, must_have_gradient=True) and not self._check_posterior(self, (Beta, InverseGamma, Lognormal)):
- return self._sampleNUTS(Ns, Nb, callback)
+ return self._sampleNUTS(Ns, Nb, callback, experimental=experimental)
 
  # For Gaussians with non-linear model we use pCN
  elif self._check_posterior(self, (Gaussian, GMRF), Gaussian):
- return self._samplepCN(Ns, Nb, callback)
+ return self._samplepCN(Ns, Nb, callback, experimental=experimental)
 
  # For Regularized Gaussians with linear models we use RegularizedLinearRTO
  elif self._check_posterior(self, (RegularizedGaussian, RegularizedGMRF), Gaussian, LinearModel):
- return self._sampleRegularizedLinearRTO(Ns, Nb, callback)
+ return self._sampleRegularizedLinearRTO(Ns, Nb, callback, experimental=experimental)
 
  else:
  raise NotImplementedError(f"Automatic sampler choice is not implemented for model: {type(self.model)}, likelihood: {type(self.likelihood.distribution)} and prior: {type(self.prior)} and dim {self.prior.dim}. Manual sampler choice can be done via the 'sampler' module. Posterior distribution can be extracted via '.posterior' of any testproblem (BayesianProblem).")
@@ -384,7 +393,7 @@ class BayesianProblem(object):
  # Now sample prior problem
  return prior_problem.sample_posterior(Ns, Nb, callback)
 
- def UQ(self, Ns=1000, Nb=None, percent=95, exact=None) -> cuqi.samples.Samples:
+ def UQ(self, Ns=1000, Nb=None, percent=95, exact=None, experimental=False) -> cuqi.samples.Samples:
  """ Run an Uncertainty Quantification (UQ) analysis on the Bayesian problem and provide a summary of the results.
 
  Parameters
@@ -402,13 +411,16 @@ class BayesianProblem(object):
  percent : float, *Optional*
  The credible interval to plot. Defaults to 95%.
 
+ experimental : bool, *Optional*
+ If set to True, the sampler selection will use the samplers from the :mod:`cuqi.experimental.mcmc` module.
+
  Returns
  -------
  samples : cuqi.samples.Samples
  Samples from the posterior. The samples can be used to compute further statistics and plots.
  """
  print(f"Computing {Ns} samples")
- samples = self.sample_posterior(Ns, Nb)
+ samples = self.sample_posterior(Ns, Nb, experimental=experimental)
 
  print("Plotting results")
  # Gibbs case
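
To see the new experimental flag end-to-end: UQ() simply forwards it to sample_posterior(), which then routes every _sample* helper through cuqi.experimental.mcmc instead of cuqi.sampler. A minimal usage sketch; the Deconvolution1D setup below is an illustrative assumption, not part of this diff:

import numpy as np
import cuqi

# Small linear-Gaussian stand-in problem (illustrative choice of test problem).
A, y_data, info = cuqi.testproblem.Deconvolution1D().get_components()
x = cuqi.distribution.Gaussian(np.zeros(A.domain_dim), 0.1)
y = cuqi.distribution.Gaussian(A @ x, 0.05)
BP = cuqi.problem.BayesianProblem(y, x).set_data(y=y_data)

# Default: sampler dispatch goes through the legacy cuqi.sampler module.
samples = BP.sample_posterior(200)

# New flag: dispatch goes through cuqi.experimental.mcmc instead.
samples_exp = BP.sample_posterior(200, experimental=True)

# UQ() forwards the flag to sample_posterior().
BP.UQ(Ns=200, experimental=True)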
@@ -475,19 +487,37 @@ class BayesianProblem(object):
  samples.funvals.vector.plot_variance()
  plt.title("Sample variance of function representation")
 
- def _sampleLinearRTO(self, Ns, Nb, callback=None):
- print("Using LinearRTO sampler.")
- print(f"burn-in: {Nb/Ns*100:g}%")
+ def _sampleLinearRTO(self, Ns, Nb, callback=None, experimental=False):
 
- # Start timing
- ti = time.time()
+ if experimental:
 
- # Sample
- sampler = cuqi.sampler.LinearRTO(self.posterior, callback=callback)
- samples = sampler.sample(Ns, Nb)
+ print("Using cuqi.experimental.mcmc LinearRTO sampler.")
+ print(f"burn-in: {Nb/Ns*100:g}%")
 
- # Print timing
- print('Elapsed time:', time.time() - ti)
+ sampler = cuqi.experimental.mcmc.LinearRTO(self.posterior, callback=callback)
+
+ ti = time.time()
+
+ sampler.warmup(Nb)
+ sampler.sample(Ns)
+ samples = sampler.get_samples().burnthin(Nb)
+
+ print('Elapsed time:', time.time() - ti)
+
+ else:
+
+ print("Using cuqi.sampler LinearRTO sampler.")
+ print(f"burn-in: {Nb/Ns*100:g}%")
+
+ # Start timing
+ ti = time.time()
+
+ # Sample
+ sampler = cuqi.sampler.LinearRTO(self.posterior, callback=callback)
+ samples = sampler.sample(Ns, Nb)
+
+ # Print timing
+ print('Elapsed time:', time.time() - ti)
 
  return samples
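
All of the experimental branches introduced in this file follow the same manual warmup/sample/burnthin pattern in place of the legacy one-call sample(Ns, Nb). A condensed sketch of the two patterns; the posterior is taken from a built-in test problem purely for illustration:

import cuqi

# Placeholder posterior and chain lengths (illustrative values).
posterior = cuqi.testproblem.Deconvolution1D().posterior
Ns, Nb = 200, 40

# Legacy pattern (cuqi.sampler): one call, burn-in handled internally.
sampler = cuqi.sampler.LinearRTO(posterior)
samples = sampler.sample(Ns, Nb)

# Experimental pattern (cuqi.experimental.mcmc): explicit phases, as in the diff above.
sampler = cuqi.experimental.mcmc.LinearRTO(posterior)
sampler.warmup(Nb)                            # tuning / burn-in phase
sampler.sample(Ns)                            # main sampling phase
samples = sampler.get_samples().burnthin(Nb)  # drop the warmup draws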
@@ -532,90 +562,180 @@ class BayesianProblem(object):
 
  return cuqi.samples.Samples(x_s,self.model.domain_geometry)
 
- def _sampleCWMH(self, Ns, Nb, callback=None):
- print("Using Component-wise Metropolis-Hastings (CWMH) sampler (sample_adapt)")
- print(f"burn-in: {Nb/Ns*100:g}%, scale: 0.05, x0: 0.5 (vector)")
+ def _sampleCWMH(self, Ns, Nb, callback=None, experimental=False):
 
- # Dimension
- n = self.prior.dim
-
- # Set up target and proposal
- def proposal(x_t, sigma): return np.random.normal(x_t, sigma)
+ if experimental:
 
- # Set up sampler
- scale = 0.05*np.ones(n)
- x0 = 0.5*np.ones(n)
- MCMC = cuqi.sampler.CWMH(self.posterior, proposal, scale, x0, callback=callback)
-
- # Run sampler
- ti = time.time()
- x_s = MCMC.sample_adapt(Ns,Nb); #ToDo: Make results class
- print('Elapsed time:', time.time() - ti)
+ print("Using cuqi.experimental.mcmc Component-wise Metropolis-Hastings (CWMH) sampler.")
+ print(f"burn-in: {Nb/Ns*100:g}%, scale: 0.05, x0: 0.5 (vector)")
+
+ scale = 0.05*np.ones(self.prior.dim)
+ x0 = 0.5*np.ones(self.prior.dim)
+
+ sampler = cuqi.experimental.mcmc.CWMH(self.posterior, scale, x0, callback=callback)
+
+ ti = time.time()
+
+ sampler.warmup(Nb)
+ sampler.sample(Ns)
+ x_s = sampler.get_samples().burnthin(Nb)
+
+ print('Elapsed time:', time.time() - ti)
+
+ else:
+
+ print("Using cuqi.sampler Component-wise Metropolis-Hastings (CWMH) sampler (sample_adapt)")
+ print(f"burn-in: {Nb/Ns*100:g}%, scale: 0.05, x0: 0.5 (vector)")
+
+ # Dimension
+ n = self.prior.dim
+
+ # Set up target and proposal
+ def proposal(x_t, sigma): return np.random.normal(x_t, sigma)
+
+ # Set up sampler
+ scale = 0.05*np.ones(n)
+ x0 = 0.5*np.ones(n)
+ MCMC = cuqi.sampler.CWMH(self.posterior, proposal, scale, x0, callback=callback)
+
+ # Run sampler
+ ti = time.time()
+ x_s = MCMC.sample_adapt(Ns,Nb); #ToDo: Make results class
+ print('Elapsed time:', time.time() - ti)
 
  return x_s
 
- def _samplepCN(self, Ns, Nb, callback=None):
- print("Using preconditioned Crank-Nicolson (pCN) sampler (sample_adapt)")
- print(f"burn-in: {Nb/Ns*100:g}%, scale: 0.02")
+ def _samplepCN(self, Ns, Nb, callback=None, experimental=False):
 
- scale = 0.02
- #x0 = np.zeros(n)
-
- MCMC = cuqi.sampler.pCN(self.posterior, scale, callback=callback)
-
- #Run sampler
- ti = time.time()
- x_s = MCMC.sample_adapt(Ns, Nb)
- print('Elapsed time:', time.time() - ti)
+ if experimental:
+
+ print("Using cuqi.experimental.mcmc preconditioned Crank-Nicolson (pCN) sampler.")
+ print(f"burn-in: {Nb/Ns*100:g}%, scale: 0.02")
+
+ scale = 0.02
+
+ sampler = cuqi.experimental.mcmc.pCN(self.posterior, scale, callback=callback)
+
+ ti = time.time()
+
+ sampler.warmup(Nb)
+ sampler.sample(Ns)
+ x_s = sampler.get_samples().burnthin(Nb)
+
+ print('Elapsed time:', time.time() - ti)
+
+ else:
+
+ print("Using cuqi.sampler preconditioned Crank-Nicolson (pCN) sampler (sample_adapt)")
+ print(f"burn-in: {Nb/Ns*100:g}%, scale: 0.02")
+
+ scale = 0.02
+
+ MCMC = cuqi.sampler.pCN(self.posterior, scale, callback=callback)
+
+ #Run sampler
+ ti = time.time()
+ x_s = MCMC.sample_adapt(Ns, Nb)
+ print('Elapsed time:', time.time() - ti)
 
  return x_s
 
- def _sampleNUTS(self, Ns, Nb, callback=None):
- print("Using No-U-Turn (NUTS) sampler")
- print(f"burn-in: {Nb/Ns*100:g}%")
+ def _sampleNUTS(self, Ns, Nb, callback=None, experimental=False):
 
- # MAP
- #print("Computing MAP ESTIMATE")
- #x_map, _ = self.MAP()
-
- MCMC = cuqi.sampler.NUTS(self.posterior, callback=callback)
-
- # Run sampler
- ti = time.time()
- x_s = MCMC.sample_adapt(Ns,Nb)
- print('Elapsed time:', time.time() - ti)
+ if experimental:
+
+ print("Using cuqi.experimental.mcmc No-U-Turn (NUTS) sampler.")
+ print(f"burn-in: {Nb/Ns*100:g}%")
+
+ sampler = cuqi.experimental.mcmc.NUTS(self.posterior, callback=callback)
+
+ ti = time.time()
+
+ sampler.warmup(Nb)
+ sampler.sample(Ns)
+ x_s = sampler.get_samples().burnthin(Nb)
+
+ print('Elapsed time:', time.time() - ti)
+
+ else:
+
+ print("Using cuqi.sampler No-U-Turn (NUTS) sampler")
+ print(f"burn-in: {Nb/Ns*100:g}%")
+
+ MCMC = cuqi.sampler.NUTS(self.posterior, callback=callback)
+
+ # Run sampler
+ ti = time.time()
+ x_s = MCMC.sample_adapt(Ns,Nb)
+ print('Elapsed time:', time.time() - ti)
 
  return x_s
 
- def _sampleUGLA(self, Ns, Nb, callback=None):
- print("Using UGLA sampler")
- print(f"burn-in: {Nb/Ns*100:g}%")
+ def _sampleUGLA(self, Ns, Nb, callback=None, experimental=False):
 
- # Start timing
- ti = time.time()
+ if experimental:
 
- # Sample
- sampler = cuqi.sampler.UGLA(self.posterior, callback=callback)
- samples = sampler.sample(Ns, Nb)
+ print("Using cuqi.experimental.mcmc Unadjusted Gaussian Laplace Approximation (UGLA) sampler.")
+ print(f"burn-in: {Nb/Ns*100:g}%")
 
- # Print timing
- print('Elapsed time:', time.time() - ti)
+ sampler = cuqi.experimental.mcmc.UGLA(self.posterior, callback=callback)
+
+ ti = time.time()
+
+ sampler.warmup(Nb)
+ sampler.sample(Ns)
+ samples = sampler.get_samples().burnthin(Nb)
+
+ print('Elapsed time:', time.time() - ti)
+
+ else:
+
+ print("Using cuqi.sampler UGLA sampler")
+ print(f"burn-in: {Nb/Ns*100:g}%")
+
+ # Start timing
+ ti = time.time()
+
+ # Sample
+ sampler = cuqi.sampler.UGLA(self.posterior, callback=callback)
+ samples = sampler.sample(Ns, Nb)
+
+ # Print timing
+ print('Elapsed time:', time.time() - ti)
 
  return samples
 
- def _sampleRegularizedLinearRTO(self, Ns, Nb, callback=None):
- print("Using Regularized LinearRTO sampler.")
- print(f"burn-in: {Nb/Ns*100:g}%")
+ def _sampleRegularizedLinearRTO(self, Ns, Nb, callback=None, experimental=False):
 
- # Start timing
- ti = time.time()
+ if experimental:
 
- # Sample
- sampler = cuqi.sampler.RegularizedLinearRTO(self.posterior, maxit=100, stepsize = "automatic", abstol=1e-10, callback=callback)
- samples = sampler.sample(Ns, Nb)
+ print("Using cuqi.experimental.mcmc Regularized LinearRTO sampler.")
+ print(f"burn-in: {Nb/Ns*100:g}%")
 
- # Print timing
- print('Elapsed time:', time.time() - ti)
+ sampler = cuqi.experimental.mcmc.RegularizedLinearRTO(self.posterior, maxit=100, stepsize = "automatic", abstol=1e-10, callback=callback)
+
+ ti = time.time()
+
+ sampler.warmup(Nb)
+ sampler.sample(Ns)
+ samples = sampler.get_samples().burnthin(Nb)
+
+ print('Elapsed time:', time.time() - ti)
+
+ else:
+
+ print("Using cuqi.sampler Regularized LinearRTO sampler.")
+ print(f"burn-in: {Nb/Ns*100:g}%")
+
+ # Start timing
+ ti = time.time()
+
+ # Sample
+ sampler = cuqi.sampler.RegularizedLinearRTO(self.posterior, maxit=100, stepsize = "automatic", abstol=1e-10, callback=callback)
+ samples = sampler.sample(Ns, Nb)
+
+ # Print timing
+ print('Elapsed time:', time.time() - ti)
 
  return samples
 
@@ -719,31 +839,61 @@ class BayesianProblem(object):
 
  return L and P and M and D and G
 
- def _sampleGibbs(self, Ns, Nb, callback=None):
+ def _sampleGibbs(self, Ns, Nb, callback=None, experimental=False):
  """ This is a helper function for sampling from the posterior using Gibbs sampler. """
 
- print("Using Gibbs sampler")
- print(f"burn-in: {Nb/Ns*100:g}%")
- print("")
+ if experimental:
 
- if callback is not None:
- raise NotImplementedError("Callback not implemented for Gibbs sampler")
+ print("Using cuqi.experimental.mcmc HybridGibbs sampler")
+ print(f"burn-in: {Nb/Ns*100:g}%")
+ print("")
 
- # Start timing
- ti = time.time()
+ if callback is not None:
+ raise NotImplementedError("Callback not implemented for Gibbs sampler")
 
- # Sampling strategy
- sampling_strategy = self._determine_sampling_strategy()
+ # Start timing
+ ti = time.time()
 
- sampler = cuqi.sampler.Gibbs(self._target, sampling_strategy)
- samples = sampler.sample(Ns, Nb)
+ # Sampling strategy
+ sampling_strategy = self._determine_sampling_strategy(experimental=True)
 
- # Print timing
- print('Elapsed time:', time.time() - ti)
+ sampler = cuqi.experimental.mcmc.HybridGibbs(self._target, sampling_strategy)
+ sampler.warmup(Nb)
+ sampler.sample(Ns)
+ samples = sampler.get_samples()
+ # Dict with Samples objects for each parameter
+ # Now apply burnthin to each value in dict
+ for key, value in samples.items():
+ samples[key] = value.burnthin(Nb)
+
+ # Print timing
+ print('Elapsed time:', time.time() - ti)
+
+ else:
+
+ print("Using Gibbs sampler")
+ print(f"burn-in: {Nb/Ns*100:g}%")
+ print("")
+
+ if callback is not None:
+ raise NotImplementedError("Callback not implemented for Gibbs sampler")
+
+ # Start timing
+ ti = time.time()
+
+ # Sampling strategy
+ sampling_strategy = self._determine_sampling_strategy()
+
+ sampler = cuqi.sampler.Gibbs(self._target, sampling_strategy)
+ samples = sampler.sample(Ns, Nb)
+
+ # Print timing
+ print('Elapsed time:', time.time() - ti)
 
  return samples
 
- def _determine_sampling_strategy(self):
+
+ def _determine_sampling_strategy(self, experimental=False):
  """ This is a helper function for determining the sampling strategy for Gibbs sampler.
 
  It is still very experimental and not very robust.
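
Unlike the single-chain samplers, the experimental HybridGibbs branch gets back one Samples object per parameter, so the burn-in is removed entry by entry. A small sketch of that post-processing step, continuing from a HybridGibbs run like the one in the branch above:

samples = sampler.get_samples()   # dict of Samples, e.g. {"x": ..., "s": ...}
# One-line equivalent of the per-key loop in the diff above.
samples = {name: chain.burnthin(Nb) for name, chain in samples.items()}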
@@ -774,31 +924,49 @@ class BayesianProblem(object):
  raise NotImplementedError(f"Unable to determine sampling strategy for {par_name} with target {cond_target}")
 
  # Gamma prior, Gaussian likelihood -> Conjugate
- if self._check_posterior(cond_target, Gamma, (Gaussian, GMRF, RegularizedGaussian, RegularizedGMRF)):
- sampling_strategy[par_name] = cuqi.sampler.Conjugate
+ if self._check_posterior(cond_target, Gamma, (Gaussian, GMRF, RegularizedGaussian, RegularizedGMRF)):
+ if experimental:
+ sampling_strategy[par_name] = cuqi.experimental.mcmc.Conjugate()
+ else:
+ sampling_strategy[par_name] = cuqi.sampler.Conjugate
 
  # Gamma prior, LMRF likelihood -> ConjugateApprox
  elif self._check_posterior(cond_target, Gamma, LMRF):
- sampling_strategy[par_name] = cuqi.sampler.ConjugateApprox
+ if experimental:
+ sampling_strategy[par_name] = cuqi.experimental.mcmc.ConjugateApprox()
+ else:
+ sampling_strategy[par_name] = cuqi.sampler.ConjugateApprox
 
  # Gaussian prior, Gaussian likelihood, Linear model -> LinearRTO
  elif self._check_posterior(cond_target, (Gaussian, GMRF), Gaussian, LinearModel):
- sampling_strategy[par_name] = cuqi.sampler.LinearRTO
+ if experimental:
+ sampling_strategy[par_name] = cuqi.experimental.mcmc.LinearRTO()
+ else:
+ sampling_strategy[par_name] = cuqi.sampler.LinearRTO
 
  # Implicit Regularized Gaussian prior, Gaussian likelihood, linear model -> RegularizedLinearRTO
  elif self._check_posterior(cond_target, (RegularizedGaussian, RegularizedGMRF), Gaussian, LinearModel):
- sampling_strategy[par_name] = cuqi.sampler.RegularizedLinearRTO
+ if experimental:
+ sampling_strategy[par_name] = cuqi.experimental.mcmc.RegularizedLinearRTO()
+ else:
+ sampling_strategy[par_name] = cuqi.sampler.RegularizedLinearRTO
 
  # LMRF prior, Gaussian likelihood, Linear model -> UGLA
  elif self._check_posterior(cond_target, LMRF, Gaussian, LinearModel):
- sampling_strategy[par_name] = cuqi.sampler.UGLA
+ if experimental:
+ sampling_strategy[par_name] = cuqi.experimental.mcmc.UGLA()
+ else:
+ sampling_strategy[par_name] = cuqi.sampler.UGLA
 
  else:
  raise NotImplementedError(f"Unable to determine sampling strategy for {par_name} with target {cond_target}")
 
  print("Automatically determined sampling strategy:")
  for dist_name, strategy in sampling_strategy.items():
- print(f"\t{dist_name}: {strategy.__name__}")
+ if experimental:
+ print(f"\t{dist_name}: {strategy.__class__.__name__} (mcmc.experimental)")
+ else:
+ print(f"\t{dist_name}: {strategy.__name__}")
  print("")
 
  return sampling_strategy
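
Net effect of the strategy changes: the legacy Gibbs path maps each parameter name to a sampler class, while the experimental HybridGibbs path maps it to a sampler instance. A hedged sketch of the two dictionary shapes (the parameter names "x" and "s" are placeholders):

import cuqi

legacy_strategy = {
    "x": cuqi.sampler.LinearRTO,              # class, instantiated by Gibbs
    "s": cuqi.sampler.Conjugate,
}

experimental_strategy = {
    "x": cuqi.experimental.mcmc.LinearRTO(),  # instance, used by HybridGibbs
    "s": cuqi.experimental.mcmc.Conjugate(),
}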