CUQIpy 1.1.1.post0.dev36__py3-none-any.whl → 1.4.1.post0.dev124__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of CUQIpy might be problematic. Click here for more details.

Files changed (92) hide show
  1. cuqi/__init__.py +2 -0
  2. cuqi/_version.py +3 -3
  3. cuqi/algebra/__init__.py +2 -0
  4. cuqi/algebra/_abstract_syntax_tree.py +358 -0
  5. cuqi/algebra/_ordered_set.py +82 -0
  6. cuqi/algebra/_random_variable.py +457 -0
  7. cuqi/array/_array.py +4 -13
  8. cuqi/config.py +7 -0
  9. cuqi/density/_density.py +9 -1
  10. cuqi/distribution/__init__.py +3 -2
  11. cuqi/distribution/_beta.py +7 -11
  12. cuqi/distribution/_cauchy.py +2 -2
  13. cuqi/distribution/_custom.py +0 -6
  14. cuqi/distribution/_distribution.py +31 -45
  15. cuqi/distribution/_gamma.py +7 -3
  16. cuqi/distribution/_gaussian.py +2 -12
  17. cuqi/distribution/_inverse_gamma.py +4 -10
  18. cuqi/distribution/_joint_distribution.py +112 -15
  19. cuqi/distribution/_lognormal.py +0 -7
  20. cuqi/distribution/{_modifiedhalfnormal.py → _modified_half_normal.py} +23 -23
  21. cuqi/distribution/_normal.py +34 -7
  22. cuqi/distribution/_posterior.py +9 -0
  23. cuqi/distribution/_truncated_normal.py +129 -0
  24. cuqi/distribution/_uniform.py +47 -1
  25. cuqi/experimental/__init__.py +2 -2
  26. cuqi/experimental/_recommender.py +216 -0
  27. cuqi/geometry/__init__.py +2 -0
  28. cuqi/geometry/_geometry.py +15 -1
  29. cuqi/geometry/_product_geometry.py +181 -0
  30. cuqi/implicitprior/__init__.py +5 -3
  31. cuqi/implicitprior/_regularized_gaussian.py +483 -0
  32. cuqi/implicitprior/{_regularizedGMRF.py → _regularized_gmrf.py} +4 -2
  33. cuqi/implicitprior/{_regularizedUnboundedUniform.py → _regularized_unbounded_uniform.py} +3 -2
  34. cuqi/implicitprior/_restorator.py +269 -0
  35. cuqi/legacy/__init__.py +2 -0
  36. cuqi/{experimental/mcmc → legacy/sampler}/__init__.py +7 -11
  37. cuqi/legacy/sampler/_conjugate.py +55 -0
  38. cuqi/legacy/sampler/_conjugate_approx.py +52 -0
  39. cuqi/legacy/sampler/_cwmh.py +196 -0
  40. cuqi/legacy/sampler/_gibbs.py +231 -0
  41. cuqi/legacy/sampler/_hmc.py +335 -0
  42. cuqi/{experimental/mcmc → legacy/sampler}/_langevin_algorithm.py +82 -111
  43. cuqi/legacy/sampler/_laplace_approximation.py +184 -0
  44. cuqi/legacy/sampler/_mh.py +190 -0
  45. cuqi/legacy/sampler/_pcn.py +244 -0
  46. cuqi/{experimental/mcmc → legacy/sampler}/_rto.py +132 -90
  47. cuqi/legacy/sampler/_sampler.py +182 -0
  48. cuqi/likelihood/_likelihood.py +9 -1
  49. cuqi/model/__init__.py +1 -1
  50. cuqi/model/_model.py +1361 -359
  51. cuqi/pde/__init__.py +4 -0
  52. cuqi/pde/_observation_map.py +36 -0
  53. cuqi/pde/_pde.py +134 -33
  54. cuqi/problem/_problem.py +93 -87
  55. cuqi/sampler/__init__.py +120 -8
  56. cuqi/sampler/_conjugate.py +376 -35
  57. cuqi/sampler/_conjugate_approx.py +40 -16
  58. cuqi/sampler/_cwmh.py +132 -138
  59. cuqi/{experimental/mcmc → sampler}/_direct.py +1 -1
  60. cuqi/sampler/_gibbs.py +288 -130
  61. cuqi/sampler/_hmc.py +328 -201
  62. cuqi/sampler/_langevin_algorithm.py +284 -100
  63. cuqi/sampler/_laplace_approximation.py +87 -117
  64. cuqi/sampler/_mh.py +47 -157
  65. cuqi/sampler/_pcn.py +65 -213
  66. cuqi/sampler/_rto.py +211 -142
  67. cuqi/sampler/_sampler.py +553 -136
  68. cuqi/samples/__init__.py +1 -1
  69. cuqi/samples/_samples.py +24 -18
  70. cuqi/solver/__init__.py +6 -4
  71. cuqi/solver/_solver.py +230 -26
  72. cuqi/testproblem/_testproblem.py +2 -3
  73. cuqi/utilities/__init__.py +6 -1
  74. cuqi/utilities/_get_python_variable_name.py +2 -2
  75. cuqi/utilities/_utilities.py +182 -2
  76. {CUQIpy-1.1.1.post0.dev36.dist-info → cuqipy-1.4.1.post0.dev124.dist-info}/METADATA +10 -6
  77. cuqipy-1.4.1.post0.dev124.dist-info/RECORD +101 -0
  78. {CUQIpy-1.1.1.post0.dev36.dist-info → cuqipy-1.4.1.post0.dev124.dist-info}/WHEEL +1 -1
  79. CUQIpy-1.1.1.post0.dev36.dist-info/RECORD +0 -92
  80. cuqi/experimental/mcmc/_conjugate.py +0 -197
  81. cuqi/experimental/mcmc/_conjugate_approx.py +0 -81
  82. cuqi/experimental/mcmc/_cwmh.py +0 -191
  83. cuqi/experimental/mcmc/_gibbs.py +0 -268
  84. cuqi/experimental/mcmc/_hmc.py +0 -470
  85. cuqi/experimental/mcmc/_laplace_approximation.py +0 -156
  86. cuqi/experimental/mcmc/_mh.py +0 -78
  87. cuqi/experimental/mcmc/_pcn.py +0 -89
  88. cuqi/experimental/mcmc/_sampler.py +0 -561
  89. cuqi/experimental/mcmc/_utilities.py +0 -17
  90. cuqi/implicitprior/_regularizedGaussian.py +0 -323
  91. {CUQIpy-1.1.1.post0.dev36.dist-info → cuqipy-1.4.1.post0.dev124.dist-info/licenses}/LICENSE +0 -0
  92. {CUQIpy-1.1.1.post0.dev36.dist-info → cuqipy-1.4.1.post0.dev124.dist-info}/top_level.txt +0 -0
cuqi/problem/_problem.py CHANGED
@@ -5,7 +5,7 @@ from typing import Tuple
5
5
 
6
6
  import cuqi
7
7
  from cuqi import config
8
- from cuqi.distribution import Distribution, Gaussian, InverseGamma, LMRF, GMRF, Lognormal, Posterior, Beta, JointDistribution, Gamma, CMRF
8
+ from cuqi.distribution import Distribution, Gaussian, InverseGamma, LMRF, GMRF, Lognormal, Posterior, Beta, JointDistribution, Gamma, ModifiedHalfNormal, CMRF
9
9
  from cuqi.implicitprior import RegularizedGaussian, RegularizedGMRF
10
10
  from cuqi.density import Density
11
11
  from cuqi.model import LinearModel, Model
@@ -288,7 +288,7 @@ class BayesianProblem(object):
288
288
  x_MAP.info = solver_info
289
289
  return x_MAP
290
290
 
291
- def sample_posterior(self, Ns, Nb=None, callback=None, experimental=False) -> cuqi.samples.Samples:
291
+ def sample_posterior(self, Ns, Nb=None, callback=None, legacy=False) -> cuqi.samples.Samples:
292
292
  """Sample the posterior. Sampler choice and tuning is handled automatically.
293
293
 
294
294
  Parameters
@@ -297,16 +297,24 @@ class BayesianProblem(object):
297
297
  Number of samples to draw.
298
298
 
299
299
  Nb : int or None, *Optional*
300
- Number of burn-in samples. If not provided, 20% of the samples will be used for burn-in.
300
+ Number of burn-in samples. If not provided, 20% of the samples will be used
301
+ for burn-in.
301
302
 
302
303
  callback : callable, *Optional*
303
304
  If set this function will be called after every sample.
304
- The signature of the callback function is `callback(sample, sample_index)`,
305
- where `sample` is the current sample and `sample_index` is the index of the sample.
305
+ If the parameter `legacy` is set to False, which is the default, the callback
306
+ function should take three arguments: the sampler object, the index of the
307
+ current sampling step, the total number of requested samples. The last two
308
+ arguments are integers. An example of the callback function signature in this
309
+ case is: `callback(sampler, sample_index, num_of_samples)`.
310
+ If the parameter `legacy` is set to True, the signature of the callback
311
+ function is `callback(sample, sample_index)`, where `sample` is the current
312
+ sample and `sample_index` is the index of the sample.
306
313
  An example is shown in demos/demo31_callback.py.
307
314
 
308
- experimental : bool, *Optional*
309
- If set to True, the sampler selection will use the samplers from the :mod:`cuqi.experimental.mcmc` module.
315
+ legacy : bool, *Optional*
316
+ Default is False. If set to True, the sampler selection will use the samplers from the legacy sampler module, :mod:`cuqi.legacy.sampler`.
317
+
310
318
 
311
319
  Returns
312
320
  -------
@@ -322,9 +330,9 @@ class BayesianProblem(object):
322
330
  print("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
323
331
  print("")
324
332
 
325
- if experimental:
333
+ if legacy:
326
334
  print("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
327
- print("!!! Using samplers from cuqi.experimental.mcmc !!!")
335
+ print("!! Using legacy samplers from cuqi.legacy.sampler !!")
328
336
  print("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
329
337
  print("")
330
338
 
@@ -335,7 +343,7 @@ class BayesianProblem(object):
335
343
  # If target is a joint distribution, try Gibbs sampling
336
344
  # This is still very experimental!
337
345
  if isinstance(self._target, JointDistribution):
338
- return self._sampleGibbs(Ns, Nb, callback=callback, experimental=experimental)
346
+ return self._sampleGibbs(Ns, Nb, callback=callback, legacy=legacy)
339
347
 
340
348
  # For Gaussian small-scale we can use direct sampling
341
349
  if self._check_posterior(self, Gaussian, Gaussian, LinearModel, config.MAX_DIM_INV) and not self._check_posterior(self, GMRF):
@@ -343,24 +351,24 @@ class BayesianProblem(object):
343
351
 
344
352
  # For larger-scale Gaussian we use Linear RTO. TODO: Improve checking once we have a common Gaussian class.
345
353
  elif hasattr(self.prior,"sqrtprecTimesMean") and hasattr(self.likelihood.distribution,"sqrtprec") and isinstance(self.model,LinearModel):
346
- return self._sampleLinearRTO(Ns, Nb, callback, experimental=experimental)
354
+ return self._sampleLinearRTO(Ns, Nb, callback, legacy=legacy)
347
355
 
348
356
  # For LMRF we use our awesome unadjusted Laplace approximation!
349
357
  elif self._check_posterior(self, LMRF, Gaussian):
350
- return self._sampleUGLA(Ns, Nb, callback, experimental=experimental)
358
+ return self._sampleUGLA(Ns, Nb, callback, legacy=legacy)
351
359
 
352
360
  # If we have gradients, use NUTS!
353
361
  # TODO: Fix cases where we have gradients but NUTS fails (see checks)
354
362
  elif self._check_posterior(self, must_have_gradient=True) and not self._check_posterior(self, (Beta, InverseGamma, Lognormal)):
355
- return self._sampleNUTS(Ns, Nb, callback, experimental=experimental)
363
+ return self._sampleNUTS(Ns, Nb, callback, legacy=legacy)
356
364
 
357
365
  # For Gaussians with non-linear model we use pCN
358
366
  elif self._check_posterior(self, (Gaussian, GMRF), Gaussian):
359
- return self._samplepCN(Ns, Nb, callback, experimental=experimental)
367
+ return self._samplepCN(Ns, Nb, callback, legacy=legacy)
360
368
 
361
369
  # For Regularized Gaussians with linear models we use RegularizedLinearRTO
362
370
  elif self._check_posterior(self, (RegularizedGaussian, RegularizedGMRF), Gaussian, LinearModel):
363
- return self._sampleRegularizedLinearRTO(Ns, Nb, callback, experimental=experimental)
371
+ return self._sampleRegularizedLinearRTO(Ns, Nb, callback, legacy=legacy)
364
372
 
365
373
  else:
366
374
  raise NotImplementedError(f"Automatic sampler choice is not implemented for model: {type(self.model)}, likelihood: {type(self.likelihood.distribution)} and prior: {type(self.prior)} and dim {self.prior.dim}. Manual sampler choice can be done via the 'sampler' module. Posterior distribution can be extracted via '.posterior' of any testproblem (BayesianProblem).")
@@ -393,7 +401,7 @@ class BayesianProblem(object):
393
401
  # Now sample prior problem
394
402
  return prior_problem.sample_posterior(Ns, Nb, callback)
395
403
 
396
- def UQ(self, Ns=1000, Nb=None, percent=95, exact=None, experimental=False) -> cuqi.samples.Samples:
404
+ def UQ(self, Ns=1000, Nb=None, percent=95, exact=None, legacy=False) -> cuqi.samples.Samples:
397
405
  """ Run an Uncertainty Quantification (UQ) analysis on the Bayesian problem and provide a summary of the results.
398
406
 
399
407
  Parameters
@@ -411,8 +419,8 @@ class BayesianProblem(object):
411
419
  percent : float, *Optional*
412
420
  The credible interval to plot. Defaults to 95%.
413
421
 
414
- experimental : bool, *Optional*
415
- If set to True, the sampler selection will use the samplers from the :mod:`cuqi.experimental.mcmc` module.
422
+ legacy : bool, *Optional*
423
+ Default is False. If set to True, the sampler selection will use the samplers from the legacy sampler module, :mod:`cuqi.legacy.sampler`.
416
424
 
417
425
  Returns
418
426
  -------
@@ -420,7 +428,7 @@ class BayesianProblem(object):
420
428
  Samples from the posterior. The samples can be used to compute further statistics and plots.
421
429
  """
422
430
  print(f"Computing {Ns} samples")
423
- samples = self.sample_posterior(Ns, Nb, experimental=experimental)
431
+ samples = self.sample_posterior(Ns, Nb, legacy=legacy)
424
432
 
425
433
  print("Plotting results")
426
434
  # Gibbs case
@@ -487,14 +495,14 @@ class BayesianProblem(object):
487
495
  samples.funvals.vector.plot_variance()
488
496
  plt.title("Sample variance of function representation")
489
497
 
490
- def _sampleLinearRTO(self, Ns, Nb, callback=None, experimental=False):
498
+ def _sampleLinearRTO(self, Ns, Nb, callback=None, legacy=False):
491
499
 
492
- if experimental:
500
+ if not legacy:
493
501
 
494
- print("Using cuqi.experimental.mcmc LinearRTO sampler.")
502
+ print("Using cuqi.sampler LinearRTO sampler.")
495
503
  print(f"burn-in: {Nb/Ns*100:g}%")
496
504
 
497
- sampler = cuqi.experimental.mcmc.LinearRTO(self.posterior, callback=callback)
505
+ sampler = cuqi.sampler.LinearRTO(self.posterior, callback=callback)
498
506
 
499
507
  ti = time.time()
500
508
 
@@ -506,14 +514,14 @@ class BayesianProblem(object):
506
514
 
507
515
  else:
508
516
 
509
- print("Using cuqi.sampler LinearRTO sampler.")
517
+ print("Using cuqi.legacy.sampler LinearRTO sampler.")
510
518
  print(f"burn-in: {Nb/Ns*100:g}%")
511
519
 
512
520
  # Start timing
513
521
  ti = time.time()
514
522
 
515
523
  # Sample
516
- sampler = cuqi.sampler.LinearRTO(self.posterior, callback=callback)
524
+ sampler = cuqi.legacy.sampler.LinearRTO(self.posterior, callback=callback)
517
525
  samples = sampler.sample(Ns, Nb)
518
526
 
519
527
  # Print timing
@@ -562,17 +570,17 @@ class BayesianProblem(object):
562
570
 
563
571
  return cuqi.samples.Samples(x_s,self.model.domain_geometry)
564
572
 
565
- def _sampleCWMH(self, Ns, Nb, callback=None, experimental=False):
573
+ def _sampleCWMH(self, Ns, Nb, callback=None, legacy=False):
566
574
 
567
- if experimental:
575
+ if not legacy:
568
576
 
569
- print("Using cuqi.experimental.mcmc Component-wise Metropolis-Hastings (CWMH) sampler.")
577
+ print("Using cuqi.sampler Component-wise Metropolis-Hastings (CWMH) sampler.")
570
578
  print(f"burn-in: {Nb/Ns*100:g}%, scale: 0.05, x0: 0.5 (vector)")
571
579
 
572
580
  scale = 0.05*np.ones(self.prior.dim)
573
581
  x0 = 0.5*np.ones(self.prior.dim)
574
582
 
575
- sampler = cuqi.experimental.mcmc.CWMH(self.posterior, scale, x0, callback=callback)
583
+ sampler = cuqi.sampler.CWMH(self.posterior, scale, x0, callback=callback)
576
584
 
577
585
  ti = time.time()
578
586
 
@@ -584,7 +592,7 @@ class BayesianProblem(object):
584
592
 
585
593
  else:
586
594
 
587
- print("Using cuqi.sampler Component-wise Metropolis-Hastings (CWMH) sampler (sample_adapt)")
595
+ print("Using cuqi.legacy.sampler Component-wise Metropolis-Hastings (CWMH) sampler (sample_adapt)")
588
596
  print(f"burn-in: {Nb/Ns*100:g}%, scale: 0.05, x0: 0.5 (vector)")
589
597
 
590
598
  # Dimension
@@ -596,7 +604,7 @@ class BayesianProblem(object):
596
604
  # Set up sampler
597
605
  scale = 0.05*np.ones(n)
598
606
  x0 = 0.5*np.ones(n)
599
- MCMC = cuqi.sampler.CWMH(self.posterior, proposal, scale, x0, callback=callback)
607
+ MCMC = cuqi.legacy.sampler.CWMH(self.posterior, proposal, scale, x0, callback=callback)
600
608
 
601
609
  # Run sampler
602
610
  ti = time.time()
@@ -605,16 +613,16 @@ class BayesianProblem(object):
605
613
 
606
614
  return x_s
607
615
 
608
- def _samplepCN(self, Ns, Nb, callback=None, experimental=False):
616
+ def _samplepCN(self, Ns, Nb, callback=None, legacy=False):
609
617
 
610
- if experimental:
618
+ if not legacy:
611
619
 
612
- print("Using cuqi.experimental.mcmc preconditioned Crank-Nicolson (pCN) sampler.")
620
+ print("Using cuqi.sampler preconditioned Crank-Nicolson (pCN) sampler.")
613
621
  print(f"burn-in: {Nb/Ns*100:g}%, scale: 0.02")
614
622
 
615
623
  scale = 0.02
616
624
 
617
- sampler = cuqi.experimental.mcmc.pCN(self.posterior, scale, callback=callback)
625
+ sampler = cuqi.sampler.PCN(self.posterior, scale, callback=callback)
618
626
 
619
627
  ti = time.time()
620
628
 
@@ -626,12 +634,12 @@ class BayesianProblem(object):
626
634
 
627
635
  else:
628
636
 
629
- print("Using cuqi.sampler preconditioned Crank-Nicolson (pCN) sampler (sample_adapt)")
637
+ print("Using cuqi.legacy.sampler preconditioned Crank-Nicolson (pCN) sampler (sample_adapt)")
630
638
  print(f"burn-in: {Nb/Ns*100:g}%, scale: 0.02")
631
639
 
632
640
  scale = 0.02
633
641
 
634
- MCMC = cuqi.sampler.pCN(self.posterior, scale, callback=callback)
642
+ MCMC = cuqi.legacy.sampler.pCN(self.posterior, scale, callback=callback)
635
643
 
636
644
  #Run sampler
637
645
  ti = time.time()
@@ -640,14 +648,14 @@ class BayesianProblem(object):
640
648
 
641
649
  return x_s
642
650
 
643
- def _sampleNUTS(self, Ns, Nb, callback=None, experimental=False):
651
+ def _sampleNUTS(self, Ns, Nb, callback=None, legacy=False):
644
652
 
645
- if experimental:
653
+ if not legacy:
646
654
 
647
- print("Using cuqi.experimental.mcmc No-U-Turn (NUTS) sampler.")
655
+ print("Using cuqi.sampler No-U-Turn (NUTS) sampler.")
648
656
  print(f"burn-in: {Nb/Ns*100:g}%")
649
657
 
650
- sampler = cuqi.experimental.mcmc.NUTS(self.posterior, callback=callback)
658
+ sampler = cuqi.sampler.NUTS(self.posterior, callback=callback)
651
659
 
652
660
  ti = time.time()
653
661
 
@@ -659,10 +667,10 @@ class BayesianProblem(object):
659
667
 
660
668
  else:
661
669
 
662
- print("Using cuqi.sampler No-U-Turn (NUTS) sampler")
670
+ print("Using cuqi.legacy.sampler No-U-Turn (NUTS) sampler")
663
671
  print(f"burn-in: {Nb/Ns*100:g}%")
664
672
 
665
- MCMC = cuqi.sampler.NUTS(self.posterior, callback=callback)
673
+ MCMC = cuqi.legacy.sampler.NUTS(self.posterior, callback=callback)
666
674
 
667
675
  # Run sampler
668
676
  ti = time.time()
@@ -671,14 +679,14 @@ class BayesianProblem(object):
671
679
 
672
680
  return x_s
673
681
 
674
- def _sampleUGLA(self, Ns, Nb, callback=None, experimental=False):
682
+ def _sampleUGLA(self, Ns, Nb, callback=None, legacy=False):
675
683
 
676
- if experimental:
684
+ if not legacy:
677
685
 
678
- print("Using cuqi.experimental.mcmc Unadjusted Gaussian Laplace Approximation (UGLA) sampler.")
686
+ print("Using cuqi.sampler Unadjusted Gaussian Laplace Approximation (UGLA) sampler.")
679
687
  print(f"burn-in: {Nb/Ns*100:g}%")
680
688
 
681
- sampler = cuqi.experimental.mcmc.UGLA(self.posterior, callback=callback)
689
+ sampler = cuqi.sampler.UGLA(self.posterior, callback=callback)
682
690
 
683
691
  ti = time.time()
684
692
 
@@ -690,14 +698,14 @@ class BayesianProblem(object):
690
698
 
691
699
  else:
692
700
 
693
- print("Using cuqi.sampler UGLA sampler")
701
+ print("Using cuqi.legacy.sampler UGLA sampler")
694
702
  print(f"burn-in: {Nb/Ns*100:g}%")
695
703
 
696
704
  # Start timing
697
705
  ti = time.time()
698
706
 
699
707
  # Sample
700
- sampler = cuqi.sampler.UGLA(self.posterior, callback=callback)
708
+ sampler = cuqi.legacy.sampler.UGLA(self.posterior, callback=callback)
701
709
  samples = sampler.sample(Ns, Nb)
702
710
 
703
711
  # Print timing
@@ -705,14 +713,14 @@ class BayesianProblem(object):
705
713
 
706
714
  return samples
707
715
 
708
- def _sampleRegularizedLinearRTO(self, Ns, Nb, callback=None, experimental=False):
716
+ def _sampleRegularizedLinearRTO(self, Ns, Nb, callback=None, legacy=False):
709
717
 
710
- if experimental:
718
+ if not legacy:
711
719
 
712
- print("Using cuqi.experimental.mcmc Regularized LinearRTO sampler.")
720
+ print("Using cuqi.sampler Regularized LinearRTO sampler.")
713
721
  print(f"burn-in: {Nb/Ns*100:g}%")
714
722
 
715
- sampler = cuqi.experimental.mcmc.RegularizedLinearRTO(self.posterior, maxit=100, stepsize = "automatic", abstol=1e-10, callback=callback)
723
+ sampler = cuqi.sampler.RegularizedLinearRTO(self.posterior, maxit=100, stepsize = "automatic", abstol=1e-10, callback=callback)
716
724
 
717
725
  ti = time.time()
718
726
 
@@ -724,14 +732,14 @@ class BayesianProblem(object):
724
732
 
725
733
  else:
726
734
 
727
- print("Using cuqi.sampler Regularized LinearRTO sampler.")
735
+ print("Using cuqi.legacy.sampler Regularized LinearRTO sampler.")
728
736
  print(f"burn-in: {Nb/Ns*100:g}%")
729
737
 
730
738
  # Start timing
731
739
  ti = time.time()
732
740
 
733
741
  # Sample
734
- sampler = cuqi.sampler.RegularizedLinearRTO(self.posterior, maxit=100, stepsize = "automatic", abstol=1e-10, callback=callback)
742
+ sampler = cuqi.legacy.sampler.RegularizedLinearRTO(self.posterior, maxit=100, stepsize = "automatic", abstol=1e-10, callback=callback)
735
743
  samples = sampler.sample(Ns, Nb)
736
744
 
737
745
  # Print timing
@@ -771,11 +779,11 @@ class BayesianProblem(object):
771
779
  if self._check_posterior(self, CMRF, must_have_gradient=True): # Use L-BFGS-B for CMRF prior as it has better performance for this multi-modal posterior
772
780
  if disp: print(f"Using scipy.optimize.L_BFGS_B on negative log of {density.__class__.__name__}")
773
781
  if disp: print("x0: ones vector")
774
- solver = cuqi.solver.L_BFGS_B(func, x0, gradfunc=gradfunc)
782
+ solver = cuqi.solver.ScipyLBFGSB(func, x0, gradfunc=gradfunc)
775
783
  else:
776
784
  if disp: print(f"Using scipy.optimize.minimize on negative log of {density.__class__.__name__}")
777
785
  if disp: print("x0: ones vector")
778
- solver = cuqi.solver.minimize(func, x0, gradfunc=gradfunc)
786
+ solver = cuqi.solver.ScipyMinimizer(func, x0, gradfunc=gradfunc)
779
787
 
780
788
  x_MAP, solver_info = solver.solve()
781
789
 
@@ -839,25 +847,23 @@ class BayesianProblem(object):
839
847
 
840
848
  return L and P and M and D and G
841
849
 
842
- def _sampleGibbs(self, Ns, Nb, callback=None, experimental=False):
850
+ def _sampleGibbs(self, Ns, Nb, callback=None, legacy=False):
843
851
  """ This is a helper function for sampling from the posterior using Gibbs sampler. """
844
852
 
845
- if experimental:
853
+ if not legacy:
846
854
 
847
- print("Using cuqi.experimental.mcmc HybridGibbs sampler")
855
+ print("Using cuqi.sampler HybridGibbs sampler")
848
856
  print(f"burn-in: {Nb/Ns*100:g}%")
849
857
  print("")
850
858
 
851
- if callback is not None:
852
- raise NotImplementedError("Callback not implemented for Gibbs sampler")
853
-
854
859
  # Start timing
855
860
  ti = time.time()
856
861
 
857
862
  # Sampling strategy
858
- sampling_strategy = self._determine_sampling_strategy(experimental=True)
863
+ sampling_strategy = self._determine_sampling_strategy(legacy=False)
859
864
 
860
- sampler = cuqi.experimental.mcmc.HybridGibbs(self._target, sampling_strategy)
865
+ sampler = cuqi.sampler.HybridGibbs(
866
+ self._target, sampling_strategy, callback=callback)
861
867
  sampler.warmup(Nb)
862
868
  sampler.sample(Ns)
863
869
  samples = sampler.get_samples()
@@ -876,7 +882,7 @@ class BayesianProblem(object):
876
882
  print("")
877
883
 
878
884
  if callback is not None:
879
- raise NotImplementedError("Callback not implemented for Gibbs sampler")
885
+ raise NotImplementedError("Callback not implemented for the legacy Gibbs sampler. It is only implemented for cuqi.sampler Gibbs (cuqi.sampler.HybridGibbs) sampler.")
880
886
 
881
887
  # Start timing
882
888
  ti = time.time()
@@ -884,7 +890,7 @@ class BayesianProblem(object):
884
890
  # Sampling strategy
885
891
  sampling_strategy = self._determine_sampling_strategy()
886
892
 
887
- sampler = cuqi.sampler.Gibbs(self._target, sampling_strategy)
893
+ sampler = cuqi.legacy.sampler.Gibbs(self._target, sampling_strategy)
888
894
  samples = sampler.sample(Ns, Nb)
889
895
 
890
896
  # Print timing
@@ -893,7 +899,7 @@ class BayesianProblem(object):
893
899
  return samples
894
900
 
895
901
 
896
- def _determine_sampling_strategy(self, experimental=False):
902
+ def _determine_sampling_strategy(self, legacy=True):
897
903
  """ This is a helper function for determining the sampling strategy for Gibbs sampler.
898
904
 
899
905
  It is still very experimental and not very robust.
@@ -923,48 +929,48 @@ class BayesianProblem(object):
923
929
  if not isinstance(cond_target, Posterior):
924
930
  raise NotImplementedError(f"Unable to determine sampling strategy for {par_name} with target {cond_target}")
925
931
 
926
- # Gamma prior, Gaussian likelihood -> Conjugate
927
- if self._check_posterior(cond_target, Gamma, (Gaussian, GMRF, RegularizedGaussian, RegularizedGMRF)):
928
- if experimental:
929
- sampling_strategy[par_name] = cuqi.experimental.mcmc.Conjugate()
932
+ # Gamma or ModifiedHalfNormal prior, Gaussian or RegularizedGaussian likelihood -> Conjugate
933
+ if self._check_posterior(cond_target, (Gamma, ModifiedHalfNormal), (Gaussian, GMRF, RegularizedGaussian, RegularizedGMRF)):
934
+ if not legacy:
935
+ sampling_strategy[par_name] = cuqi.sampler.Conjugate()
930
936
  else:
931
- sampling_strategy[par_name] = cuqi.sampler.Conjugate
937
+ sampling_strategy[par_name] = cuqi.legacy.sampler.Conjugate
932
938
 
933
939
  # Gamma prior, LMRF likelihood -> ConjugateApprox
934
940
  elif self._check_posterior(cond_target, Gamma, LMRF):
935
- if experimental:
936
- sampling_strategy[par_name] = cuqi.experimental.mcmc.ConjugateApprox()
941
+ if not legacy:
942
+ sampling_strategy[par_name] = cuqi.sampler.ConjugateApprox()
937
943
  else:
938
- sampling_strategy[par_name] = cuqi.sampler.ConjugateApprox
944
+ sampling_strategy[par_name] = cuqi.legacy.sampler.ConjugateApprox
939
945
 
940
946
  # Gaussian prior, Gaussian likelihood, Linear model -> LinearRTO
941
947
  elif self._check_posterior(cond_target, (Gaussian, GMRF), Gaussian, LinearModel):
942
- if experimental:
943
- sampling_strategy[par_name] = cuqi.experimental.mcmc.LinearRTO()
948
+ if not legacy:
949
+ sampling_strategy[par_name] = cuqi.sampler.LinearRTO()
944
950
  else:
945
- sampling_strategy[par_name] = cuqi.sampler.LinearRTO
951
+ sampling_strategy[par_name] = cuqi.legacy.sampler.LinearRTO
946
952
 
947
953
  # Implicit Regularized Gaussian prior, Gaussian likelihood, linear model -> RegularizedLinearRTO
948
954
  elif self._check_posterior(cond_target, (RegularizedGaussian, RegularizedGMRF), Gaussian, LinearModel):
949
- if experimental:
950
- sampling_strategy[par_name] = cuqi.experimental.mcmc.RegularizedLinearRTO()
955
+ if not legacy:
956
+ sampling_strategy[par_name] = cuqi.sampler.RegularizedLinearRTO()
951
957
  else:
952
- sampling_strategy[par_name] = cuqi.sampler.RegularizedLinearRTO
958
+ sampling_strategy[par_name] = cuqi.legacy.sampler.RegularizedLinearRTO
953
959
 
954
960
  # LMRF prior, Gaussian likelihood, Linear model -> UGLA
955
961
  elif self._check_posterior(cond_target, LMRF, Gaussian, LinearModel):
956
- if experimental:
957
- sampling_strategy[par_name] = cuqi.experimental.mcmc.UGLA()
962
+ if not legacy:
963
+ sampling_strategy[par_name] = cuqi.sampler.UGLA()
958
964
  else:
959
- sampling_strategy[par_name] = cuqi.sampler.UGLA
965
+ sampling_strategy[par_name] = cuqi.legacy.sampler.UGLA
960
966
 
961
967
  else:
962
968
  raise NotImplementedError(f"Unable to determine sampling strategy for {par_name} with target {cond_target}")
963
969
 
964
970
  print("Automatically determined sampling strategy:")
965
971
  for dist_name, strategy in sampling_strategy.items():
966
- if experimental:
967
- print(f"\t{dist_name}: {strategy.__class__.__name__} (mcmc.experimental)")
972
+ if not legacy:
973
+ print(f"\t{dist_name}: {strategy.__class__.__name__} (mcmc.sampler)")
968
974
  else:
969
975
  print(f"\t{dist_name}: {strategy.__name__}")
970
976
  print("")
cuqi/sampler/__init__.py CHANGED
@@ -1,11 +1,123 @@
1
+ """
2
+ The sampler module of CUQIpy. It has been re-implemented to improve design, flexibility,
3
+ and extensibility. The old sampler module can be found in :py:mod:`cuqi.legacy.sampler`.
4
+
5
+ Main changes for users in this implementation
6
+ ---------------------------------------------
7
+
8
+ 1. Sampling API
9
+ ^^^^^^^^^^^^^^^
10
+
11
+ Previously one would call the `.sample` or `sample_adapt` methods of a sampler instance at :py:mod:`cuqi.legacy.sampler` to sample from a target distribution and store the samples as the output as follows:
12
+
13
+ .. code-block:: python
14
+
15
+ from cuqi.legacy.sampler import MH
16
+ from cuqi.distribution import DistributionGallery
17
+
18
+ # Target distribution
19
+ target = DistributionGallery("donut")
20
+
21
+ # Set up sampler
22
+ sampler = MH(target)
23
+
24
+ # Sample from the target distribution (Alternatively calling sample with explicit scale parameter set in sampler)
25
+ samples = sampler.sample_adapt(Ns=100, Nb=100) # Burn-in (Nb) removed by default
26
+
27
+ This has now changed to a more object-oriented API which provides more flexibility and control over the sampling process.
28
+
29
+ For example one can now more explicitly control when the sampler is tuned (warmup) and when it is sampling with fixed parameters.
30
+
31
+ .. code-block:: python
32
+
33
+ from cuqi.sampler import MH
34
+ from cuqi.distribution import DistributionGallery
35
+
36
+ # Target distribution
37
+ target = DistributionGallery("donut")
38
+
39
+ # Set up sampler
40
+ sampler = MH(target)
41
+
42
+ # Sample from the target distribution
43
+ sampler.warmup(Nb=100) # Explicit warmup (tuning) of sampler
44
+ sampler.sample(Ns=100) # Sampling with fixed parameters
45
+ samples = sampler.get_samples().burnthin(Nb=100) # Getting samples and removing burn-in from warmup
46
+
47
+ Importantly, the removal of burn-in from e.g. warmup is now a separate step that is done after the sampling process is complete.
48
+
49
+ 2. Sampling API for BayesianProblem
50
+ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
51
+
52
+ :py:class:`cuqi.problem.BayesianProblem` continues to have the same API for `sample_posterior` and the `UQ` method.
53
+
54
+ There is a flag `legacy` that can be set to `True` to use the legacy MCMC samplers.
55
+
56
+ By default, the flag is set to `False` and the samplers in `cuqi.sampler` are used.
57
+
58
+ For this more high-level interface, burn-in is automatically removed from the samples as was the case before.
59
+
60
+
61
+ 3. More options for Gibbs sampling
62
+ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
63
+
64
+ There are now more options for Gibbs sampling. Previously it was only possible to sample with Gibbs for samplers :py:class:`cuqi.legacy.sampler.LinearRTO`, :py:class:`cuqi.legacy.sampler.RegularizedLinearRTO`, :py:class:`cuqi.legacy.sampler.Conjugate`, and :py:class:`cuqi.legacy.sampler.ConjugateApprox`.
65
+
66
+ Now, it is possible to define a Gibbs sampling scheme using any sampler from the :py:mod:`cuqi.sampler` module.
67
+
68
+ **Example using a NUTS-within-Gibbs scheme for a 1D deconvolution problem:**
69
+
70
+ .. code-block:: python
71
+
72
+ import cuqi
73
+ import numpy as np
74
+ from cuqi.distribution import Gamma, Gaussian, GMRF, JointDistribution
75
+ from cuqi.sampler import NUTS, HybridGibbs, Conjugate
76
+ from cuqi.testproblem import Deconvolution1D
77
+
78
+ # Forward problem
79
+ A, y_data, info = Deconvolution1D(dim=128, phantom='sinc', noise_std=0.001).get_components()
80
+
81
+ # Bayesian Inverse Problem
82
+ s = Gamma(1, 1e-4)
83
+ x = GMRF(np.zeros(A.domain_dim), 50)
84
+ y = Gaussian(A @ x, lambda s: 1 / s)
85
+
86
+ # Posterior
87
+ target = JointDistribution(y, x, s)(y=y_data)
88
+
89
+ # Gibbs sampling strategy. Note we can define initial_points and various parameters for each sampler
90
+ sampling_strategy = {
91
+ "x": NUTS(max_depth=10, initial_point=np.zeros(A.domain_dim)),
92
+ "s": Conjugate()
93
+ }
94
+
95
+ # Here we do 10 internal steps with NUTS for each Gibbs step
96
+ num_sampling_steps = {
97
+ "x": 10,
98
+ "s": 1
99
+ }
100
+
101
+ sampler = HybridGibbs(target, sampling_strategy, num_sampling_steps)
102
+
103
+ sampler.warmup(50)
104
+ sampler.sample(200)
105
+ samples = sampler.get_samples().burnthin(Nb=50)
106
+
107
+ samples["x"].plot_ci(exact=info.exactSolution)
108
+ """
109
+
110
+
111
+
1
112
  from ._sampler import Sampler, ProposalBasedSampler
2
- from ._conjugate import Conjugate
3
- from ._conjugate_approx import ConjugateApprox
4
- from ._cwmh import CWMH
5
- from ._gibbs import Gibbs
6
- from ._hmc import NUTS
7
- from ._langevin_algorithm import ULA, MALA
8
- from ._laplace_approximation import UGLA
113
+ from ._langevin_algorithm import ULA, MALA, MYULA, PnPULA
9
114
  from ._mh import MH
10
- from ._pcn import pCN
115
+ from ._pcn import PCN
11
116
  from ._rto import LinearRTO, RegularizedLinearRTO
117
+ from ._cwmh import CWMH
118
+ from ._laplace_approximation import UGLA
119
+ from ._hmc import NUTS
120
+ from ._gibbs import HybridGibbs
121
+ from ._conjugate import Conjugate
122
+ from ._conjugate_approx import ConjugateApprox
123
+ from ._direct import Direct