CUQIpy 1.3.0.post0.dev298__py3-none-any.whl → 1.4.0.post0.dev92__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (68) hide show
  1. cuqi/__init__.py +2 -0
  2. cuqi/_version.py +3 -3
  3. cuqi/algebra/__init__.py +2 -0
  4. cuqi/{experimental/algebra/_randomvariable.py → algebra/_random_variable.py} +4 -4
  5. cuqi/density/_density.py +9 -1
  6. cuqi/distribution/_distribution.py +25 -16
  7. cuqi/distribution/_joint_distribution.py +99 -14
  8. cuqi/distribution/_posterior.py +9 -0
  9. cuqi/experimental/__init__.py +1 -4
  10. cuqi/experimental/_recommender.py +4 -4
  11. cuqi/geometry/__init__.py +2 -0
  12. cuqi/{experimental/geometry/_productgeometry.py → geometry/_product_geometry.py} +1 -1
  13. cuqi/implicitprior/__init__.py +1 -1
  14. cuqi/implicitprior/_restorator.py +35 -1
  15. cuqi/legacy/__init__.py +2 -0
  16. cuqi/legacy/sampler/__init__.py +11 -0
  17. cuqi/legacy/sampler/_conjugate.py +55 -0
  18. cuqi/legacy/sampler/_conjugate_approx.py +52 -0
  19. cuqi/legacy/sampler/_cwmh.py +196 -0
  20. cuqi/legacy/sampler/_gibbs.py +231 -0
  21. cuqi/legacy/sampler/_hmc.py +335 -0
  22. cuqi/legacy/sampler/_langevin_algorithm.py +198 -0
  23. cuqi/legacy/sampler/_laplace_approximation.py +184 -0
  24. cuqi/legacy/sampler/_mh.py +190 -0
  25. cuqi/legacy/sampler/_pcn.py +244 -0
  26. cuqi/legacy/sampler/_rto.py +284 -0
  27. cuqi/legacy/sampler/_sampler.py +182 -0
  28. cuqi/likelihood/_likelihood.py +1 -1
  29. cuqi/model/_model.py +225 -90
  30. cuqi/pde/__init__.py +4 -0
  31. cuqi/pde/_observation_map.py +36 -0
  32. cuqi/pde/_pde.py +52 -21
  33. cuqi/problem/_problem.py +87 -80
  34. cuqi/sampler/__init__.py +120 -8
  35. cuqi/sampler/_conjugate.py +376 -35
  36. cuqi/sampler/_conjugate_approx.py +40 -16
  37. cuqi/sampler/_cwmh.py +132 -138
  38. cuqi/{experimental/mcmc → sampler}/_direct.py +1 -1
  39. cuqi/sampler/_gibbs.py +276 -130
  40. cuqi/sampler/_hmc.py +328 -201
  41. cuqi/sampler/_langevin_algorithm.py +282 -98
  42. cuqi/sampler/_laplace_approximation.py +87 -117
  43. cuqi/sampler/_mh.py +47 -157
  44. cuqi/sampler/_pcn.py +65 -213
  45. cuqi/sampler/_rto.py +206 -140
  46. cuqi/sampler/_sampler.py +540 -135
  47. {cuqipy-1.3.0.post0.dev298.dist-info → cuqipy-1.4.0.post0.dev92.dist-info}/METADATA +1 -1
  48. cuqipy-1.4.0.post0.dev92.dist-info/RECORD +101 -0
  49. cuqi/experimental/algebra/__init__.py +0 -2
  50. cuqi/experimental/geometry/__init__.py +0 -1
  51. cuqi/experimental/mcmc/__init__.py +0 -122
  52. cuqi/experimental/mcmc/_conjugate.py +0 -396
  53. cuqi/experimental/mcmc/_conjugate_approx.py +0 -76
  54. cuqi/experimental/mcmc/_cwmh.py +0 -190
  55. cuqi/experimental/mcmc/_gibbs.py +0 -374
  56. cuqi/experimental/mcmc/_hmc.py +0 -460
  57. cuqi/experimental/mcmc/_langevin_algorithm.py +0 -382
  58. cuqi/experimental/mcmc/_laplace_approximation.py +0 -154
  59. cuqi/experimental/mcmc/_mh.py +0 -80
  60. cuqi/experimental/mcmc/_pcn.py +0 -89
  61. cuqi/experimental/mcmc/_rto.py +0 -306
  62. cuqi/experimental/mcmc/_sampler.py +0 -564
  63. cuqipy-1.3.0.post0.dev298.dist-info/RECORD +0 -100
  64. /cuqi/{experimental/algebra/_ast.py → algebra/_abstract_syntax_tree.py} +0 -0
  65. /cuqi/{experimental/algebra/_orderedset.py → algebra/_ordered_set.py} +0 -0
  66. {cuqipy-1.3.0.post0.dev298.dist-info → cuqipy-1.4.0.post0.dev92.dist-info}/WHEEL +0 -0
  67. {cuqipy-1.3.0.post0.dev298.dist-info → cuqipy-1.4.0.post0.dev92.dist-info}/licenses/LICENSE +0 -0
  68. {cuqipy-1.3.0.post0.dev298.dist-info → cuqipy-1.4.0.post0.dev92.dist-info}/top_level.txt +0 -0
cuqi/problem/_problem.py CHANGED
@@ -288,7 +288,7 @@ class BayesianProblem(object):
288
288
  x_MAP.info = solver_info
289
289
  return x_MAP
290
290
 
291
- def sample_posterior(self, Ns, Nb=None, callback=None, experimental=False) -> cuqi.samples.Samples:
291
+ def sample_posterior(self, Ns, Nb=None, callback=None, legacy=False) -> cuqi.samples.Samples:
292
292
  """Sample the posterior. Sampler choice and tuning is handled automatically.
293
293
 
294
294
  Parameters
@@ -297,17 +297,24 @@ class BayesianProblem(object):
297
297
  Number of samples to draw.
298
298
 
299
299
  Nb : int or None, *Optional*
300
- Number of burn-in samples. If not provided, 20% of the samples will be used for burn-in.
300
+ Number of burn-in samples. If not provided, 20% of the samples will be used
301
+ for burn-in.
301
302
 
302
303
  callback : callable, *Optional*
303
304
  If set this function will be called after every sample.
304
- The signature of the callback function is `callback(sample, sample_index)`,
305
- where `sample` is the current sample and `sample_index` is the index of the sample.
305
+ If the parameter `legacy` is set to False, which is the default, the callback
306
+ function should take three arguments: the sampler object, the index of the
307
+ current sampling step, the total number of requested samples. The last two
308
+ arguments are integers. An example of the callback function signature in this
309
+ case is: `callback(sampler, sample_index, num_of_samples)`.
310
+ If the parameter `legacy` is set to True, the signature of the callback
311
+ function is `callback(sample, sample_index)`, where `sample` is the current
312
+ sample and `sample_index` is the index of the sample.
306
313
  An example is shown in demos/demo31_callback.py.
307
- Note: if the parameter `experimental` is set to True, the callback function should take three arguments: the sampler object, the index of the current sampling step, the total number of requested samples. The last two arguments are integers. An example of the callback function signature in the case is: `callback(sampler, sample_index, num_of_samples)`.
308
314
 
309
- experimental : bool, *Optional*
310
- If set to True, the sampler selection will use the samplers from the :mod:`cuqi.experimental.mcmc` module.
315
+ legacy : bool, *Optional*
316
+ Default is False. If set to True, the sampler selection will use the samplers from the legacy sampler module, :mod:`cuqi.legacy.sampler`.
317
+
311
318
 
312
319
  Returns
313
320
  -------
@@ -323,9 +330,9 @@ class BayesianProblem(object):
323
330
  print("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
324
331
  print("")
325
332
 
326
- if experimental:
333
+ if not legacy:
327
334
  print("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
328
- print("!!! Using samplers from cuqi.experimental.mcmc !!!")
335
+ print("!!! Using samplers from cuqi.sampler !!!")
329
336
  print("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
330
337
  print("")
331
338
 
@@ -336,7 +343,7 @@ class BayesianProblem(object):
336
343
  # If target is a joint distribution, try Gibbs sampling
337
344
  # This is still very experimental!
338
345
  if isinstance(self._target, JointDistribution):
339
- return self._sampleGibbs(Ns, Nb, callback=callback, experimental=experimental)
346
+ return self._sampleGibbs(Ns, Nb, callback=callback, legacy=legacy)
340
347
 
341
348
  # For Gaussian small-scale we can use direct sampling
342
349
  if self._check_posterior(self, Gaussian, Gaussian, LinearModel, config.MAX_DIM_INV) and not self._check_posterior(self, GMRF):
@@ -344,24 +351,24 @@ class BayesianProblem(object):
344
351
 
345
352
  # For larger-scale Gaussian we use Linear RTO. TODO: Improve checking once we have a common Gaussian class.
346
353
  elif hasattr(self.prior,"sqrtprecTimesMean") and hasattr(self.likelihood.distribution,"sqrtprec") and isinstance(self.model,LinearModel):
347
- return self._sampleLinearRTO(Ns, Nb, callback, experimental=experimental)
354
+ return self._sampleLinearRTO(Ns, Nb, callback, legacy=legacy)
348
355
 
349
356
  # For LMRF we use our awesome unadjusted Laplace approximation!
350
357
  elif self._check_posterior(self, LMRF, Gaussian):
351
- return self._sampleUGLA(Ns, Nb, callback, experimental=experimental)
358
+ return self._sampleUGLA(Ns, Nb, callback, legacy=legacy)
352
359
 
353
360
  # If we have gradients, use NUTS!
354
361
  # TODO: Fix cases where we have gradients but NUTS fails (see checks)
355
362
  elif self._check_posterior(self, must_have_gradient=True) and not self._check_posterior(self, (Beta, InverseGamma, Lognormal)):
356
- return self._sampleNUTS(Ns, Nb, callback, experimental=experimental)
363
+ return self._sampleNUTS(Ns, Nb, callback, legacy=legacy)
357
364
 
358
365
  # For Gaussians with non-linear model we use pCN
359
366
  elif self._check_posterior(self, (Gaussian, GMRF), Gaussian):
360
- return self._samplepCN(Ns, Nb, callback, experimental=experimental)
367
+ return self._samplepCN(Ns, Nb, callback, legacy=legacy)
361
368
 
362
369
  # For Regularized Gaussians with linear models we use RegularizedLinearRTO
363
370
  elif self._check_posterior(self, (RegularizedGaussian, RegularizedGMRF), Gaussian, LinearModel):
364
- return self._sampleRegularizedLinearRTO(Ns, Nb, callback, experimental=experimental)
371
+ return self._sampleRegularizedLinearRTO(Ns, Nb, callback, legacy=legacy)
365
372
 
366
373
  else:
367
374
  raise NotImplementedError(f"Automatic sampler choice is not implemented for model: {type(self.model)}, likelihood: {type(self.likelihood.distribution)} and prior: {type(self.prior)} and dim {self.prior.dim}. Manual sampler choice can be done via the 'sampler' module. Posterior distribution can be extracted via '.posterior' of any testproblem (BayesianProblem).")
@@ -394,7 +401,7 @@ class BayesianProblem(object):
394
401
  # Now sample prior problem
395
402
  return prior_problem.sample_posterior(Ns, Nb, callback)
396
403
 
397
- def UQ(self, Ns=1000, Nb=None, percent=95, exact=None, experimental=False) -> cuqi.samples.Samples:
404
+ def UQ(self, Ns=1000, Nb=None, percent=95, exact=None, legacy=False) -> cuqi.samples.Samples:
398
405
  """ Run an Uncertainty Quantification (UQ) analysis on the Bayesian problem and provide a summary of the results.
399
406
 
400
407
  Parameters
@@ -412,8 +419,8 @@ class BayesianProblem(object):
412
419
  percent : float, *Optional*
413
420
  The credible interval to plot. Defaults to 95%.
414
421
 
415
- experimental : bool, *Optional*
416
- If set to True, the sampler selection will use the samplers from the :mod:`cuqi.experimental.mcmc` module.
422
+ legacy : bool, *Optional*
423
+ Default is False. If set to True, the sampler selection will use the samplers from the legacy sampler module, :mod:`cuqi.legacy.sampler`.
417
424
 
418
425
  Returns
419
426
  -------
@@ -421,7 +428,7 @@ class BayesianProblem(object):
421
428
  Samples from the posterior. The samples can be used to compute further statistics and plots.
422
429
  """
423
430
  print(f"Computing {Ns} samples")
424
- samples = self.sample_posterior(Ns, Nb, experimental=experimental)
431
+ samples = self.sample_posterior(Ns, Nb, legacy=legacy)
425
432
 
426
433
  print("Plotting results")
427
434
  # Gibbs case
@@ -488,14 +495,14 @@ class BayesianProblem(object):
488
495
  samples.funvals.vector.plot_variance()
489
496
  plt.title("Sample variance of function representation")
490
497
 
491
- def _sampleLinearRTO(self, Ns, Nb, callback=None, experimental=False):
498
+ def _sampleLinearRTO(self, Ns, Nb, callback=None, legacy=False):
492
499
 
493
- if experimental:
500
+ if not legacy:
494
501
 
495
- print("Using cuqi.experimental.mcmc LinearRTO sampler.")
502
+ print("Using cuqi.sampler LinearRTO sampler.")
496
503
  print(f"burn-in: {Nb/Ns*100:g}%")
497
504
 
498
- sampler = cuqi.experimental.mcmc.LinearRTO(self.posterior, callback=callback)
505
+ sampler = cuqi.sampler.LinearRTO(self.posterior, callback=callback)
499
506
 
500
507
  ti = time.time()
501
508
 
@@ -507,14 +514,14 @@ class BayesianProblem(object):
507
514
 
508
515
  else:
509
516
 
510
- print("Using cuqi.sampler LinearRTO sampler.")
517
+ print("Using cuqi.legacy.sampler LinearRTO sampler.")
511
518
  print(f"burn-in: {Nb/Ns*100:g}%")
512
519
 
513
520
  # Start timing
514
521
  ti = time.time()
515
522
 
516
523
  # Sample
517
- sampler = cuqi.sampler.LinearRTO(self.posterior, callback=callback)
524
+ sampler = cuqi.legacy.sampler.LinearRTO(self.posterior, callback=callback)
518
525
  samples = sampler.sample(Ns, Nb)
519
526
 
520
527
  # Print timing
@@ -563,17 +570,17 @@ class BayesianProblem(object):
563
570
 
564
571
  return cuqi.samples.Samples(x_s,self.model.domain_geometry)
565
572
 
566
- def _sampleCWMH(self, Ns, Nb, callback=None, experimental=False):
573
+ def _sampleCWMH(self, Ns, Nb, callback=None, legacy=False):
567
574
 
568
- if experimental:
575
+ if not legacy:
569
576
 
570
- print("Using cuqi.experimental.mcmc Component-wise Metropolis-Hastings (CWMH) sampler.")
577
+ print("Using cuqi.sampler Component-wise Metropolis-Hastings (CWMH) sampler.")
571
578
  print(f"burn-in: {Nb/Ns*100:g}%, scale: 0.05, x0: 0.5 (vector)")
572
579
 
573
580
  scale = 0.05*np.ones(self.prior.dim)
574
581
  x0 = 0.5*np.ones(self.prior.dim)
575
582
 
576
- sampler = cuqi.experimental.mcmc.CWMH(self.posterior, scale, x0, callback=callback)
583
+ sampler = cuqi.sampler.CWMH(self.posterior, scale, x0, callback=callback)
577
584
 
578
585
  ti = time.time()
579
586
 
@@ -585,7 +592,7 @@ class BayesianProblem(object):
585
592
 
586
593
  else:
587
594
 
588
- print("Using cuqi.sampler Component-wise Metropolis-Hastings (CWMH) sampler (sample_adapt)")
595
+ print("Using cuqi.legacy.sampler Component-wise Metropolis-Hastings (CWMH) sampler (sample_adapt)")
589
596
  print(f"burn-in: {Nb/Ns*100:g}%, scale: 0.05, x0: 0.5 (vector)")
590
597
 
591
598
  # Dimension
@@ -597,7 +604,7 @@ class BayesianProblem(object):
597
604
  # Set up sampler
598
605
  scale = 0.05*np.ones(n)
599
606
  x0 = 0.5*np.ones(n)
600
- MCMC = cuqi.sampler.CWMH(self.posterior, proposal, scale, x0, callback=callback)
607
+ MCMC = cuqi.legacy.sampler.CWMH(self.posterior, proposal, scale, x0, callback=callback)
601
608
 
602
609
  # Run sampler
603
610
  ti = time.time()
@@ -606,16 +613,16 @@ class BayesianProblem(object):
606
613
 
607
614
  return x_s
608
615
 
609
- def _samplepCN(self, Ns, Nb, callback=None, experimental=False):
616
+ def _samplepCN(self, Ns, Nb, callback=None, legacy=False):
610
617
 
611
- if experimental:
618
+ if not legacy:
612
619
 
613
- print("Using cuqi.experimental.mcmc preconditioned Crank-Nicolson (pCN) sampler.")
620
+ print("Using cuqi.sampler preconditioned Crank-Nicolson (pCN) sampler.")
614
621
  print(f"burn-in: {Nb/Ns*100:g}%, scale: 0.02")
615
622
 
616
623
  scale = 0.02
617
624
 
618
- sampler = cuqi.experimental.mcmc.PCN(self.posterior, scale, callback=callback)
625
+ sampler = cuqi.sampler.PCN(self.posterior, scale, callback=callback)
619
626
 
620
627
  ti = time.time()
621
628
 
@@ -627,12 +634,12 @@ class BayesianProblem(object):
627
634
 
628
635
  else:
629
636
 
630
- print("Using cuqi.sampler preconditioned Crank-Nicolson (pCN) sampler (sample_adapt)")
637
+ print("Using cuqi.legacy.sampler preconditioned Crank-Nicolson (pCN) sampler (sample_adapt)")
631
638
  print(f"burn-in: {Nb/Ns*100:g}%, scale: 0.02")
632
639
 
633
640
  scale = 0.02
634
641
 
635
- MCMC = cuqi.sampler.pCN(self.posterior, scale, callback=callback)
642
+ MCMC = cuqi.legacy.sampler.pCN(self.posterior, scale, callback=callback)
636
643
 
637
644
  #Run sampler
638
645
  ti = time.time()
@@ -641,14 +648,14 @@ class BayesianProblem(object):
641
648
 
642
649
  return x_s
643
650
 
644
- def _sampleNUTS(self, Ns, Nb, callback=None, experimental=False):
651
+ def _sampleNUTS(self, Ns, Nb, callback=None, legacy=False):
645
652
 
646
- if experimental:
653
+ if not legacy:
647
654
 
648
- print("Using cuqi.experimental.mcmc No-U-Turn (NUTS) sampler.")
655
+ print("Using cuqi.sampler No-U-Turn (NUTS) sampler.")
649
656
  print(f"burn-in: {Nb/Ns*100:g}%")
650
657
 
651
- sampler = cuqi.experimental.mcmc.NUTS(self.posterior, callback=callback)
658
+ sampler = cuqi.sampler.NUTS(self.posterior, callback=callback)
652
659
 
653
660
  ti = time.time()
654
661
 
@@ -660,10 +667,10 @@ class BayesianProblem(object):
660
667
 
661
668
  else:
662
669
 
663
- print("Using cuqi.sampler No-U-Turn (NUTS) sampler")
670
+ print("Using cuqi.legacy.sampler No-U-Turn (NUTS) sampler")
664
671
  print(f"burn-in: {Nb/Ns*100:g}%")
665
672
 
666
- MCMC = cuqi.sampler.NUTS(self.posterior, callback=callback)
673
+ MCMC = cuqi.legacy.sampler.NUTS(self.posterior, callback=callback)
667
674
 
668
675
  # Run sampler
669
676
  ti = time.time()
@@ -672,14 +679,14 @@ class BayesianProblem(object):
672
679
 
673
680
  return x_s
674
681
 
675
- def _sampleUGLA(self, Ns, Nb, callback=None, experimental=False):
682
+ def _sampleUGLA(self, Ns, Nb, callback=None, legacy=False):
676
683
 
677
- if experimental:
684
+ if not legacy:
678
685
 
679
- print("Using cuqi.experimental.mcmc Unadjusted Gaussian Laplace Approximation (UGLA) sampler.")
686
+ print("Using cuqi.sampler Unadjusted Gaussian Laplace Approximation (UGLA) sampler.")
680
687
  print(f"burn-in: {Nb/Ns*100:g}%")
681
688
 
682
- sampler = cuqi.experimental.mcmc.UGLA(self.posterior, callback=callback)
689
+ sampler = cuqi.sampler.UGLA(self.posterior, callback=callback)
683
690
 
684
691
  ti = time.time()
685
692
 
@@ -691,14 +698,14 @@ class BayesianProblem(object):
691
698
 
692
699
  else:
693
700
 
694
- print("Using cuqi.sampler UGLA sampler")
701
+ print("Using cuqi.legacy.sampler UGLA sampler")
695
702
  print(f"burn-in: {Nb/Ns*100:g}%")
696
703
 
697
704
  # Start timing
698
705
  ti = time.time()
699
706
 
700
707
  # Sample
701
- sampler = cuqi.sampler.UGLA(self.posterior, callback=callback)
708
+ sampler = cuqi.legacy.sampler.UGLA(self.posterior, callback=callback)
702
709
  samples = sampler.sample(Ns, Nb)
703
710
 
704
711
  # Print timing
@@ -706,14 +713,14 @@ class BayesianProblem(object):
706
713
 
707
714
  return samples
708
715
 
709
- def _sampleRegularizedLinearRTO(self, Ns, Nb, callback=None, experimental=False):
716
+ def _sampleRegularizedLinearRTO(self, Ns, Nb, callback=None, legacy=False):
710
717
 
711
- if experimental:
718
+ if not legacy:
712
719
 
713
- print("Using cuqi.experimental.mcmc Regularized LinearRTO sampler.")
720
+ print("Using cuqi.sampler Regularized LinearRTO sampler.")
714
721
  print(f"burn-in: {Nb/Ns*100:g}%")
715
722
 
716
- sampler = cuqi.experimental.mcmc.RegularizedLinearRTO(self.posterior, maxit=100, stepsize = "automatic", abstol=1e-10, callback=callback)
723
+ sampler = cuqi.sampler.RegularizedLinearRTO(self.posterior, maxit=100, stepsize = "automatic", abstol=1e-10, callback=callback)
717
724
 
718
725
  ti = time.time()
719
726
 
@@ -725,14 +732,14 @@ class BayesianProblem(object):
725
732
 
726
733
  else:
727
734
 
728
- print("Using cuqi.sampler Regularized LinearRTO sampler.")
735
+ print("Using cuqi.legacy.sampler Regularized LinearRTO sampler.")
729
736
  print(f"burn-in: {Nb/Ns*100:g}%")
730
737
 
731
738
  # Start timing
732
739
  ti = time.time()
733
740
 
734
741
  # Sample
735
- sampler = cuqi.sampler.RegularizedLinearRTO(self.posterior, maxit=100, stepsize = "automatic", abstol=1e-10, callback=callback)
742
+ sampler = cuqi.legacy.sampler.RegularizedLinearRTO(self.posterior, maxit=100, stepsize = "automatic", abstol=1e-10, callback=callback)
736
743
  samples = sampler.sample(Ns, Nb)
737
744
 
738
745
  # Print timing
@@ -840,12 +847,12 @@ class BayesianProblem(object):
840
847
 
841
848
  return L and P and M and D and G
842
849
 
843
- def _sampleGibbs(self, Ns, Nb, callback=None, experimental=False):
850
+ def _sampleGibbs(self, Ns, Nb, callback=None, legacy=False):
844
851
  """ This is a helper function for sampling from the posterior using Gibbs sampler. """
845
852
 
846
- if experimental:
853
+ if not legacy:
847
854
 
848
- print("Using cuqi.experimental.mcmc HybridGibbs sampler")
855
+ print("Using cuqi.sampler HybridGibbs sampler")
849
856
  print(f"burn-in: {Nb/Ns*100:g}%")
850
857
  print("")
851
858
 
@@ -853,9 +860,9 @@ class BayesianProblem(object):
853
860
  ti = time.time()
854
861
 
855
862
  # Sampling strategy
856
- sampling_strategy = self._determine_sampling_strategy(experimental=True)
863
+ sampling_strategy = self._determine_sampling_strategy(legacy=False)
857
864
 
858
- sampler = cuqi.experimental.mcmc.HybridGibbs(
865
+ sampler = cuqi.sampler.HybridGibbs(
859
866
  self._target, sampling_strategy, callback=callback)
860
867
  sampler.warmup(Nb)
861
868
  sampler.sample(Ns)
@@ -875,7 +882,7 @@ class BayesianProblem(object):
875
882
  print("")
876
883
 
877
884
  if callback is not None:
878
- raise NotImplementedError("Callback not implemented for Gibbs sampler. It is only implemented for experimental Gibbs sampler.")
885
+ raise NotImplementedError("Callback not implemented for the legacy Gibbs sampler. It is only implemented for cuqi.sampler Gibbs (cuqi.sampler.HybridGibbs) sampler.")
879
886
 
880
887
  # Start timing
881
888
  ti = time.time()
@@ -883,7 +890,7 @@ class BayesianProblem(object):
883
890
  # Sampling strategy
884
891
  sampling_strategy = self._determine_sampling_strategy()
885
892
 
886
- sampler = cuqi.sampler.Gibbs(self._target, sampling_strategy)
893
+ sampler = cuqi.legacy.sampler.Gibbs(self._target, sampling_strategy)
887
894
  samples = sampler.sample(Ns, Nb)
888
895
 
889
896
  # Print timing
@@ -892,7 +899,7 @@ class BayesianProblem(object):
892
899
  return samples
893
900
 
894
901
 
895
- def _determine_sampling_strategy(self, experimental=False):
902
+ def _determine_sampling_strategy(self, legacy=True):
896
903
  """ This is a helper function for determining the sampling strategy for Gibbs sampler.
897
904
 
898
905
  It is still very experimental and not very robust.
@@ -924,46 +931,46 @@ class BayesianProblem(object):
924
931
 
925
932
  # Gamma or ModifiedHalfNormal prior, Gaussian or RegularizedGaussian likelihood -> Conjugate
926
933
  if self._check_posterior(cond_target, (Gamma, ModifiedHalfNormal), (Gaussian, GMRF, RegularizedGaussian, RegularizedGMRF)):
927
- if experimental:
928
- sampling_strategy[par_name] = cuqi.experimental.mcmc.Conjugate()
934
+ if not legacy:
935
+ sampling_strategy[par_name] = cuqi.sampler.Conjugate()
929
936
  else:
930
- sampling_strategy[par_name] = cuqi.sampler.Conjugate
937
+ sampling_strategy[par_name] = cuqi.legacy.sampler.Conjugate
931
938
 
932
939
  # Gamma prior, LMRF likelihood -> ConjugateApprox
933
940
  elif self._check_posterior(cond_target, Gamma, LMRF):
934
- if experimental:
935
- sampling_strategy[par_name] = cuqi.experimental.mcmc.ConjugateApprox()
941
+ if not legacy:
942
+ sampling_strategy[par_name] = cuqi.sampler.ConjugateApprox()
936
943
  else:
937
- sampling_strategy[par_name] = cuqi.sampler.ConjugateApprox
944
+ sampling_strategy[par_name] = cuqi.legacy.sampler.ConjugateApprox
938
945
 
939
946
  # Gaussian prior, Gaussian likelihood, Linear model -> LinearRTO
940
947
  elif self._check_posterior(cond_target, (Gaussian, GMRF), Gaussian, LinearModel):
941
- if experimental:
942
- sampling_strategy[par_name] = cuqi.experimental.mcmc.LinearRTO()
948
+ if not legacy:
949
+ sampling_strategy[par_name] = cuqi.sampler.LinearRTO()
943
950
  else:
944
- sampling_strategy[par_name] = cuqi.sampler.LinearRTO
951
+ sampling_strategy[par_name] = cuqi.legacy.sampler.LinearRTO
945
952
 
946
953
  # Implicit Regularized Gaussian prior, Gaussian likelihood, linear model -> RegularizedLinearRTO
947
954
  elif self._check_posterior(cond_target, (RegularizedGaussian, RegularizedGMRF), Gaussian, LinearModel):
948
- if experimental:
949
- sampling_strategy[par_name] = cuqi.experimental.mcmc.RegularizedLinearRTO()
955
+ if not legacy:
956
+ sampling_strategy[par_name] = cuqi.sampler.RegularizedLinearRTO()
950
957
  else:
951
- sampling_strategy[par_name] = cuqi.sampler.RegularizedLinearRTO
958
+ sampling_strategy[par_name] = cuqi.legacy.sampler.RegularizedLinearRTO
952
959
 
953
960
  # LMRF prior, Gaussian likelihood, Linear model -> UGLA
954
961
  elif self._check_posterior(cond_target, LMRF, Gaussian, LinearModel):
955
- if experimental:
956
- sampling_strategy[par_name] = cuqi.experimental.mcmc.UGLA()
962
+ if not legacy:
963
+ sampling_strategy[par_name] = cuqi.sampler.UGLA()
957
964
  else:
958
- sampling_strategy[par_name] = cuqi.sampler.UGLA
965
+ sampling_strategy[par_name] = cuqi.legacy.sampler.UGLA
959
966
 
960
967
  else:
961
968
  raise NotImplementedError(f"Unable to determine sampling strategy for {par_name} with target {cond_target}")
962
969
 
963
970
  print("Automatically determined sampling strategy:")
964
971
  for dist_name, strategy in sampling_strategy.items():
965
- if experimental:
966
- print(f"\t{dist_name}: {strategy.__class__.__name__} (mcmc.experimental)")
972
+ if not legacy:
973
+ print(f"\t{dist_name}: {strategy.__class__.__name__} (mcmc.sampler)")
967
974
  else:
968
975
  print(f"\t{dist_name}: {strategy.__name__}")
969
976
  print("")
cuqi/sampler/__init__.py CHANGED
@@ -1,11 +1,123 @@
1
+ """
2
+ The sampler module of CUQIpy. It has been re-implemented to improve design, flexibility,
3
+ and extensibility. The old sampler module can be found in :py:mod:`cuqi.legacy.sampler`.
4
+
5
+ Main changes for users in this implementation
6
+ ---------------------------------------------
7
+
8
+ 1. Sampling API
9
+ ^^^^^^^^^^^^^^^
10
+
11
+ Previously one would call the `.sample` or `sample_adapt` methods of a sampler instance from :py:mod:`cuqi.legacy.sampler` to sample from a target distribution and store the samples as the output as follows:
12
+
13
+ .. code-block:: python
14
+
15
+ from cuqi.legacy.sampler import MH
16
+ from cuqi.distribution import DistributionGallery
17
+
18
+ # Target distribution
19
+ target = DistributionGallery("donut")
20
+
21
+ # Set up sampler
22
+ sampler = MH(target)
23
+
24
+ # Sample from the target distribution (Alternatively calling sample with explicit scale parameter set in sampler)
25
+ samples = sampler.sample_adapt(Ns=100, Nb=100) # Burn-in (Nb) removed by default
26
+
27
+ This has now changed to a more object-oriented API, which provides more flexibility and control over the sampling process.
28
+
29
+ For example one can now more explicitly control when the sampler is tuned (warmup) and when it is sampling with fixed parameters.
30
+
31
+ .. code-block:: python
32
+
33
+ from cuqi.sampler import MH
34
+ from cuqi.distribution import DistributionGallery
35
+
36
+ # Target distribution
37
+ target = DistributionGallery("donut")
38
+
39
+ # Set up sampler
40
+ sampler = MH(target)
41
+
42
+ # Sample from the target distribution
43
+ sampler.warmup(Nb=100) # Explicit warmup (tuning) of sampler
44
+ sampler.sample(Ns=100) # Sampling with fixed parameters
45
+ samples = sampler.get_samples().burnthin(Nb=100) # Getting samples and removing burn-in from warmup
46
+
47
+ Importantly, the removal of burn-in from e.g. warmup is now a separate step that is done after the sampling process is complete.
48
+
49
+ 2. Sampling API for BayesianProblem
50
+ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
51
+
52
+ :py:class:`cuqi.problem.BayesianProblem` continues to have the same API for `sample_posterior` and the `UQ` method.
53
+
54
+ There is a flag `legacy` that can be set to `True` to use the legacy MCMC samplers.
55
+
56
+ By default, the flag is set to `False` and the samplers in `cuqi.sampler` are used.
57
+
58
+ For this more high-level interface, burn-in is automatically removed from the samples as was the case before.
59
+
60
+
61
+ 3. More options for Gibbs sampling
62
+ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
63
+
64
+ There are now more options for Gibbs sampling. Previously it was only possible to use Gibbs sampling with the samplers :py:class:`cuqi.legacy.sampler.LinearRTO`, :py:class:`cuqi.legacy.sampler.RegularizedLinearRTO`, :py:class:`cuqi.legacy.sampler.Conjugate`, and :py:class:`cuqi.legacy.sampler.ConjugateApprox`.
65
+
66
+ Now, it is possible to define a Gibbs sampling scheme using any sampler from the :py:mod:`cuqi.sampler` module.
67
+
68
+ **Example using a NUTS-within-Gibbs scheme for a 1D deconvolution problem:**
69
+
70
+ .. code-block:: python
71
+
72
+ import cuqi
73
+ import numpy as np
74
+ from cuqi.distribution import Gamma, Gaussian, GMRF, JointDistribution
75
+ from cuqi.sampler import NUTS, HybridGibbs, Conjugate
76
+ from cuqi.testproblem import Deconvolution1D
77
+
78
+ # Forward problem
79
+ A, y_data, info = Deconvolution1D(dim=128, phantom='sinc', noise_std=0.001).get_components()
80
+
81
+ # Bayesian Inverse Problem
82
+ s = Gamma(1, 1e-4)
83
+ x = GMRF(np.zeros(A.domain_dim), 50)
84
+ y = Gaussian(A @ x, lambda s: 1 / s)
85
+
86
+ # Posterior
87
+ target = JointDistribution(y, x, s)(y=y_data)
88
+
89
+ # Gibbs sampling strategy. Note we can define initial_points and various parameters for each sampler
90
+ sampling_strategy = {
91
+ "x": NUTS(max_depth=10, initial_point=np.zeros(A.domain_dim)),
92
+ "s": Conjugate()
93
+ }
94
+
95
+ # Here we do 10 internal steps with NUTS for each Gibbs step
96
+ num_sampling_steps = {
97
+ "x": 10,
98
+ "s": 1
99
+ }
100
+
101
+ sampler = HybridGibbs(target, sampling_strategy, num_sampling_steps)
102
+
103
+ sampler.warmup(50)
104
+ sampler.sample(200)
105
+ samples = sampler.get_samples().burnthin(Nb=50)
106
+
107
+ samples["x"].plot_ci(exact=info.exactSolution)
108
+ """
109
+
110
+
111
+
1
112
  from ._sampler import Sampler, ProposalBasedSampler
2
- from ._conjugate import Conjugate
3
- from ._conjugate_approx import ConjugateApprox
4
- from ._cwmh import CWMH
5
- from ._gibbs import Gibbs
6
- from ._hmc import NUTS
7
- from ._langevin_algorithm import ULA, MALA
8
- from ._laplace_approximation import UGLA
113
+ from ._langevin_algorithm import ULA, MALA, MYULA, PnPULA
9
114
  from ._mh import MH
10
- from ._pcn import pCN
115
+ from ._pcn import PCN
11
116
  from ._rto import LinearRTO, RegularizedLinearRTO
117
+ from ._cwmh import CWMH
118
+ from ._laplace_approximation import UGLA
119
+ from ._hmc import NUTS
120
+ from ._gibbs import HybridGibbs
121
+ from ._conjugate import Conjugate
122
+ from ._conjugate_approx import ConjugateApprox
123
+ from ._direct import Direct