vegas-6.2.1-cp313-cp313-macosx_11_0_arm64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of vegas might be problematic.

vegas/__init__.py ADDED
@@ -0,0 +1,1311 @@
1
+ """ Introduction
2
+ --------------------
3
+ This package provides tools for estimating multidimensional
4
+ integrals numerically using an enhanced version of
5
+ the adaptive Monte Carlo |vegas| algorithm (G. P. Lepage,
6
+ J. Comput. Phys. 27(1978) 192, and J. Comput. Phys. 439(2021)
7
+ 110386).
8
+
9
+ A |vegas| code generally involves two objects, one representing
10
+ the integrand and the other representing an integration
11
+ operator for a particular multidimensional volume. A typical
12
+ code sequence for a D-dimensional integral has the structure::
13
+
14
+ # create the integrand
15
+ def f(x):
16
+ ... compute the integrand at point x[d] d=0,1...D-1
17
+ ...
18
+
19
+ # create an integrator for volume with
20
+ # xl0 <= x[0] <= xu0, xl1 <= x[1] <= xu1 ...
21
+ integration_region = [[xl0, xu0], [xl1, xu1], ...]
22
+ integrator = vegas.Integrator(integration_region)
23
+
24
+ # do the integral and print out the result
25
+ result = integrator(f, nitn=10, neval=10000)
26
+ print(result)
27
+
28
+ The algorithm iteratively adapts to the integrand over
29
+ ``nitn`` iterations, each of which uses at most ``neval``
30
+ integrand samples to generate a Monte Carlo estimate of
31
+ the integral. The final result is the weighted average
32
+ of the results from all iterations. Increase ``neval``
33
+ to increase the precision of the result. Typically
34
+ ``nitn`` is between 10 and 20. ``neval`` can be
35
+ 1000s to millions, or more, depending upon
36
+ the integrand and the precision desired.
37
+
38
+ The integrator remembers how it adapted to ``f(x)``
39
+ and uses this information as its starting point if it is reapplied
40
+ to ``f(x)`` or applied to some other function ``g(x)``.
41
+ An integrator's state can be archived for future applications
42
+ using Python's :mod:`pickle` module.
43
+
44
+ See the extensive Tutorial in the first section of the |vegas| documentation.
45
+ """
46
+
47
+ # Created by G. Peter Lepage (Cornell University) in 12/2013.
48
+ # Copyright (c) 2013-24 G. Peter Lepage.
49
+ #
50
+ # This program is free software: you can redistribute it and/or modify
51
+ # it under the terms of the GNU General Public License as published by
52
+ # the Free Software Foundation, either version 3 of the License, or
53
+ # any later version (see <http://www.gnu.org/licenses/>).
54
+ #
55
+ # This program is distributed in the hope that it will be useful,
56
+ # but WITHOUT ANY WARRANTY; without even the implied warranty of
57
+ # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
58
+ # GNU General Public License for more details.
59
+
60
+ from ._vegas import RAvg, RAvgArray, RAvgDict
61
+ from ._vegas import AdaptiveMap, Integrator, BatchIntegrand
62
+ from ._vegas import reporter, VegasIntegrand, batchintegrand
63
+ from ._vegas import rbatchintegrand, RBatchIntegrand
64
+ from ._vegas import lbatchintegrand, LBatchIntegrand
65
+ from ._vegas import MPIintegrand
66
+ from ._version import __version__
67
+
68
+ # legacy names:
69
+ from ._vegas import vecintegrand, VecIntegrand
70
+
71
+ import gvar as _gvar
72
+ import functools
73
+ import numpy
74
+ import pickle
75
+
76
+ ###############################################
77
+ # PDFEV, etc PDFIntegrator expectation values
78
+
79
+
80
+ class PDFEV(_gvar.GVar):
81
+ r""" Expectation value from |PDFIntegrator|.
82
+
83
+ Expectation values are returned by
84
+ :meth:`vegas.PDFIntegrator.__call__` and
85
+ :class:`vegas.PDFIntegrator.stats`::
86
+
87
+ >>> g = gvar.gvar(['1(1)', '10(10)'])
88
+ >>> g_ev = vegas.PDFIntegrator(g)
89
+ >>> def f(p):
90
+ ... return p[0] * p[1]
91
+ >>> print(g_ev(f))
92
+ 10.051(57)
93
+ >>> print(g_ev.stats(f))
94
+ 10(17)
95
+
96
+ In the first case, the quoted error is the uncertainty
97
+ in the :mod:`vegas` estimate of the mean of ``f(p)``.
98
+ In the second case, the quoted uncertainty is the
99
+ standard deviation evaluated with respect to the
100
+ Gaussian distribution associated with ``g`` (added
101
+ in quadrature to the :mod:`vegas` error, which
102
+ is negligible here).
103
+
104
+ :class:`vegas.PDFEV`\s have the following attributes:
105
+
106
+ Attributes:
107
+
108
+ pdfnorm: Divide PDF by ``pdfnorm`` to normalize it.
109
+
110
+ results (:class:`vegas.RAvgDict`): Results from
111
+ the underlying integrals.
112
+
113
+ In addition, they have all the attributes of the :class:`vegas.RAvgDict`
114
+ (``results``) corresponding to the underlying integrals.
115
+
116
+ A :class:`vegas.PDFEV` returned by
117
+ ``vegas.PDFIntegrator.stats(self, f...)`` has three further attributes:
118
+
119
+ Attributes:
120
+
121
+ stats: An instance of :class:`gvar.PDFStatistics`
122
+ containing statistical information about
123
+ the distribution of ``f(p)``.
124
+
125
+ vegas_mean: |vegas| estimate for the mean value of
126
+ ``f(p)``. The uncertainties in ``vegas_mean``
127
+ are the integration errors from |vegas|.
128
+
129
+ vegas_cov: |vegas| estimate for the covariance matrix
130
+ of ``f(p)``. The uncertainties in ``vegas_cov``
131
+ are the integration errors from |vegas|.
132
+
133
+ vegas_sdev: |vegas| estimate for the standard deviation
134
+ of ``f(p)``. The uncertainties in ``vegas_sdev``
135
+ are the integration errors from |vegas|.
136
+ """
137
+ def __init__(self, results, analyzer=None):
138
+ self.results = pickle.loads(results) if isinstance(results, bytes) else results
139
+ if analyzer is None:
140
+ ans = self.results['f(p)*pdf'] / self.results['pdf']
141
+ super(PDFEV, self).__init__(*ans.internaldata)
142
+ self.analyzer = None
143
+ else:
144
+ ans, extras = analyzer(self.results)
145
+ super(PDFEV, self).__init__(*ans.internaldata)
146
+ for k in extras:
147
+ setattr(self, k, extras[k])
148
+ self.analyzer = analyzer
149
+
150
+ def extend(self, pdfev):
151
+ r""" Merge results from :class:`PDFEV` object ``pdfev`` after results currently in ``self``. """
152
+ self.results.extend(pdfev.results)
153
+
154
+ def __getattr__(self, k):
155
+ if k in ['keys']:
156
+ raise AttributeError('no keys method')
157
+ if k == 'pdfnorm':
158
+ return self.results['pdf']
159
+ return getattr(self.results, k)
160
+
161
+ def _remove_gvars(self, gvlist):
162
+ tmp = PDFEV(results=self.results, analyzer=self.analyzer)
163
+ tmp.results = _gvar.remove_gvars(tmp.results, gvlist)
164
+ tgvar = _gvar.gvar_factory() # small cov matrix
165
+ super(PDFEV, tmp).__init__(*tgvar(0,0).internaldata)
166
+ return tmp
167
+
168
+ def _distribute_gvars(self, gvlist):
169
+ return PDFEV(
170
+ results = _gvar.distribute_gvars(self.results, gvlist),
171
+ analyzer=self.analyzer
172
+ )
173
+
174
+ def __reduce_ex__(self, protocol):
175
+ return (PDFEV, (pickle.dumps(self.results), self.analyzer))
176
+
177
+ class PDFEVArray(numpy.ndarray):
178
+ r""" Array of expectation values from |PDFIntegrator|.
179
+
180
+ Expectation values are returned by
181
+ :meth:`vegas.PDFIntegrator.__call__` and
182
+ :class:`vegas.PDFIntegrator.stats`::
183
+
184
+ >>> g = gvar.gvar(['1(1)', '10(10)'])
185
+ >>> g_ev = vegas.PDFIntegrator(g)
186
+ >>> def f(p):
187
+ ... return [p[0], p[1], p[0] * p[1]]
188
+ >>> print(g_ev(f))
189
+ [0.9992(31) 10.024(29) 10.051(57)]
190
+ >>> print(g_ev.stats(f))
191
+ [1.0(1.0) 10(10) 10(17)]
192
+
193
+ In the first case, the quoted errors are the uncertainties
194
+ in the :mod:`vegas` estimates of the means. In the second
195
+ case, the quoted uncertainties are the standard deviations
196
+ evaluated with respect to the Gaussian distribution
197
+ associated with ``g`` (added in quadrature to the
198
+ :mod:`vegas` errors, which are negligible here).
199
+
200
+ :class:`vegas.PDFEVArray`\s have the following attributes:
201
+
202
+ Attributes:
203
+
204
+ pdfnorm: Divide PDF by ``pdfnorm`` to normalize it.
205
+
206
+ results (:class:`vegas.RAvgDict`): Results from
207
+ the underlying integrals.
208
+
209
+ In addition, they have all the attributes of the :class:`vegas.RAvgDict`
210
+ (``results``) corresponding to the underlying integrals.
211
+
212
+ A :class:`vegas.PDFEVArray` ``s`` returned by
213
+ ``vegas.PDFIntegrator.stats(self, f...)`` has three further
214
+ attributes:
215
+
216
+ Attributes:
217
+
218
+ stats: ``s.stats[i]`` is a :class:`gvar.PDFStatistics`
219
+ object containing statistical information about
220
+ the distribution of ``f(p)[i]``.
221
+
222
+ vegas_mean: |vegas| estimates for the mean values
223
+ of ``f(p)``. The uncertainties in ``vegas_mean``
224
+ are the integration errors from |vegas|.
225
+
226
+ vegas_cov: |vegas| estimate for the covariance matrix
227
+ of ``f(p)``. The uncertainties in ``vegas_cov``
228
+ are the integration errors from |vegas|.
229
+
230
+ vegas_sdev: |vegas| estimate for the standard deviation
231
+ of ``f(p)``. The uncertainties in ``vegas_sdev``
232
+ are the integration errors from |vegas|.
233
+ """
234
+ def __new__(cls, results, analyzer=None):
235
+ results = pickle.loads(results) if isinstance(results, bytes) else results
236
+ if analyzer is None:
237
+ self = numpy.asarray(results['f(p)*pdf'] / results['pdf']).view(cls)
238
+ self.analyzer = None
239
+ else:
240
+ ans, extras = analyzer(results)
241
+ self = numpy.asarray(ans).view(cls)
242
+ for k in extras:
243
+ setattr(self, k, extras[k])
244
+ self.analyzer = analyzer
245
+ self.results = results
246
+ return self
247
+
248
+ def extend(self, pdfev):
249
+ r""" Merge results from :class:`PDFEVArray` object ``pdfev`` after results currently in ``self``. """
250
+ self.results.extend(pdfev.results)
251
+
252
+ def __getattr__(self, k):
253
+ if k in ['keys']:
254
+ raise AttributeError('no keys method')
255
+ if k == 'pdfnorm':
256
+ return self.results['pdf']
257
+ return getattr(self.results, k)
258
+
259
+ def _remove_gvars(self, gvlist):
260
+ tmp = PDFEVArray(results=self.results, analyzer=self.analyzer)
261
+ tmp.results = _gvar.remove_gvars(tmp.results, gvlist)
262
+ tmp.flat[:] = _gvar.remove_gvars(numpy.array(tmp), gvlist)
263
+ return tmp
264
+
265
+ def _distribute_gvars(self, gvlist):
266
+ return PDFEVArray(
267
+ results=_gvar.distribute_gvars(self.results, gvlist), analyzer=self.analyzer
268
+ )
269
+
270
+ def __reduce_ex__(self, protocol):
271
+ return (PDFEVArray, (pickle.dumps(self.results), self.analyzer))
272
+
273
+ class PDFEVDict(_gvar.BufferDict):
274
+ r""" Dictionary of expectation values from |PDFIntegrator|.
275
+
276
+ Expectation values are returned by
277
+ :meth:`vegas.PDFIntegrator.__call__` and
278
+ :class:`vegas.PDFIntegrator.stats`::
279
+
280
+ >>> g = gvar.gvar(['1(1)', '10(10)'])
281
+ >>> g_ev = vegas.PDFIntegrator(g)
282
+ >>> def f(p):
283
+ ... return dict(p=p, prod=p[0] * p[1])
284
+ >>> print(g_ev(f))
285
+ {'p': array([0.9992(31), 10.024(29)], dtype=object), 'prod': 10.051(57)}
286
+ >>> print(g_ev.stats(f))
287
+ {'p': array([1.0(1.0), 10(10)], dtype=object), 'prod': 10(17)}
288
+
289
+ In the first case, the quoted errors are the uncertainties
290
+ in the :mod:`vegas` estimates of the means. In the second
291
+ case, the quoted uncertainties are the standard deviations
292
+ evaluated with respect to the Gaussian distribution
293
+ associated with ``g`` (added in quadrature to the
294
+ :mod:`vegas` errors, which are negligible here).
295
+
296
+ :class:`vegas.PDFEVDict` objects have the following attributes:
297
+
298
+ Attributes:
299
+
300
+ pdfnorm: Divide PDF by ``pdfnorm`` to normalize it.
301
+
302
+ results (:class:`vegas.RAvgDict`): Results from
303
+ the underlying integrals.
304
+
305
+ In addition, they have all the attributes of the :class:`vegas.RAvgDict`
306
+ (``results``) corresponding to the underlying integrals.
307
+
308
+ A :class:`vegas.PDFEVDict` object ``s`` returned by
309
+ :meth:`vegas.PDFIntegrator.stats` has three further attributes:
310
+
311
+ Attributes:
312
+
313
+ stats: ``s.stats[k]`` is a :class:`gvar.PDFStatistics`
314
+ object containing statistical information about
315
+ the distribution of ``f(p)[k]``.
316
+
317
+ vegas_mean: |vegas| estimates for the mean values
318
+ of ``f(p)``. The uncertainties in ``vegas_mean``
319
+ are the integration errors from |vegas|.
320
+
321
+ vegas_cov: |vegas| estimate for the covariance matrix
322
+ of ``f(p)``. The uncertainties in ``vegas_cov``
323
+ are the integration errors from |vegas|.
324
+
325
+ vegas_sdev: |vegas| estimate for the standard deviation
326
+ of ``f(p)``. The uncertainties in ``vegas_sdev``
327
+ are the integration errors from |vegas|.
328
+ """
329
+ def __init__(self, results, analyzer=None):
330
+ super(PDFEVDict, self).__init__()
331
+ self.results = pickle.loads(results) if isinstance(results, bytes) else results
332
+ if analyzer is None:
333
+ for k in self.results:
334
+ if k == 'pdf':
335
+ continue
336
+ self[k[1]] = self.results[k]
337
+ self.buf[:] /= self.results['pdf']
338
+ self.analyzer = None
339
+ else:
340
+ ans, extras = analyzer(self.results)
341
+ for k in extras:
342
+ setattr(self, k, extras[k])
343
+ for k in ans:
344
+ self[k] = ans[k]
345
+ self.analyzer = analyzer
346
+
347
+ def extend(self, pdfev):
348
+ r""" Merge results from :class:`PDFEVDict` object ``pdfev`` after results currently in ``self``. """
349
+ self.results.extend(pdfev.results)
350
+
351
+ def _remove_gvars(self, gvlist):
352
+ tmp = PDFEVDict(results=self.results, analyzer=self.analyzer)
353
+ tmp.results = _gvar.remove_gvars(tmp.results, gvlist)
354
+ tmp._buf = _gvar.remove_gvars(tmp.buf, gvlist)
355
+ return tmp
356
+
357
+ def _distribute_gvars(self, gvlist):
358
+ return PDFEVDict(
359
+ results=_gvar.distribute_gvars(self.results, gvlist),
360
+ analyzer=self.analyzer
361
+ )
362
+
363
+ def __getattr__(self, k):
364
+ if k == 'pdfnorm':
365
+ return self.results['pdf']
366
+ return getattr(self.results, k)
367
+
368
+ def __reduce_ex__(self, protocol):
369
+ pickle.dumps(self.results)
370
+ return (PDFEVDict, (pickle.dumps(self.results), self.analyzer))
371
+
372
+ class PDFIntegrator(Integrator):
373
+ r""" :mod:`vegas` integrator for PDF expectation values.
374
+
375
+ ``PDFIntegrator(param, pdf)`` creates a |vegas| integrator that
376
+ evaluates expectation values of arbitrary functions ``f(p)`` with
377
+ respect to the probability density function ``pdf(p)``, where
378
+ ``p`` is a point in the parameter space defined by ``param``.
379
+
380
+ ``param`` is a collection of :class:`gvar.GVar`\s (Gaussian random
381
+ variables) that together define a multi-dimensional Gaussian
382
+ distribution with the same parameter space as the distribution
383
+ described by ``pdf(p)``. ``PDFIntegrator`` internally
384
+ re-expresses the integrals over these parameters in terms
385
+ of new variables that emphasize the region defined by
386
+ ``param`` (i.e., the region where the PDF associated with
387
+ the ``param``'s Gaussian distribution is large).
388
+ The new variables are also aligned with the principal axes
389
+ of ``param``'s correlation matrix, to facilitate integration.
390
+
391
+ ``param``'s means and covariances are chosen to emphasize the
392
+ important regions of the ``pdf``'s distribution (e.g., ``param``
393
+ might be set equal to the prior in a Bayesian analysis).
394
+ ``param`` is used to define and optimize the integration variables;
395
+ it does not affect the values of the integrals but can have a big
396
+ effect on the accuracy.
397
+
398
+ The Gaussian PDF associated with ``param`` is used if
399
+ ``pdf`` is unspecified (i.e., ``pdf=None``, which is the default).
400
+
401
+ Typical usage is illustrated by the following code, where
402
+ dictionary ``g`` specifies both the parameterization (``param``)
403
+ and the PDF::
404
+
405
+ import vegas
406
+ import gvar as gv
407
+ import numpy as np
408
+
409
+ g = gv.BufferDict()
410
+ g['a'] = gv.gvar([10., 2.], [[1, 1.4], [1.4, 2]])
411
+ g['fb(b)'] = gv.BufferDict.uniform('fb', 2.9, 3.1)
412
+
413
+ g_ev = vegas.PDFIntegrator(g)
414
+
415
+ def f(p):
416
+ a = p['a']
417
+ b = p['b']
418
+ return a[0] + np.fabs(a[1]) ** b
419
+
420
+ result = g_ev(f, neval=10_000, nitn=5)
421
+ print('<f(p)> =', result)
422
+
423
+ Here ``g`` indicates a three-dimensional distribution
424
+ where the first two variables ``g['a'][0]`` and ``g['a'][1]``
425
+ are Gaussian with means 10 and 2, respectively, and covariance
426
+ matrix [[1, 1.4], [1.4, 2.]]. The last variable ``g['b']`` is
427
+ uniformly distributed on the interval [2.9, 3.1]. The result
428
+ is: ``<f(p)> = 30.145(83)``.
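
The same expectation value is typically obtained much faster if ``f`` is written as a batch function; a sketch (the name ``f_batch`` is illustrative, and the ``lbatch`` convention, with the batch index on the left, is assumed)::

    @vegas.lbatchintegrand
    def f_batch(p):
        a = p['a']                        # shape (nbatch, 2)
        b = p['b']                        # shape (nbatch,)
        return a[:, 0] + np.fabs(a[:, 1]) ** b

    result = g_ev(f_batch, neval=10_000, nitn=5)
    print('<f(p)> =', result)
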
429
+
430
+ ``PDFIntegrator`` evaluates integrals of both ``f(p) * pdf(p)``
431
+ and ``pdf(p)``. The expectation value of ``f(p)`` is the ratio
432
+ of these two integrals (so ``pdf(p)`` need not be normalized).
433
+ The result of a ``PDFIntegrator`` integration
434
+ has an extra attribute, ``result.pdfnorm``, which is the
435
+ |vegas| estimate of the integral over the PDF.
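
Continuing the example above, the normalization integral and the record of the underlying iterations might be inspected as follows (a sketch)::

    print('<f(p)>  =', result)           # ratio of the two integrals
    print('pdfnorm =', result.pdfnorm)   # vegas estimate of the integral of the PDF
    print(result.summary())              # iteration-by-iteration record
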
436
+
437
+ Args:
438
+ param : A |GVar|, array of |GVar|\s, or dictionary, whose values
439
+ are |GVar|\s or arrays of |GVar|\s, that specifies the
440
+ integration parameters. When parameter ``pdf=None``, the
441
+ PDF is set equal to the Gaussian distribution corresponding
442
+ to ``param``.
443
+
444
+ pdf: The probability density function ``pdf(p)``.
445
+ The PDF's parameters ``p`` have the same layout
446
+ as ``param`` (arrays or dictionary), with the same
447
+ keys and/or shapes. The Gaussian PDF associated with
448
+ ``param`` is used when ``pdf=None`` (default).
449
+ Note that PDFs need not be normalized.
450
+
451
+ adapt_to_pdf (bool): :mod:`vegas` adapts to the PDF
452
+ when ``adapt_to_pdf=True`` (default). :mod:`vegas` adapts
453
+ to ``pdf(p) * f(p)`` when calculating the expectation
454
+ value of ``f(p)`` if ``adapt_to_pdf=False``.
455
+
456
+ limit (positive float): Integration variables are determined from
457
+ ``param``. ``limit`` limits the range of each variable to
458
+ a region of size ``limit`` times the standard deviation on
459
+ either side of the mean, where means and standard deviations
460
+ are specified by ``param``. This can be useful if the
461
+ functions being integrated misbehave for large parameter
462
+ values (e.g., ``numpy.exp`` overflows for a large range of
463
+ arguments). Default is ``limit=100``; results should become
464
+ independent of ``limit`` as it is increased.
465
+
466
+ scale (positive float): The integration variables are
467
+ rescaled to emphasize parameter values of order
468
+ ``scale`` times the standard deviation measured from
469
+ the mean, where means and standard deviations are
470
+ specified by ``param``. The rescaling
471
+ does not change the value of the integral but it
472
+ can reduce uncertainties in the :mod:`vegas` estimate.
473
+ Default is ``scale=1.0``.
474
+
475
+ svdcut (non-negative float or None): If not ``None``, replace
476
+ correlation matrix of ``param`` with a new matrix whose
477
+ small eigenvalues are modified: eigenvalues smaller than
478
+ ``svdcut`` times the maximum eigenvalue ``eig_max`` are
479
+ replaced by ``svdcut*eig_max``. This can ameliorate
480
+ problems caused by roundoff errors when inverting the
481
+ covariance matrix. It increases the uncertainty associated
482
+ with the modified eigenvalues and so is conservative.
483
+ Setting ``svdcut=None`` or ``svdcut=0`` leaves the
484
+ covariance matrix unchanged. Default is ``svdcut=1e-12``.
485
+
486
+ All other keyword parameters are passed on to the underlying
487
+ :class:`vegas.Integrator`; the ``uses_jac`` keyword is ignored.
488
+ """
489
+ def __init__(self, param=None, pdf=None, adapt_to_pdf=True, limit=100., scale=1., svdcut=1e-12, **kargs):
490
+ if 'g' in kargs and param is None:
491
+ # for legacy code
492
+ param = kargs['g']
493
+ del kargs['g']
494
+ if param is None:
495
+ raise ValueError('param must be specified')
496
+ if isinstance(param, PDFIntegrator):
497
+ super(PDFIntegrator, self).__init__(param)
498
+ for k in ['param_pdf', 'param_sample', 'pdf', 'adapt_to_pdf', 'limit', 'scale']:
499
+ setattr(self, k, getattr(param, k))
500
+ return
501
+ elif isinstance(param, _gvar.PDF):
502
+ self.param_pdf = param
503
+ else:
504
+ self.param_pdf = _gvar.PDF(param, svdcut=svdcut)
505
+ self.param_sample = self.param_pdf.sample(mode=None)
506
+ self.limit = abs(limit)
507
+ self.scale = abs(scale)
508
+ self.set(adapt_to_pdf=adapt_to_pdf, pdf=pdf)
509
+ integ_map = self._make_map(self.limit / self.scale)
510
+ if kargs and 'uses_jac' in kargs:
511
+ kargs = dict(kargs)
512
+ del kargs['uses_jac']
513
+ super(PDFIntegrator, self).__init__(
514
+ AdaptiveMap(self.param_pdf.size * [integ_map]), **kargs
515
+ )
516
+ if getattr(self, 'mpi') and getattr(self, 'sync_ran'):
517
+ # needed because of the Monte Carlo in _make_map()
518
+ Integrator.synchronize_random() # for mpi only
519
+
520
+ def __reduce__(self):
521
+ kargs = dict()
522
+ for k in Integrator.defaults:
523
+ if Integrator.defaults[k] != getattr(self, k) and k != 'uses_jac':
524
+ kargs[k] = getattr(self, k)
525
+ kargs['sigf'] = numpy.array(self.sigf)
526
+ return (
527
+ PDFIntegrator,
528
+ (self.param_pdf, self.pdf, self.adapt_to_pdf, self.limit, self.scale),
529
+ kargs,
530
+ )
531
+
532
+ def __setstate__(self, kargs):
533
+ self.set(**kargs)
534
+
535
+ def set(self, ka={}, **kargs):
536
+ r""" Reset default parameters in integrator.
537
+
538
+ Usage is analogous to the constructor
539
+ for :class:`PDFIntegrator`: for example, ::
540
+
541
+ old_defaults = pdf_itg.set(neval=1e6, nitn=20)
542
+
543
+ resets the default values for ``neval`` and ``nitn``
544
+ in :class:`PDFIntegrator` ``pdf_itg``. A dictionary,
545
+ here ``old_defaults``, is returned. It can be used
546
+ to restore the old defaults using, for example::
547
+
548
+ pdf_itg.set(old_defaults)
549
+ """
550
+ if kargs:
551
+ kargs.update(ka)
552
+ else:
553
+ kargs = ka
554
+ old_defaults = {}
555
+ if 'param' in kargs:
556
+ raise ValueError("Can't reset param.")
557
+ if 'pdf' in kargs:
558
+ if hasattr(self, 'pdf'):
559
+ old_defaults['pdf'] = self.pdf
560
+ pdf = kargs['pdf']
561
+ self.pdf = (
562
+ pdf if pdf is None else
563
+ self._make_std_integrand(pdf, xsample=self.param_sample)
564
+ )
565
+ del kargs['pdf']
566
+ if 'adapt_to_pdf' in kargs:
567
+ if hasattr(self, 'adapt_to_pdf'):
568
+ old_defaults['adapt_to_pdf'] = self.adapt_to_pdf
569
+ self.adapt_to_pdf = kargs['adapt_to_pdf']
570
+ del kargs['adapt_to_pdf']
571
+ if kargs:
572
+ old_defaults.update(super(PDFIntegrator, self).set(kargs))
573
+ return old_defaults
574
+
575
+ def _make_map(self, limit):
576
+ r""" Make vegas grid that is adapted to the pdf. """
577
+ ny = 2000
578
+ y = _gvar.RNG.random((ny,1))
579
+ limit = numpy.arctan(limit)
580
+ m = AdaptiveMap([[-limit, limit]], ninc=100)
581
+ theta = numpy.empty(y.shape, float)
582
+ jac = numpy.empty(y.shape[0], float)
583
+ for itn in range(10):
584
+ m.map(y, theta, jac)
585
+ tan_theta = numpy.tan(theta[:, 0])
586
+ x = self.scale * tan_theta
587
+ fx = (tan_theta ** 2 + 1) * numpy.exp(-(x ** 2) / 2.)
588
+ m.add_training_data(y, (jac * fx) ** 2)
589
+ m.adapt(alpha=1.5)
590
+ return numpy.array(m.grid[0])
591
+
592
+ @staticmethod
593
+ def _f_lbatch(theta, f, param_pdf, pdf, scale, adapt_to_pdf):
594
+ r""" Integrand for PDFIntegrator.
595
+
596
+ N.B. Static method is more efficient because less to carry around
597
+ (eg, when nproc>1).
598
+ N.B. ``f`` has been converted to a ``VegasIntegrand`` object (as has
599
+ ``self.pdf`` if it is defined externally).
600
+ """
601
+ tan_theta = numpy.tan(theta)
602
+ chiv = scale * tan_theta
603
+ dp_dtheta = numpy.prod(scale * (tan_theta ** 2 + 1.), axis=1) * param_pdf.dp_dchiv
604
+ p = param_pdf.pflat(chiv, mode='lbatch')
605
+ if pdf is None:
606
+ # normalized in chiv space so don't want param_pdf.dp_dchiv in jac
607
+ pdf = numpy.prod(numpy.exp(-(chiv ** 2) / 2.) / numpy.sqrt(2 * numpy.pi), axis=1) / param_pdf.dp_dchiv
608
+ else:
609
+ pdf = numpy.prod(pdf.eval(p, jac=None), axis=1)
610
+ if f is None:
611
+ ans = _gvar.BufferDict(pdf=dp_dtheta * pdf)
612
+ return ans
613
+ fp = dp_dtheta * pdf if f is None else f.format_evalx(f.eval(p))
614
+ ans = _gvar.BufferDict()
615
+ if hasattr(fp, 'keys'):
616
+ ans['pdf'] = dp_dtheta * pdf
617
+ for k in fp:
618
+ shape = numpy.shape(fp[k])
619
+ ans[('f(p)*pdf', k)] = fp[k] * ans['pdf'].reshape(shape[:1] + len(shape[1:]) * (1,))
620
+ else:
621
+ fp = numpy.asarray(fp)
622
+ ans['pdf'] = dp_dtheta * pdf
623
+ shape = fp.shape
624
+ fp *= ans['pdf'].reshape(shape[:1] + len(shape[1:]) * (1,))
625
+ ans['f(p)*pdf'] = fp
626
+ if not adapt_to_pdf:
627
+ ans_pdf = ans.pop('pdf')
628
+ ans['pdf'] = ans_pdf
629
+ return ans
630
+
631
+ def __call__(self, f=None, save=None, saveall=None, **kargs):
632
+ r""" Estimate expectation value of function ``f(p)``.
633
+
634
+ Uses module :mod:`vegas` to estimate the integral of
635
+ ``f(p)`` multiplied by the probability density function
636
+ associated with ``g`` (i.e., ``pdf(p)``). At the same
637
+ time it integrates the PDF. The ratio of the two integrals
638
+ is the expectation value.
639
+
640
+ Args:
641
+ f (function): Function ``f(p)`` to integrate. Integral is
642
+ the expectation value of the function with respect
643
+ to the distribution. The function can return a number,
644
+ an array of numbers, or a dictionary whose values are
645
+ numbers or arrays of numbers. Setting ``f=None`` means
646
+ that only the PDF is integrated. Integrals can be
647
+ substantially faster if ``f(p)`` (and ``pdf(p)`` if set)
648
+ are batch functions (see :mod:`vegas` documentation).
649
+
650
+ pdf: If specified, ``pdf(p)`` is used as the probability
651
+ density function rather than the Gaussian PDF
652
+ associated with ``g``. The Gaussian PDF is used if
653
+ ``pdf=None`` (default). Note that PDFs need not
654
+ be normalized.
655
+
656
+ adapt_to_pdf (bool): :mod:`vegas` adapts to the PDF
657
+ when ``adapt_to_pdf=True`` (default). :mod:`vegas` adapts
658
+ to ``pdf(p) * f(p)`` if ``adapt_to_pdf=False``.
659
+
660
+ save (str or file or None): Writes ``results`` into pickle
661
+ file specified by ``save`` at the end of each iteration.
662
+ For example, setting ``save='results.pkl'`` means that
663
+ the results returned by the last vegas iteration can be
664
+ reconstructed later using::
665
+
666
+ import pickle
667
+ with open('results.pkl', 'rb') as ifile:
668
+ results = pickle.load(ifile)
669
+
670
+ Ignored if ``save=None`` (default).
671
+
672
+ saveall (str or file or None): Writes ``(results, integrator)``
673
+ into pickle file specified by ``saveall`` at the end of
674
+ each iteration. For example, setting ``saveall='allresults.pkl'``
675
+ means that the results returned by the last vegas iteration,
676
+ together with a clone of the (adapted) integrator, can be
677
+ reconstructed later using::
678
+
679
+ import pickle
680
+ with open('allresults.pkl', 'rb') as ifile:
681
+ results, integrator = pickle.load(ifile)
682
+
683
+ Ignored if ``saveall=None`` (default).
684
+
685
+ All other keyword arguments are passed on to a :mod:`vegas`
686
+ integrator; see the :mod:`vegas` documentation for further information.
687
+
688
+ Returns:
689
+ Expectation value(s) of ``f(p)`` as object of type
690
+ :class:`vegas.PDFEV`, :class:`vegas.PDFEVArray`, or
691
+ :class:`vegas.PDFEVDict`.
692
+ """
693
+ if kargs and 'uses_jac' in kargs:
694
+ kargs = dict(kargs)
695
+ del kargs['uses_jac']
696
+ if kargs:
697
+ self.set(kargs)
698
+ if save is not None or saveall is not None:
699
+ self.set(analyzer=PDFAnalyzer(self, analyzer=self.analyzer, save=save, saveall=saveall))
700
+ if f is not None:
701
+ f = self._make_std_integrand(f, self.param_sample)
702
+ integrand = lbatchintegrand(functools.partial(
703
+ PDFIntegrator._f_lbatch, f=f, param_pdf=self.param_pdf,
704
+ pdf=self.pdf, scale=self.scale, adapt_to_pdf=self.adapt_to_pdf,
705
+ ))
706
+ results = super(PDFIntegrator, self).__call__(integrand)
707
+ if results['pdf'] == 0:
708
+ raise RuntimeError('Integral of PDF vanishes; increase neval?')
709
+ if f is None:
710
+ ans = results
711
+ ans.pdfnorm = results['pdf']
712
+ else:
713
+ ans = PDFIntegrator._make_ans(results)
714
+ if isinstance(self.analyzer, PDFAnalyzer):
715
+ self.set(analyzer=self.analyzer.analyzer)
716
+ return ans
717
+
718
+ @staticmethod
719
+ def _make_ans(results):
720
+ if 'f(p)*pdf' not in results:
721
+ ans = PDFEVDict(results)
722
+ elif numpy.ndim(results['f(p)*pdf']) == 0:
723
+ ans = PDFEV(results)
724
+ else:
725
+ ans = PDFEVArray(results)
726
+ return ans
727
+
728
+ def stats(self, f=None, moments=False, histograms=False, **kargs):
729
+ r""" Statistical analysis of function ``f(p)``.
730
+
731
+ Uses the :mod:`vegas` integrator to evaluate the expectation
732
+ values and (co)variances of ``f(p)`` with
733
+ respect to the probability density function associated
734
+ with the :class:`PDFIntegrator`. Typical usage
735
+ is illustrated by::
736
+
737
+ >>> import gvar as gv
738
+ >>> import vegas
739
+ >>> g = gv.gvar(dict(a='1.0(5)', b='2(1)')) * gv.gvar('1.0(5)')
740
+ >>> g_ev = vegas.PDFIntegrator(g)
741
+ >>> g_ev(neval=10_000) # adapt the integrator to the PDF
742
+ >>> @vegas.rbatchintegrand
743
+ ... def f(p):
744
+ ... fp = dict(a=p['a'], b=p['b'])
745
+ ... fp['a**2 * b'] = p['a']**2 * p['b']
746
+ ... return fp
747
+ >>> r = g_ev.stats(f)
748
+ >>> print(r)
749
+ {'a': 1.00(71), 'b': 2.0(1.4), 'a**2 * b': 4.0(6.1)}
750
+ >>> print(r.vegas_mean['a**2 * b'])
751
+ 3.9972(30)
752
+ >>> print(r.vegas_cov['a**2 * b', 'a**2 * b'] ** 0.5)
753
+ 6.073(13)
754
+
755
+ ``g_ev.stats(f)`` returns a dictionary of |GVar|\s whose
756
+ means and (co)variances are calculated from integrals of
757
+ ``f(p) * pdf(p)`` and ``f(p)**2 * pdf(p)``, where ``pdf(p)``
758
+ is the probability density function associated with ``g``.
759
+ The means and standard deviations for each component of ``f(p)``
760
+ are displayed by ``print(r)``. The values for the means
761
+ and standard deviations have uncertainties coming from the
762
+ integrations (|vegas| errors) but these are negligible compared
763
+ to the standard deviations. (The last two
764
+ print statements show the |vegas| results for the
765
+ mean and standard deviation in ``r['a**2 * b']``: 3.9972(30)
766
+ and 6.073(13), respectively.)
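
Schematically, writing ``<X>`` for ``(integral of X(p) * pdf(p)) / (integral of pdf(p))``, the values reported by ``stats`` are::

    mean      =  <f>
    sdev      =  (<f**2> - <f>**2) ** 0.5
    cov[i,j]  =  <f[i] * f[j]>  -  <f[i]> * <f[j]>
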
767
+
768
+ The Gaussian approximation for the expectation value of
769
+ ``f(p)`` is given by ::
770
+
771
+ >>> print(f(g))
772
+ {'a': 1.00(71), 'b': 2.0(1.4), 'a**2 * b': 2.0(3.7)}
773
+
774
+ Results for ``a`` and ``b`` agree with the results from
775
+ ``g_ev.stats(f)``, as expected since the distributions
776
+ for these quantities are (obviously) Gaussian. Results
777
+ for ``a**2 * b``, however, are quite different, indicating
778
+ a distribution that is not Gaussian.
779
+
780
+ Additional statistical data are collected by setting keywords
781
+ ``moments=True`` and/or ``histogram=True``::
782
+
783
+ >>> r = g_ev.stats(f, moments=True, histograms=True)
784
+ >>> for k in r:
785
+ ... print(10 * '-', k)
786
+ ... print(r.stats[k])
787
+ ---------- a
788
+ mean = 0.99972(23) sdev = 0.70707(29) skew = -0.0036(20) ex_kurt = -0.0079(49)
789
+ split-normal: 1.0013(14) +/- 0.70862(97)/0.71091(98)
790
+ median: 0.99927(62) +/- 0.7077(10)/0.7063(10)
791
+ ---------- b
792
+ mean = 1.99954(47) sdev = 1.41424(72) skew = -0.0041(28) ex_kurt = -0.0074(65)
793
+ split-normal: 2.0042(33) +/- 1.4162(23)/1.4224(24)
794
+ median: 1.9977(11) +/- 1.4162(18)/1.4115(19)
795
+ ---------- a**2 * b
796
+ mean = 3.9957(29) sdev = 6.054(12) skew = 3.048(22) ex_kurt = 14.52(35)
797
+ split-normal: -0.4891(25) +/- 6.9578(88)/0.519(10)
798
+ median: 1.7447(24) +/- 6.284(12)/2.0693(26)
799
+
800
+ where the uncertainties are all |vegas| errors. Here the
801
+ integrator was used to calculate the first four moments
802
+ of the distributions for each component of ``f(p)``, from
803
+ which the mean, standard deviation, skewness, and excess
804
+ kurtosis of those distributions are calculated. As expected,
805
+ the first two distributions here are clearly Gaussian,
806
+ but the distribution for ``a**2 * b`` is not.
807
+
808
+ The integrator also calculates histograms
809
+ for each of the distributions and fits them to two
810
+ different two-sided Gaussians: one is a continuous split-normal
811
+ distribution, and the other is centered on the median of the
812
+ distribution and is discontinuous there. (For more information
813
+ see the documentation for :class:`gvar.PDFStatistics`.)
814
+ Both models suggest large asymmetries in the distribution
815
+ for ``a**2 * b``. The histogram for this distribution can
816
+ be displayed using::
817
+
818
+ >>> r.stats['a**2 * b'].plot_histogram(show=True)
819
+
820
+ Note that |vegas| adaptation is turned off (``adapt=False``)
821
+ by default in :meth:`PDFIntegrator.stats`. This setting
822
+ can be overridden by setting the ``adapt`` parameter
823
+ explicitly, but this is not recommended.
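
The recommended pattern is therefore to adapt the integrator first and only then collect statistics, as in the doctest above (a sketch)::

    g_ev = vegas.PDFIntegrator(g)
    g_ev(neval=10_000, nitn=10)               # adapt the integrator to the PDF
    r = g_ev.stats(f, neval=10_000, nitn=5)   # statistics; adaptation is off by default
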
824
+
825
+ Args:
826
+ f (callable): Statistics are calculated for the
827
+ components of the output from function ``f(p)``,
828
+ where ``p`` is a point drawn from the distribution
829
+ specified by the ``param`` or ``pdf`` associated with the
830
+ :class:`PDFIntegrator`. Parameters ``p`` have
831
+ the same structure as ``param`` (i.e., array or
832
+ dictionary). If ``f=None``, it is replaced by
833
+ ``f=lbatchintegrand(lambda p:p)``.
834
+
835
+ moments (bool): If ``True``, moments are calculated so
836
+ that the skewness and excess kurtosis can be determined.
837
+
838
+ histograms (bool or dict): Setting ``histograms=True``
839
+ causes histograms to be calculated for the
840
+ distributions associated with each component of
841
+ the output from ``f(p)``. Alternatively, ``histograms``
842
+ can be set equal to a dictionary to specify the
843
+ width ``binwidth`` of each histogram bin, the total
844
+ number ``nbin`` of bins, and/or the location ``loc``
845
+ of each histogram: for example, ::
846
+
847
+ histograms=dict(
848
+ binwidth=0.5, nbin=12,
849
+ loc=gv.gvar({
850
+ 'a': '1.0(5)', 'b': '2(1)',
851
+ 'a**2 * b': '2.5(2.7)'
852
+ }),
853
+ )
854
+
855
+ where ``loc`` specifies the location of the center of the histogram
856
+ for each output quantity (e.g., ``loc['a'].mean``) and the width of
857
+ the bins (e.g., ``binwidth * loc['a'].sdev``). If ``loc`` is not
858
+ specified explicitly, it is determined from a simulation using
859
+ values drawn from the Gaussian distribution for ``param``
860
+ (or from the distribution described by ``self.pdf`` if it is specified).
861
+
862
+ kargs (dict): Additional keywords passed on to the
863
+ integrator.
864
+
865
+ Returns:
866
+ Expectation value(s) of ``f(p)`` as an object of type
867
+ :class:`vegas.PDFEV`, :class:`vegas.PDFEVArray`,
868
+ or :class:`vegas.PDFEVDict`.
869
+ """
870
+ oldsettings = {}
871
+ if 'adapt' not in kargs:
872
+ oldsettings['adapt'] = self.adapt
873
+ kargs['adapt'] = False
874
+
875
+ if f is None:
876
+ if self.param_sample.shape is None and not hasattr(self, 'extrakeys'):
877
+ self.extrakeys = []
878
+ for k in self.param_sample.all_keys():
879
+ if k not in self.param_sample:
880
+ self.extrakeys.append(k)
881
+ else:
882
+ self.extrakeys = None
883
+ f = lbatchintegrand(functools.partial(
884
+ PDFIntegrator.default_stats_f, jac=None, extrakeys=self.extrakeys
885
+ ))
886
+ f = self._make_std_integrand(f, xsample=self.param_sample)
887
+ fpsample = f(self.param_sample)
888
+
889
+ if histograms is not False:
890
+ if histograms is True:
891
+ histograms = {}
892
+ nbin = histograms.get('nbin', 12)
893
+ binwidth = histograms.get('binwidth', 0.5)
894
+ histograms['nbin'] = nbin
895
+ histograms['binwidth'] = binwidth
896
+ loc = histograms.get('loc', None)
897
+ # bins = histograms.get('bins', None)
898
+ if loc is not None:
899
+ if hasattr(loc, 'keys'):
900
+ loc = _gvar.asbufferdict(loc).flat[:]
901
+ else:
902
+ loc = numpy.asarray(loc).flat[:]
903
+ mean = _gvar.mean(loc)
904
+ sdev = _gvar.sdev(loc)
905
+ else:
906
+ @lbatchintegrand
907
+ def ff2(p):
908
+ if hasattr(p, 'keys'):
909
+ p = p.lbatch_buf
910
+ else:
911
+ p = p.reshape(p.shape[0], -1)
912
+ fp = f.eval(p)
913
+ return dict(f=fp, f2=fp ** 2)
914
+ oldnitn = self.nitn
915
+ r = self(ff2, nitn=1)
916
+ self.set(nitn=oldnitn)
917
+ mean = _gvar.mean(r['f'])
918
+ sdev = numpy.fabs(_gvar.mean(r['f2']) - mean * mean) ** 0.5
919
+ bins = []
920
+ halfwidth = nbin / 2 * binwidth
921
+ for i in range(mean.shape[0]):
922
+ bins.append(
923
+ mean[i] + numpy.linspace(-halfwidth * sdev[i], halfwidth * sdev[i], nbin+1)
924
+ )
925
+ histograms['bins'] = numpy.array(bins)
926
+ integrand = lbatchintegrand(functools.partial(
927
+ PDFIntegrator._stats_integrand, f=f, moments=moments, histograms=histograms
928
+ ))
929
+ integrand = self._make_std_integrand(integrand, xsample=self.param_sample.flat[:])
930
+ results = self(integrand, **kargs)
931
+ analyzer = functools.partial(
932
+ PDFIntegrator._stats_analyzer,
933
+ fpsample=fpsample, moments=moments, histograms=histograms
934
+ )
935
+ if fpsample.shape is None:
936
+ ans = PDFEVDict(results.results, analyzer)
937
+ elif fpsample.shape == ():
938
+ ans = PDFEV(results.results, analyzer)
939
+ else:
940
+ ans = PDFEVArray(results.results, analyzer)
941
+ if oldsettings:
942
+ self.set(**oldsettings)
943
+ return ans
944
+
945
+ @staticmethod
946
+ def _stats_analyzer(results, fpsample, moments, histograms):
947
+ r""" Create final stats results from Integrator results """
948
+ # convert from Integrator to PDFIntegrator results
949
+ tmp = _gvar.BufferDict()
950
+ for k in results:
951
+ if k == 'pdf':
952
+ continue
953
+ tmp[k[1]] = results[k]
954
+ results = tmp / results['pdf']
955
+
956
+ # 1) mean/cov
957
+ fp = results['fp']
958
+ meanfp = _gvar.mean(fp)
959
+ covfpfp = numpy.zeros(2 * meanfp.shape, dtype=object)
960
+ fp2 = numpy.array(len(meanfp) * [None])
961
+ fpfp = iter(results['fpfp'])
962
+ for i in range(meanfp.shape[0]):
963
+ for j in range(i + 1):
964
+ if i == j:
965
+ fp2[i] = next(fpfp)
966
+ covfpfp[i, i] = fp2[i] - fp[i] ** 2
967
+ else:
968
+ covfpfp[i, j] = covfpfp[j, i] = next(fpfp) - fp[i] * fp[j]
969
+ # add vegas errors to cov and create final result
970
+ covfpfp += _gvar.evalcov(fp)
971
+ ans = _gvar.gvar(meanfp, _gvar.mean(covfpfp))
972
+ if fpsample.shape is None:
973
+ ans = _gvar.BufferDict(fpsample, buf=ans)
974
+ mean = _gvar.BufferDict(fpsample, buf=fp)
975
+ tcov = _gvar.evalcov(mean)
976
+ cov = _gvar.BufferDict()
977
+ sdev= _gvar.BufferDict()
978
+ for k in mean:
979
+ ksl = mean.slice(k)
980
+ for l in mean:
981
+ lsl = mean.slice(l)
982
+ if tcov[k,l].shape == (1, 1) or tcov[k,l].shape == ():
983
+ cov[k, l] = covfpfp[ksl, lsl]
984
+ else:
985
+ cov[k, l] = covfpfp[ksl,lsl].reshape(tcov[k,l].shape)
986
+ sdev[k] = _gvar.fabs(cov[k, k]) ** 0.5
987
+ elif fpsample.shape == ():
988
+ ans = ans.flat[0]
989
+ mean = fp.flat[0]
990
+ cov = covfpfp
991
+ sdev = _gvar.fabs(cov) ** 0.5
992
+ else:
993
+ ans = ans.reshape(fpsample.shape)
994
+ mean = fp.reshape(fpsample.shape)
995
+ tcov = _gvar.evalcov(mean)
996
+ cov = covfpfp.reshape(tcov.shape)
997
+ sdev = _gvar.fabs(tcov.diagonal()).reshape(mean.shape) ** 0.5
998
+ # 2) moments and histogram
999
+ stats = numpy.array(fpsample.size * [None])
1000
+ for i in range(len(stats)):
1001
+ if moments:
1002
+ mom = [fp[i], fp2[i], results['fp**3'][i], results['fp**4'][i]]
1003
+ else:
1004
+ mom = [fp[i], fp2[i]]
1005
+ if histograms:
1006
+ hist = histograms['bins'][i], results['count'][i]
1007
+ else:
1008
+ hist = None
1009
+ stats[i] = _gvar.PDFStatistics(moments=mom, histogram=hist)
1010
+ if fpsample.shape is None:
1011
+ stats = _gvar.BufferDict(ans, buf=stats)
1012
+ elif fpsample.shape == ():
1013
+ stats = stats.flat[0]
1014
+ else:
1015
+ stats = stats.reshape(ans.shape)
1016
+ return ans, dict(stats=stats, vegas_mean=mean, vegas_cov=cov, vegas_sdev=sdev)
1017
+
1018
+ @staticmethod
1019
+ def _stats_integrand(p, f, moments=False, histograms=False):
1020
+ fp = f.eval(p)
1021
+ nfp = fp.shape[1]
1022
+ nbatch = fp.shape[0]
1023
+ fpfp = []
1024
+ for i in range(fp.shape[1]):
1025
+ for j in range(i + 1):
1026
+ fpfp.append(fp[:, i] * fp[:, j])
1027
+ fpfp = numpy.array(fpfp).T
1028
+ ans = _gvar.BufferDict(fp=fp, fpfp=fpfp)
1029
+ if moments:
1030
+ ans['fp**3'] = fp ** 3
1031
+ ans['fp**4'] = fp ** 4
1032
+ if histograms:
1033
+ count = numpy.zeros((nbatch, nfp, histograms['nbin'] + 2), dtype=float)
1034
+ idx = numpy.arange(nbatch)
1035
+ for j in range(nfp):
1036
+ jdx = numpy.searchsorted(histograms['bins'][j], fp[:, j], side='right')
1037
+ count[idx, j, jdx] = 1
1038
+ ans['count'] = count
1039
+ return ans
1040
+
1041
+ @staticmethod
1042
+ def default_stats_f(p, jac=None, extrakeys=None):
1043
+ if extrakeys is not None and extrakeys:
1044
+ for k in extrakeys:
1045
+ p[k] = p[k]
1046
+ return p
1047
+
1048
+ def sample(self, nbatch, mode='rbatch'):
1049
+ r""" Generate random samples from the integrator's PDF.
1050
+
1051
+ Typical usage is::
1052
+
1053
+ import gvar as gv
1054
+ import numpy as np
1055
+ import vegas
1056
+
1057
+ @vegas.rbatchintegrand
1058
+ def g_pdf(p):
1059
+ ans = 0
1060
+ h = 1.
1061
+ for p0 in [0.3, 0.6]:
1062
+ ans += h * np.exp(-np.sum((p-p0)**2, axis=0)/2/.01)
1063
+ h /= 2
1064
+ return ans
1065
+
1066
+ g_param = gv.gvar([0.5, 0.5], [[.25, .2], [.2, .25]])
1067
+ g_ev = vegas.PDFIntegrator(param=g_param, pdf=g_pdf)
1068
+
1069
+ # adapt integrator to g_pdf(p) and evaluate <p>
1070
+ g_ev(neval=4000, nitn=10)
1071
+ r = g_ev.stats()
1072
+ print('<p> =', r, '(vegas)')
1073
+
1074
+ # sample g_pdf(p)
1075
+ wgts, p_samples = g_ev.sample(nbatch=40_000)
1076
+ # evaluate mean values <p> and <cos(p0)>
1077
+ p_avg = np.sum(wgts * p_samples, axis=1)
1078
+ cosp0_avg = np.sum(wgts * np.cos(p_samples[0]))
1079
+ print('<p> =', p_avg, '(sample)')
1080
+ print('<cos(p0)> =', cosp0_avg, '(sample)')
1081
+
1082
+ Here ``p_samples[d, i]`` is a batch of about 40,000 random samples
1083
+ for parameter ``p[d]`` drawn from the (bimodal) distribution with
1084
+ PDF ``g_pdf(p)``. Index ``d=0,1`` labels directions in parameter
1085
+ space, while index ``i`` labels the sample. The samples
1086
+ are weighted by ``wgts[i]``; the sum of all weights equals one.
1087
+ The batch index in ``p_samples`` is the rightmost index because
1088
+ by default ``mode='rbatch'``. (Set ``mode='lbatch'`` to move the
1089
+ batch index to the leftmost position: ``p_samples[i, d]``.)
1090
+ The output from this script is::
1091
+
1092
+ <p> = [0.40(17) 0.40(17)] (vegas)
1093
+ <p> = [0.40011804 0.39999454] (sample)
1094
+ <cos(p0)> = 0.9074221724843065 (sample)
1095
+
1096
+ Samples are also useful for making histograms and contour
1097
+ plots of the probability density. For example, the following
1098
+ code uses the :mod:`corner` Python module to create histograms
1099
+ for each parameter, and a contour plot showing
1100
+ their joint distribution::
1101
+
1102
+ import corner
1103
+ import matplotlib.pyplot as plt
1104
+
1105
+ corner.corner(
1106
+ data=p_samples.T, weights=wgts, labels=['p[0]', 'p[1]'],
1107
+ range=[0.999, 0.999], show_titles=True, quantiles=[0.16, 0.5, 0.84],
1108
+ plot_datapoints=False, fill_contours=True, smooth=1,
1109
+ )
1110
+ plt.show()
1111
+
1112
+ The output, showing the bimodal structure, is:
1113
+
1114
+ .. image:: bimodal.png
1115
+ :width: 80%
1116
+
1117
+
1118
+ Args:
1119
+ nbatch (int): The integrator will return
1120
+ at least ``nbatch`` samples drawn from its PDF. The
1121
+ actual number of samples is the smallest multiple of
1122
+ ``self.last_neval`` that is equal to or larger than ``nbatch``.
1123
+ Results are packaged in arrays or dictionaries
1124
+ whose elements have an extra index labeling the different
1125
+ samples in the batch. The batch index is
1126
+ the rightmost index if ``mode='rbatch'``; it is
1127
+ the leftmost index if ``mode`` is ``'lbatch'``.
1128
+ mode (bool): Batch mode. Allowed
1129
+ modes are ``'rbatch'`` or ``'lbatch'``,
1130
+ corresponding to batch indices that are on the
1131
+ right or the left, respectively.
1132
+ Default is ``mode='rbatch'``.
1133
+
1134
+ Returns:
1135
+ A tuple ``(wgts,samples)`` containing samples drawn from the integrator's
1136
+ PDF, together with their weights ``wgts``. The weighted sample points
1137
+ are distributed through parameter space with a density proportional to
1138
+ the PDF.
1139
+
1140
+ In general, ``samples`` is either a dictionary or an array
1141
+ depending upon the format of |PDFIntegrator| parameter ``param``.
1142
+ For example, if ::
1143
+
1144
+ param = gv.gvar(dict(s='1.5(1)', v=['3.2(8)', '1.1(4)']))
1145
+
1146
+ then ``samples['s'][i]`` is a sample for parameter ``p['s']``
1147
+ where index ``i=0,1...nbatch(approx)`` labels the sample. The
1148
+ corresponding sample for ``p['v'][d]``, where ``d=0`` or ``1``,
1149
+ is ``samples['v'][d, i]`` provided ``mode='rbatch'``, which
1150
+ is the default. (Otherwise it is ``p['v'][i, d]``, for
1151
+ ``mode='lbatch'``.) The corresponding weight for this sample
1152
+ is ``wgts[i]``.
1153
+
1154
+ When ``param`` is an array, ``samples`` is an array with the same
1155
+ shape plus an extra sample index which is either on the right
1156
+ (``mode='rbatch'``, default) or left (``mode='lbatch'``).
1157
+ """
1158
+ neval = self.last_neval if hasattr(self, 'last_neval') else self.neval
1159
+ nit = 1 if nbatch is None else nbatch // neval
1160
+ if nit * neval < nbatch:
1161
+ nit += 1
1162
+ samples = []
1163
+ wgts = []
1164
+ for _ in range(nit):
1165
+ for theta, wgt in self.random_batch():
1166
+ # following code comes mostly from _f_lbatch
1167
+ tan_theta = numpy.tan(theta)
1168
+ chiv = self.scale * tan_theta
1169
+ # jac = dp_dtheta
1170
+ dp_dtheta = self.scale * numpy.prod((tan_theta ** 2 + 1.), axis=1) * self.param_pdf.dp_dchiv
1171
+ pflat = self.param_pdf.pflat(chiv, mode='lbatch')
1172
+ if self.pdf is None:
1173
+ # normalized in chiv space so don't want param_pdf.dpdchiv in jac
1174
+ pdf = numpy.prod(numpy.exp(-(chiv ** 2) / 2.) / numpy.sqrt(2 * numpy.pi), axis=1) / self.param_pdf.dp_dchiv
1175
+ else:
1176
+ pdf = numpy.prod(self.pdf.eval(pflat, jac=None), axis=1)
1177
+ p = self.param_pdf._unflatten(pflat, mode='lbatch')
1178
+ wgts.append(wgt * dp_dtheta * pdf)
1179
+ samples.append(pflat)
1180
+ samples = numpy.concatenate(samples, axis=0)
1181
+ wgts = numpy.concatenate(wgts)
1182
+ wgts /= numpy.sum(wgts)
1183
+ if mode == 'rbatch':
1184
+ samples = self.param_pdf._unflatten(samples.T, mode='rbatch')
1185
+ else:
1186
+ samples = self.param_pdf._unflatten(samples, mode='lbatch')
1187
+ return wgts, samples
1188
+
1189
+ class PDFAnalyzer(object):
1190
+ r""" |vegas| analyzer for implementing ``save``, ``saveall`` keywords for :class:`PDFIntegrator` """
1191
+ def __init__(self, pdfinteg, analyzer, save=None, saveall=None):
1192
+ self.pdfinteg = pdfinteg
1193
+ self.analyzer = analyzer
1194
+ self.save = save
1195
+ self.saveall = saveall
1196
+
1197
+ def begin(self, itn, integrator):
1198
+ if self.analyzer is not None:
1199
+ self.analyzer.begin(itn, integrator)
1200
+
1201
+ def end(self, itn_result, results):
1202
+ if self.analyzer is not None:
1203
+ self.analyzer.end(itn_result, results)
1204
+ if self.save is None and self.saveall is None:
1205
+ return
1206
+ ans = PDFIntegrator._make_ans(results)
1207
+ if isinstance(self.save, str):
1208
+ with open(self.save, 'wb') as ofile:
1209
+ pickle.dump(ans, ofile)
1210
+ elif self.save is not None:
1211
+ pickle.dump(ans, self.save)
1212
+ if isinstance(self.saveall, str):
1213
+ with open(self.saveall, 'wb') as ofile:
1214
+ pickle.dump((ans,self.pdfinteg), ofile)
1215
+ elif self.saveall is not None:
1216
+ pickle.dump((ans,self.pdfinteg), self.saveall)
1217
+
1218
+
1219
+ def ravg(reslist, weighted=None, rescale=None):
1220
+ r""" Create running average from list of :mod:`vegas` results.
1221
+
1222
+ This function is used to change how the weighted average of
1223
+ |vegas| results is calculated. For example, the following code
1224
+ discards the first five results (where |vegas| is still adapting)
1225
+ and does an unweighted average of the last five::
1226
+
1227
+ import vegas
1228
+
1229
+ def fcn(p):
1230
+ return p[0] * p[1] * p[2] * p[3] * 16.
1231
+
1232
+ itg = vegas.Integrator(4 * [[0,1]])
1233
+ r = itg(fcn)
1234
+ print(r.summary())
1235
+ ur = vegas.ravg(r.itn_results[5:], weighted=False)
1236
+ print(ur.summary())
1237
+
1238
+ The unweighted average can be useful because it is unbiased.
1239
+ The output is::
1240
+
1241
+ itn integral wgt average chi2/dof Q
1242
+ -------------------------------------------------------
1243
+ 1 1.013(19) 1.013(19) 0.00 1.00
1244
+ 2 0.997(14) 1.002(11) 0.45 0.50
1245
+ 3 1.021(12) 1.0112(80) 0.91 0.40
1246
+ 4 0.9785(97) 0.9980(62) 2.84 0.04
1247
+ 5 1.0067(85) 1.0010(50) 2.30 0.06
1248
+ 6 0.9996(75) 1.0006(42) 1.85 0.10
1249
+ 7 1.0020(61) 1.0010(34) 1.54 0.16
1250
+ 8 1.0051(52) 1.0022(29) 1.39 0.21
1251
+ 9 1.0046(47) 1.0029(24) 1.23 0.27
1252
+ 10 0.9976(47) 1.0018(22) 1.21 0.28
1253
+
1254
+ itn integral average chi2/dof Q
1255
+ -------------------------------------------------------
1256
+ 1 0.9996(75) 0.9996(75) 0.00 1.00
1257
+ 2 1.0020(61) 1.0008(48) 0.06 0.81
1258
+ 3 1.0051(52) 1.0022(37) 0.19 0.83
1259
+ 4 1.0046(47) 1.0028(30) 0.18 0.91
1260
+ 5 0.9976(47) 1.0018(26) 0.31 0.87
1261
+
1262
+ Args:
1263
+ reslist (list): List whose elements are |GVar|\s, arrays of
1264
+ |GVar|\s, or dictionaries whose values are |GVar|\s or
1265
+ arrays of |GVar|\s. Alternatively ``reslist`` can be
1266
+ the object returned by a call to a
1267
+ :class:`vegas.Integrator` object (i.e, an instance of
1268
+ any of :class:`vegas.RAvg`, :class:`vegas.RAvgArray`,
1269
+ :class:`vegas.RAvgArray`, :class:`vegas.PDFEV`,
1270
+ :class:`vegas.PDFEVArray`, :class:`vegas.PDFEVArray`).
1271
+ weighted (bool): Running average is weighted (by the inverse
1272
+ covariance matrix) if ``True``. Otherwise the
1273
+ average is unweighted, which makes most sense if ``reslist``
1274
+ items were generated by :mod:`vegas` with little or no
1275
+ adaptation (e.g., with ``adapt=False``). If ``weighted``
1276
+ is not specified (or is ``None``), it is set equal to
1277
+ ``getattr(reslist, 'weighted', True)``.
1278
+ rescale: Integration results are divided by ``rescale``
1279
+ before taking the weighted average if
1280
+ ``weighted=True``; otherwise ``rescale`` is ignored.
1281
+ Setting ``rescale=True`` is equivalent to setting
1282
+ ``rescale=reslist[-1]``. If ``rescale`` is not
1283
+ specified (or is ``None``), it is set equal to
1284
+ ``getattr(reslist, 'rescale', True)``.
1285
+ """
1286
+ for t in [PDFEV, PDFEVArray, PDFEVDict]:
1287
+ if isinstance(reslist, t):
1288
+ return t(ravg(reslist.itn_results, weighted=weighted, rescale=rescale))
1289
+ for t in [RAvg, RAvgArray, RAvgDict]:
1290
+ if isinstance(reslist, t):
1291
+ reslist = reslist.itn_results
1292
+ try:
1293
+ if len(reslist) < 1:
1294
+ raise ValueError('reslist empty')
1295
+ except:
1296
+ raise ValueError('improper type for reslist')
1297
+ if weighted is None:
1298
+ weighted = getattr(reslist, 'weighted', True)
1299
+ if rescale is None:
1300
+ rescale = getattr(reslist, 'rescale', reslist[-1])
1301
+ if hasattr(reslist[0], 'keys'):
1302
+ return RAvgDict(itn_results=reslist, weighted=weighted, rescale=rescale)
1303
+ try:
1304
+ shape = numpy.shape(reslist[0])
1305
+ except:
1306
+ raise ValueError('reslist[i] not GVar, array, or dictionary')
1307
+ if shape == ():
1308
+ return RAvg(itn_results=reslist, weighted=weighted)
1309
+ else:
1310
+ return RAvgArray(itn_results=reslist, weighted=weighted, rescale=rescale)
1311
+