vegas 6.4.1__cp313-cp313-macosx_11_0_arm64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- vegas/__init__.pxd +14 -0
- vegas/__init__.py +1592 -0
- vegas/_vegas.c +117028 -0
- vegas/_vegas.cpython-313-darwin.so +0 -0
- vegas/_vegas.pxd +80 -0
- vegas/_vegas.pyx +3473 -0
- vegas-6.4.1.dist-info/METADATA +56 -0
- vegas-6.4.1.dist-info/RECORD +11 -0
- vegas-6.4.1.dist-info/WHEEL +6 -0
- vegas-6.4.1.dist-info/licenses/LICENSE.txt +546 -0
- vegas-6.4.1.dist-info/top_level.txt +1 -0
vegas/__init__.py
ADDED
|
@@ -0,0 +1,1592 @@
|
|
|
1
|
+
""" Introduction
|
|
2
|
+
--------------------
|
|
3
|
+
This package provides tools for estimating multidimensional
|
|
4
|
+
integrals numerically using an enhanced version of
|
|
5
|
+
the adaptive Monte Carlo |vegas| algorithm (G. P. Lepage,
|
|
6
|
+
J. Comput. Phys. 27(1978) 192, and J. Comput. Phys. 439(2021)
|
|
7
|
+
110386).
|
|
8
|
+
|
|
9
|
+
A |vegas| code generally involves two objects, one representing
|
|
10
|
+
the integrand and the other representing an integration
|
|
11
|
+
operator for a particular multidimensional volume. A typical
|
|
12
|
+
code sequence for a D-dimensional integral has the structure::
|
|
13
|
+
|
|
14
|
+
# create the integrand
|
|
15
|
+
def f(x):
|
|
16
|
+
... compute the integrand at point x[d] d=0,1...D-1
|
|
17
|
+
...
|
|
18
|
+
|
|
19
|
+
# create an integrator for volume with
|
|
20
|
+
# xl0 <= x[0] <= xu0, xl1 <= x[1] <= xu1 ...
|
|
21
|
+
integration_region = [[xl0, xu0], [xl1, xu1], ...]
|
|
22
|
+
integrator = vegas.Integrator(integration_region)
|
|
23
|
+
|
|
24
|
+
# do the integral and print out the result
|
|
25
|
+
result = integrator(f, nitn=10, neval=10000)
|
|
26
|
+
print(result)
|
|
27
|
+
|
|
28
|
+
The algorithm iteratively adapts to the integrand over
|
|
29
|
+
``nitn`` iterations, each of which uses at most ``neval``
|
|
30
|
+
integrand samples to generate a Monte Carlo estimate of
|
|
31
|
+
the integral. The final result is the weighted average
|
|
32
|
+
of the results from all iterations. Increase ``neval``
|
|
33
|
+
to increase the precision of the result. Typically
|
|
34
|
+
``nitn`` is between 10 and 20. ``neval`` can be
|
|
35
|
+
1000s to millions, or more, depending upon
|
|
36
|
+
the integrand and the precision desired.
|
|
37
|
+
|
|
38
|
+
The integrator remembers how it adapted to ``f(x)``
|
|
39
|
+
and uses this information as its starting point if it is reapplied
|
|
40
|
+
to ``f(x)`` or applied to some other function ``g(x)``.
|
|
41
|
+
An integrator's state can be archived for future applications
|
|
42
|
+
using Python's :mod:`pickle` module.
|
|
43
|
+
|
|
44
|
+
See the extensive Tutorial in the first section of the |vegas| documentation.
|
|
45
|
+
"""
|
|
46
|
+
|
|
47
|
+
# Created by G. Peter Lepage (Cornell University) in 12/2013.
|
|
48
|
+
# Copyright (c) 2013-26 G. Peter Lepage.
|
|
49
|
+
#
|
|
50
|
+
# This program is free software: you can redistribute it and/or modify
|
|
51
|
+
# it under the terms of the GNU General Public License as published by
|
|
52
|
+
# the Free Software Foundation, either version 3 of the License, or
|
|
53
|
+
# any later version (see <http://www.gnu.org/licenses/>).
|
|
54
|
+
#
|
|
55
|
+
# This program is distributed in the hope that it will be useful,
|
|
56
|
+
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
57
|
+
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
58
|
+
# GNU General Public License for more details.
|
|
59
|
+
|
|
60
|
+
__version__ = '6.4.1'
|
|
61
|
+
|
|
62
|
+
from ._vegas import RAvg, RAvgArray, RAvgDict
|
|
63
|
+
from ._vegas import AdaptiveMap, Integrator, BatchIntegrand
|
|
64
|
+
from ._vegas import reporter, VegasIntegrand, batchintegrand
|
|
65
|
+
from ._vegas import rbatchintegrand, RBatchIntegrand
|
|
66
|
+
from ._vegas import lbatchintegrand, LBatchIntegrand
|
|
67
|
+
from ._vegas import MPIintegrand
|
|
68
|
+
|
|
69
|
+
# legacy names:
|
|
70
|
+
from ._vegas import vecintegrand, VecIntegrand
|
|
71
|
+
|
|
72
|
+
import gvar as _gvar
|
|
73
|
+
import functools
|
|
74
|
+
import numpy
|
|
75
|
+
import pickle
|
|
76
|
+
|
|
77
|
+
###############################################
|
|
78
|
+
# PDFEV, etc PDFIntegrator expectation values
|
|
79
|
+
|
|
80
|
+
|
|
81
|
+
class PDFEV(_gvar.GVar):
    r""" Expectation value from |PDFIntegrator|.

    Expectation values are returned by
    :meth:`vegas.PDFIntegrator.__call__` and
    :class:`vegas.PDFIntegrator.stats`::

        >>> g = gvar.gvar(['1(1)', '10(10)'])
        >>> g_ev = vegas.PDFIntegrator(g)
        >>> def f(p):
        ...     return p[0] * p[1]
        >>> print(g_ev(f))
        10.051(57)
        >>> print(g_ev.stats(f))
        10(17)

    In the first case, the quoted error is the uncertainty
    in the :mod:`vegas` estimate of the mean of ``f(p)``.
    In the second case, the quoted uncertainty is the
    standard deviation evaluated with respect to the
    Gaussian distribution associated with ``g`` (added
    in quadrature to the :mod:`vegas` error, which
    is negligible here).

    :class:`vegas.PDFEV`\s have the following attributes:

    Attributes:

        pdfnorm: Divide PDF by ``pdfnorm`` to normalize it.

        results (:class:`vegas.RAvgDict`): Results from
            the underlying integrals.

    In addition, they have all the attributes of the :class:`vegas.RAvgDict`
    (``results``) corresponding to the underlying integrals.

    A :class:`vegas.PDFEV` returned by
    ``vegas.PDFIntegrator.stats(self, f...)`` has three further attributes:

    Attributes:

        stats: An instance of :class:`gvar.PDFStatistics`
            containing statistical information about
            the distribution of ``f(p)``.

        vegas_mean: |vegas| estimate for the mean value of
            ``f(p)``. The uncertainties in ``vegas_mean``
            are the integration errors from |vegas|.

        vegas_cov: |vegas| estimate for the covariance matrix
            of ``f(p)``. The uncertainties in ``vegas_cov``
            are the integration errors from |vegas|.

        vegas_sdev: |vegas| estimate for the standard deviation
            of ``f(p)``. The uncertainties in ``vegas_sdev``
            are the integration errors from |vegas|.
    """
    def __init__(self, results, analyzer=None):
        # ``results`` may arrive as a pickled bytes blob (see __reduce_ex__);
        # unpickle it transparently in that case.
        self.results = pickle.loads(results) if isinstance(results, bytes) else results
        if analyzer is None:
            # expectation value = integral of f(p)*pdf(p) / integral of pdf(p)
            ans = self.results['f(p)*pdf'] / self.results['pdf']
            # initialize the underlying GVar from ans's internal state
            super(PDFEV, self).__init__(*ans.internaldata)
            self.analyzer = None
        else:
            # analyzer post-processes raw results and may return extra
            # attributes (e.g., stats, vegas_mean from PDFIntegrator.stats)
            ans, extras = analyzer(self.results)
            super(PDFEV, self).__init__(*ans.internaldata)
            for k in extras:
                setattr(self, k, extras[k])
            self.analyzer = analyzer

    def extend(self, pdfev):
        r""" Merge results from :class:`PDFEV` object ``pdfev`` after results currently in ``self``. """
        self.results.extend(pdfev.results)

    def __getattr__(self, k):
        # Called only for attributes not found on self; delegate to
        # self.results so PDFEV exposes all RAvgDict attributes.
        if k in ['keys']:
            # must not look like a dictionary (e.g., to gvar routines)
            raise AttributeError('no keys method')
        if k == 'pdfnorm':
            # normalization of the (possibly unnormalized) PDF
            return self.results['pdf']
        return getattr(self.results, k)

    def _remove_gvars(self, gvlist):
        # gvar serialization hook: strip GVars out of results and replace
        # the GVar part of self with a placeholder (restored later by
        # _distribute_gvars).
        tmp = PDFEV(results=self.results, analyzer=self.analyzer)
        tmp.results = _gvar.remove_gvars(tmp.results, gvlist)
        tgvar = _gvar.gvar_factory() # small cov matrix
        super(PDFEV, tmp).__init__(*tgvar(0,0).internaldata)
        return tmp

    def _distribute_gvars(self, gvlist):
        # gvar deserialization hook: reinsert GVars; reconstructing the
        # PDFEV rebuilds the GVar part from the restored results.
        return PDFEV(
            results = _gvar.distribute_gvars(self.results, gvlist),
            analyzer=self.analyzer
        )

    def __reduce_ex__(self, protocol):
        # Pickle via (constructor, args): results are pre-pickled to bytes
        # so __init__ can rebuild the GVar state on load.
        return (PDFEV, (pickle.dumps(self.results), self.analyzer))
|
|
177
|
+
|
|
178
|
+
class PDFEVArray(numpy.ndarray):
    r""" Array of expectation values from |PDFIntegrator|.

    Expectation values are returned by
    :meth:`vegas.PDFIntegrator.__call__` and
    :class:`vegas.PDFIntegrator.stats`::

        >>> g = gvar.gvar(['1(1)', '10(10)'])
        >>> g_ev = vegas.PDFIntegrator(g)
        >>> def f(p):
        ...     return [p[0], p[1], p[0] * p[1]]
        >>> print(g_ev(f))
        [0.9992(31) 10.024(29) 10.051(57)]
        >>> print(g_ev.stats(f))
        [1.0(1.0) 10(10) 10(17)]

    In the first case, the quoted errors are the uncertainties
    in the :mod:`vegas` estimates of the means. In the second
    case, the quoted uncertainties are the standard deviations
    evaluated with respect to the Gaussian distribution
    associated with ``g`` (added in quadrature to the
    :mod:`vegas` errors, which are negligible here).

    :class:`vegas.PDFEVArray`\s have the following attributes:

    Attributes:

        pdfnorm: Divide PDF by ``pdfnorm`` to normalize it.

        results (:class:`vegas.RAvgDict`): Results from
            the underlying integrals.

    In addition, they have all the attributes of the :class:`vegas.RAvgDict`
    (``results``) corresponding to the underlying integrals.

    A :class:`vegas.PDFEVArray` ``s`` returned by
    ``vegas.PDFIntegrator.stats(self, f...)`` has three further
    attributes:

    Attributes:

        stats: ``s.stats[i]`` is a :class:`gvar.PDFStatistics`
            object containing statistical information about
            the distribution of ``f(p)[i]``.

        vegas_mean: |vegas| estimates for the mean values
            of ``f(p)``. The uncertainties in ``vegas_mean``
            are the integration errors from |vegas|.

        vegas_cov: |vegas| estimate for the covariance matrix
            of ``f(p)``. The uncertainties in ``vegas_cov``
            are the integration errors from |vegas|.

        vegas_sdev: |vegas| estimate for the standard deviation
            of ``f(p)``. The uncertainties in ``vegas_sdev``
            are the integration errors from |vegas|.
    """
    def __new__(cls, results, analyzer=None):
        # ndarray subclasses initialize in __new__ (not __init__);
        # ``results`` may arrive pickled (see __reduce_ex__).
        results = pickle.loads(results) if isinstance(results, bytes) else results
        if analyzer is None:
            # expectation values = integrals of f(p)*pdf(p) / integral of pdf(p);
            # view-cast the plain array into a PDFEVArray
            self = numpy.asarray(results['f(p)*pdf'] / results['pdf']).view(cls)
            self.analyzer = None
        else:
            # analyzer post-processes raw results and may return extra
            # attributes (e.g., stats, vegas_mean from PDFIntegrator.stats)
            ans, extras = analyzer(results)
            self = numpy.asarray(ans).view(cls)
            for k in extras:
                setattr(self, k, extras[k])
            self.analyzer = analyzer
        self.results = results
        return self

    def extend(self, pdfev):
        r""" Merge results from :class:`PDFEVArray` object ``pdfev`` after results currently in ``self``. """
        self.results.extend(pdfev.results)

    def __getattr__(self, k):
        # Called only for attributes not found on self; delegate to
        # self.results so PDFEVArray exposes all RAvgDict attributes.
        if k in ['keys']:
            # must not look like a dictionary (e.g., to gvar routines)
            raise AttributeError('no keys method')
        if k == 'pdfnorm':
            # normalization of the (possibly unnormalized) PDF
            return self.results['pdf']
        return getattr(self.results, k)

    def _remove_gvars(self, gvlist):
        # gvar serialization hook: strip GVars from both the results and
        # the array elements themselves (restored by _distribute_gvars).
        tmp = PDFEVArray(results=self.results, analyzer=self.analyzer)
        tmp.results = _gvar.remove_gvars(tmp.results, gvlist)
        tmp.flat[:] = _gvar.remove_gvars(numpy.array(tmp), gvlist)
        return tmp

    def _distribute_gvars(self, gvlist):
        # gvar deserialization hook: rebuilding from restored results
        # regenerates the array elements.
        return PDFEVArray(
            results=_gvar.distribute_gvars(self.results, gvlist), analyzer=self.analyzer
        )

    def __reduce_ex__(self, protocol):
        # Pickle via (constructor, args): results are pre-pickled to bytes
        # so __new__ can rebuild the array on load.
        return (PDFEVArray, (pickle.dumps(self.results), self.analyzer))
|
|
273
|
+
|
|
274
|
+
class PDFEVDict(_gvar.BufferDict):
    r""" Dictionary of expectation values from |PDFIntegrator|.

    Expectation values are returned by
    :meth:`vegas.PDFIntegrator.__call__` and
    :class:`vegas.PDFIntegrator.stats`::

        >>> g = gvar.gvar(['1(1)', '10(10)'])
        >>> g_ev = vegas.PDFIntegrator(g)
        >>> def f(p):
        ...     return dict(p=p, prod=p[0] * p[1])
        >>> print(g_ev(f))
        {'p': array([0.9992(31), 10.024(29)], dtype=object), 'prod': 10.051(57)}
        >>> print(g_ev.stats(f))
        {'p': array([1.0(1.0), 10(10)], dtype=object), 'prod': 10(17)}

    In the first case, the quoted errors are the uncertainties
    in the :mod:`vegas` estimates of the means. In the second
    case, the quoted uncertainties are the standard deviations
    evaluated with respect to the Gaussian distribution
    associated with ``g`` (added in quadrature to the
    :mod:`vegas` errors, which are negligible here).

    :class:`vegas.PDFEVDict` objects have the following attributes:

    Attributes:

        pdfnorm: Divide PDF by ``pdfnorm`` to normalize it.

        results (:class:`vegas.RAvgDict`): Results from
            the underlying integrals.

    In addition, they have all the attributes of the :class:`vegas.RAvgDict`
    (``results``) corresponding to the underlying integrals.

    A :class:`vegas.PDFEVDict` object ``s`` returned by
    :meth:`vegas.PDFIntegrator.stats` has three further attributes:

    Attributes:

        stats: ``s.stats[k]`` is a :class:`gvar.PDFStatistics`
            object containing statistical information about
            the distribution of ``f(p)[k]``.

        vegas_mean: |vegas| estimates for the mean values
            of ``f(p)``. The uncertainties in ``vegas_mean``
            are the integration errors from |vegas|.

        vegas_cov: |vegas| estimate for the covariance matrix
            of ``f(p)``. The uncertainties in ``vegas_cov``
            are the integration errors from |vegas|.

        vegas_sdev: |vegas| estimate for the standard deviation
            of ``f(p)``. The uncertainties in ``vegas_sdev``
            are the integration errors from |vegas|.
    """
    def __init__(self, results, analyzer=None):
        super(PDFEVDict, self).__init__()
        # ``results`` may arrive as a pickled bytes blob (see __reduce_ex__);
        # unpickle it transparently in that case.
        self.results = pickle.loads(results) if isinstance(results, bytes) else results
        if analyzer is None:
            # Copy the f(p)*pdf entries, keyed by ('f(p)*pdf', k) tuples in
            # results, into self under the original keys k ...
            for k in self.results:
                if k == 'pdf':
                    continue
                self[k[1]] = self.results[k]
            # ... then divide everything by the PDF integral to convert
            # integrals into expectation values.
            self.buf[:] /= self.results['pdf']
            self.analyzer = None
        else:
            # analyzer post-processes raw results and may return extra
            # attributes (e.g., stats, vegas_mean from PDFIntegrator.stats)
            ans, extras = analyzer(self.results)
            for k in extras:
                setattr(self, k, extras[k])
            for k in ans:
                self[k] = ans[k]
            self.analyzer = analyzer

    def extend(self, pdfev):
        r""" Merge results from :class:`PDFEVDict` object ``pdfev`` after results currently in ``self``. """
        self.results.extend(pdfev.results)

    def _remove_gvars(self, gvlist):
        # gvar serialization hook: strip GVars from both the results and
        # this dictionary's buffer (restored by _distribute_gvars).
        tmp = PDFEVDict(results=self.results, analyzer=self.analyzer)
        tmp.results = _gvar.remove_gvars(tmp.results, gvlist)
        tmp._buf = _gvar.remove_gvars(tmp.buf, gvlist)
        return tmp

    def _distribute_gvars(self, gvlist):
        # gvar deserialization hook: rebuilding from restored results
        # regenerates the dictionary entries.
        return PDFEVDict(
            results=_gvar.distribute_gvars(self.results, gvlist),
            analyzer=self.analyzer
        )

    def __getattr__(self, k):
        # Called only for attributes not found on self; delegate to
        # self.results so PDFEVDict exposes all RAvgDict attributes.
        if k == 'pdfnorm':
            # normalization of the (possibly unnormalized) PDF
            return self.results['pdf']
        return getattr(self.results, k)

    def __reduce_ex__(self, protocol):
        # Pickle via (constructor, args): results are pre-pickled to bytes
        # so __init__ can rebuild the dictionary on load.
        # (A redundant pickle.dumps(self.results) call whose result was
        # discarded has been removed; it serialized results twice.)
        return (PDFEVDict, (pickle.dumps(self.results), self.analyzer))
|
|
372
|
+
|
|
373
|
+
class PDFIntegrator(Integrator):
    r""" :mod:`vegas` integrator for PDF expectation values.

    ``PDFIntegrator(param, pdf)`` creates a |vegas| integrator that
    evaluates expectation values of arbitrary functions ``f(p)`` with
    respect to the probability density function ``pdf(p)``, where
    ``p`` is a point in the parameter space defined by ``param``.

    ``param`` is a collection of :class:`gvar.GVar`\s (Gaussian random
    variables) that together define a multi-dimensional Gaussian
    distribution with the same parameter space as the distribution
    described by ``pdf(p)``. ``PDFIntegrator`` internally
    re-expresses the integrals over these parameters in terms
    of new variables that emphasize the region defined by
    ``param`` (i.e., the region where the PDF associated with
    the ``param``'s Gaussian distribution is large).
    The new variables are also aligned with the principal axes
    of ``param``'s correlation matrix, to facilitate integration.

    ``param``'s means and covariances are chosen to emphasize the
    important regions of the ``pdf``'s distribution (e.g., ``param``
    might be set equal to the prior in a Bayesian analysis).
    ``param`` is used to define and optimize the integration variables;
    it does not affect the values of the integrals but can have a big
    effect on the accuracy.

    The Gaussian PDF associated with ``param`` is used if
    ``pdf`` is unspecified (i.e., ``pdf=None``, which is the default).

    Typical usage is illustrated by the following code, where
    dictionary ``g`` specifies both the parameterization (``param``)
    and the PDF::

        import vegas
        import gvar as gv
        import numpy as np

        g = gv.BufferDict()
        g['a'] = gv.gvar([10., 2.], [[1, 1.4], [1.4, 2]])
        g['fb(b)'] = gv.BufferDict.uniform('fb', 2.9, 3.1)

        g_ev = vegas.PDFIntegrator(g)

        def f(p):
            a = p['a']
            b = p['b']
            return a[0] + np.fabs(a[1]) ** b

        result = g_ev(f, neval=10_000, nitn=5)
        print('<f(p)> =', result)

    Here ``g`` indicates a three-dimensional distribution
    where the first two variables ``g['a'][0]`` and ``g['a'][1]``
    are Gaussian with means 10 and 2, respectively, and covariance
    matrix [[1, 1.4], [1.4, 2.]]. The last variable ``g['b']`` is
    uniformly distributed on interval [2.9, 3.1]. The result
    is: ``<f(p)> = 30.145(83)``.

    ``PDFIntegrator`` evaluates integrals of both ``f(p) * pdf(p)``
    and ``pdf(p)``. The expectation value of ``f(p)`` is the ratio
    of these two integrals (so ``pdf(p)`` need not be normalized).
    The result of a ``PDFIntegrator`` integration
    has an extra attribute, ``result.pdfnorm``, which is the
    |vegas| estimate of the integral over the PDF.

    Args:
        param : A |GVar|, array of |GVar|\s, or dictionary, whose values
            are |GVar|\s or arrays of |GVar|\s, that specifies the
            integration parameters. When parameter ``pdf=None``, the
            PDF is set equal to the Gaussian distribution corresponding
            to ``param``.

        pdf: The probability density function ``pdf(p)``.
            The PDF's parameters ``p`` have the same layout
            as ``param`` (arrays or dictionary), with the same
            keys and/or shapes. The Gaussian PDF associated with
            ``param`` is used when ``pdf=None`` (default).
            Note that PDFs need not be normalized.

        adapt_to_pdf (bool): :mod:`vegas` adapts to the PDF
            when ``adapt_to_pdf=True`` (default). :mod:`vegas` adapts
            to ``pdf(p) * f(p)`` when calculating the expectation
            value of ``f(p)`` if ``adapt_to_pdf=False``.

        limit (positive float): Integration variables are determined from
            ``param``. ``limit`` limits the range of each variable to
            a region of size ``limit`` times the standard deviation on
            either side of the mean, where means and standard deviations
            are specified by ``param``. This can be useful if the
            functions being integrated misbehave for large parameter
            values (e.g., ``numpy.exp`` overflows for a large range of
            arguments). Default is ``limit=100``; results should become
            independent of ``limit`` as it is increased.

        scale (positive float): The integration variables are
            rescaled to emphasize parameter values of order
            ``scale`` times the standard deviation measured from
            the mean, where means and standard deviations are
            specified by ``param``. The rescaling
            does not change the value of the integral but it
            can reduce uncertainties in the :mod:`vegas` estimate.
            Default is ``scale=1.0``.

        svdcut (non-negative float or None): If not ``None``, replace
            correlation matrix of ``param`` with a new matrix whose
            small eigenvalues are modified: eigenvalues smaller than
            ``svdcut`` times the maximum eigenvalue ``eig_max`` are
            replaced by ``svdcut*eig_max``. This can ameliorate
            problems caused by roundoff errors when inverting the
            covariance matrix. It increases the uncertainty associated
            with the modified eigenvalues and so is conservative.
            Setting ``svdcut=None`` or ``svdcut=0`` leaves the
            covariance matrix unchanged. Default is ``svdcut=1e-15``.

    All other keyword parameters are passed on to the the underlying
    :class:`vegas.Integrator`; the ``uses_jac`` keyword is ignored.
    """
|
|
490
|
+
def __init__(self, param, pdf=None, adapt_to_pdf=True, limit=100., scale=1., svdcut=1e-15, **kargs):
    # Accept legacy keyword ``g`` as an alias for ``param``.
    if 'g' in kargs and param is None:
        # for legacy code
        param = kargs['g']
        del kargs['g']
    if param is None:
        raise ValueError('param must be specified')
    if isinstance(param, PDFIntegrator):
        # Clone another PDFIntegrator: copy its vegas state and the
        # PDF-specific attributes, skipping the setup below.
        super(PDFIntegrator, self).__init__(param)
        for k in ['param_pdf', 'param_sample', 'pdf', 'adapt_to_pdf', 'limit', 'scale']:
            setattr(self, k, getattr(param, k))
        return
    elif isinstance(param, _gvar.PDF):
        # caller supplied a pre-built gvar.PDF
        self.param_pdf = param
    else:
        # build the Gaussian PDF corresponding to param's means/covariances
        self.param_pdf = _gvar.PDF(param, svdcut=svdcut)
    # sample point with the same layout as param; used later to probe
    # the structure of user-supplied integrands/PDFs
    self.param_sample = self.param_pdf.sample(mode=None)
    self.limit = abs(limit)
    self.scale = abs(scale)
    self.set(adapt_to_pdf=adapt_to_pdf, pdf=pdf)
    # 1-d grid pre-adapted (via Monte Carlo) to a Gaussian in the
    # arctan-mapped variable; reused for every integration direction
    integ_map = self._make_map(self.limit / self.scale)
    if kargs and 'uses_jac' in kargs:
        # uses_jac is not supported by PDFIntegrator; drop it quietly
        kargs = dict(kargs)
        del kargs['uses_jac']
    super(PDFIntegrator, self).__init__(
        AdaptiveMap(self.param_pdf.size * [integ_map]), **kargs
    )
    if getattr(self, 'mpi') and getattr(self, 'sync_ran'):
        # needed because of the Monte Carlo in _make_map()
        Integrator.synchronize_random() # for mpi only
|
|
520
|
+
|
|
521
|
+
def __reduce__(self):
    r""" Support pickling: reconstruct via ``PDFIntegrator(param_pdf, ...)``.

    Returns a ``(callable, args, state)`` triple: ``args`` rebuilds the
    integrator from its PDF-specific constructor arguments, and ``state``
    (restored through ``__setstate__``) carries every vegas parameter that
    differs from its default, plus the accumulated ``sigf`` array.
    """
    # collect only non-default vegas settings (uses_jac is never restored)
    overrides = {
        name: getattr(self, name)
        for name in Integrator.defaults
        if Integrator.defaults[name] != getattr(self, name) and name != 'uses_jac'
    }
    overrides['sigf'] = numpy.array(self.sigf)
    ctor_args = (self.param_pdf, self.pdf, self.adapt_to_pdf, self.limit, self.scale)
    return (PDFIntegrator, ctor_args, overrides)
|
|
532
|
+
|
|
533
|
+
def __setstate__(self, kargs):
    # Unpickling hook: the state dict produced by __reduce__ holds
    # non-default integrator settings; apply them via set().
    self.set(**kargs)
|
|
535
|
+
|
|
536
|
+
def set(self, ka={}, **kargs):
    r""" Reset default parameters in integrator.

    Usage is analogous to the constructor
    for :class:`PDFIntegrator`: for example, ::

        old_defaults = pdf_itg.set(neval=1e6, nitn=20)

    resets the default values for ``neval`` and ``nitn``
    in :class:`PDFIntegrator` ``pdf_itg``. A dictionary,
    here ``old_defaults``, is returned. It can be used
    to restore the old defaults using, for example::

        pdf_itg.set(old_defaults)
    """
    # Merge keyword arguments with the positional dict (entries in ``ka``
    # win). Always work on a copy: the original code aliased ``ka`` when
    # no keywords were given, so the ``del``s below mutated the caller's
    # dictionary (and would have corrupted a dict reused across calls).
    if kargs:
        kargs.update(ka)
    else:
        kargs = dict(ka)
    old_defaults = {}
    if 'param' in kargs:
        # param fixes the integration variables; it cannot be changed
        raise ValueError("Can't reset param.")
    if 'pdf' in kargs:
        if hasattr(self, 'pdf'):
            old_defaults['pdf'] = self.pdf
        pdf = kargs['pdf']
        # wrap a user-supplied PDF as a standard (batch) integrand;
        # pdf=None means "use param's Gaussian PDF" (handled downstream)
        self.pdf = (
            pdf if pdf is None else
            self._make_std_integrand(pdf, xsample=self.param_sample)
        )
        del kargs['pdf']
    if 'adapt_to_pdf' in kargs:
        if hasattr(self, 'adapt_to_pdf'):
            old_defaults['adapt_to_pdf'] = self.adapt_to_pdf
        self.adapt_to_pdf = kargs['adapt_to_pdf']
        del kargs['adapt_to_pdf']
    if kargs:
        # remaining settings belong to the underlying vegas Integrator
        old_defaults.update(super(PDFIntegrator, self).set(kargs))
    return old_defaults
|
|
575
|
+
|
|
576
|
+
def _make_map(self, limit):
    r""" Make vegas grid that is adapted to the pdf.

    Builds a 1-d :class:`AdaptiveMap` on ``theta`` in
    ``[-arctan(limit), arctan(limit)]`` (the arctan change of variables
    compactifies the infinite ``x`` range) and trains it for 10
    iterations on a unit Gaussian in ``x = scale * tan(theta)``,
    including the ``tan``-map Jacobian. Returns the adapted grid
    (a 1-d array of increment boundaries) for reuse in every
    integration direction.
    """
    ny = 2000                           # training points per iteration
    y = _gvar.RNG.random((ny,1))        # random points in [0,1)
    limit = numpy.arctan(limit)         # map x-limit into theta space
    m = AdaptiveMap([[-limit, limit]], ninc=100)
    theta = numpy.empty(y.shape, float)
    jac = numpy.empty(y.shape[0], float)
    for itn in range(10):
        m.map(y, theta, jac)            # y -> theta, with Jacobian jac
        tan_theta = numpy.tan(theta[:, 0])
        x = self.scale * tan_theta
        # integrand in theta space: Gaussian in x times dx/dtheta
        # (= tan(theta)**2 + 1, up to the constant scale factor)
        fx = (tan_theta ** 2 + 1) * numpy.exp(-(x ** 2) / 2.)
        m.add_training_data(y, (jac * fx) ** 2)
        m.adapt(alpha=1.5)
    return numpy.array(m.grid[0])
|
|
592
|
+
|
|
593
|
+
@staticmethod
def _f_lbatch(theta, f, param_pdf, pdf, scale, adapt_to_pdf):
    r""" Integrand for PDFIntegrator.

    Evaluates ``pdf(p)`` and ``f(p)*pdf(p)`` (in an lbatch layout) at the
    parameter points corresponding to integration variables ``theta``,
    including the Jacobian of the ``p = param_pdf.pflat(scale*tan(theta))``
    change of variables. Returns a :class:`gvar.BufferDict` with a
    ``'pdf'`` entry and ``'f(p)*pdf'`` entries (the latter keyed per
    component when ``f`` returns a dictionary).

    N.B. Static method is more efficient because less to carry around
    (eg, when nproc>1).
    N.B. ``f`` has been converted to a ``VegasIntegrand`` object (as has
    ``self.pdf`` if it is defined externally).
    """
    tan_theta = numpy.tan(theta)
    chiv = scale * tan_theta
    # Jacobian: d(p)/d(theta) = scale*(tan^2+1) per direction, times
    # the param_pdf chiv->p Jacobian
    dp_dtheta = numpy.prod(scale * (tan_theta ** 2 + 1.), axis=1) * param_pdf.dp_dchiv
    p = param_pdf.pflat(chiv, mode='lbatch')
    if pdf is None:
        # normalized in chiv space so don't want param_pdf.dp_dchiv in jac
        pdf = numpy.prod(numpy.exp(-(chiv ** 2) / 2.) / numpy.sqrt(2 * numpy.pi), axis=1) / param_pdf.dp_dchiv
    else:
        pdf = numpy.prod(pdf.eval(p, jac=None), axis=1)
    if f is None:
        # only the PDF is integrated
        ans = _gvar.BufferDict(pdf=dp_dtheta * pdf)
        return ans
    # f is not None here (handled above), so evaluate it directly; the
    # original code's unreachable ``dp_dtheta * pdf if f is None`` branch
    # has been removed.
    fp = f.format_evalx(f.eval(p))
    ans = _gvar.BufferDict()
    if hasattr(fp, 'keys'):
        # dictionary-valued integrand: one 'f(p)*pdf' entry per key
        ans['pdf'] = dp_dtheta * pdf
        for k in fp:
            shape = numpy.shape(fp[k])
            # reshape pdf to broadcast over fp[k]'s trailing dimensions
            ans[('f(p)*pdf', k)] = fp[k] * ans['pdf'].reshape(shape[:1] + len(shape[1:]) * (1,))
    else:
        # scalar- or array-valued integrand
        fp = numpy.asarray(fp)
        ans['pdf'] = dp_dtheta * pdf
        shape = fp.shape
        fp *= ans['pdf'].reshape(shape[:1] + len(shape[1:]) * (1,))
        ans['f(p)*pdf'] = fp
    if not adapt_to_pdf:
        # move 'pdf' to the end so vegas adapts to f(p)*pdf instead of
        # the PDF (vegas adapts to the first entry of the result)
        ans_pdf = ans.pop('pdf')
        ans['pdf'] = ans_pdf
    return ans
|
|
631
|
+
|
|
632
|
+
def __call__(self, f=None, save=None, saveall=None, **kargs):
    r""" Estimate expectation value of function ``f(p)``.

    Uses module :mod:`vegas` to estimate the integral of
    ``f(p)`` multiplied by the probability density function
    associated with ``g`` (i.e., ``pdf(p)``). At the same
    time it integrates the PDF. The ratio of the two integrals
    is the expectation value.

    Args:
        f (function): Function ``f(p)`` to integrate. Integral is
            the expectation value of the function with respect
            to the distribution. The function can return a number,
            an array of numbers, or a dictionary whose values are
            numbers or arrays of numbers. Setting ``f=None`` means
            that only the PDF is integrated. Integrals can be
            substantially faster if ``f(p)`` (and ``pdf(p)`` if set)
            are batch functions (see :mod:`vegas` documentation).

        pdf: If specified, ``pdf(p)`` is used as the probability
            density function rather than the Gaussian PDF
            associated with ``g``. The Gaussian PDF is used if
            ``pdf=None`` (default). Note that PDFs need not
            be normalized.

        adapt_to_pdf (bool): :mod:`vegas` adapts to the PDF
            when ``adapt_to_pdf=True`` (default). :mod:`vegas` adapts
            to ``pdf(p) * f(p)`` if ``adapt_to_pdf=False``.

        save (str or file or None): Writes ``results`` into pickle
            file specified by ``save`` at the end of each iteration.
            For example, setting ``save='results.pkl'`` means that
            the results returned by the last vegas iteration can be
            reconstructed later using::

                import pickle
                with open('results.pkl', 'rb') as ifile:
                    results = pickle.load(ifile)

            Ignored if ``save=None`` (default).

        saveall (str or file or None): Writes ``(results, integrator)``
            into pickle file specified by ``saveall`` at the end of
            each iteration. For example, setting ``saveall='allresults.pkl'``
            means that the results returned by the last vegas iteration,
            together with a clone of the (adapted) integrator, can be
            reconstructed later using::

                import pickle
                with open('allresults.pkl', 'rb') as ifile:
                    results, integrator = pickle.load(ifile)

            Ignored if ``saveall=None`` (default).

    All other keyword arguments are passed on to a :mod:`vegas`
    integrator; see the :mod:`vegas` documentation for further information.

    Returns:
        Expectation value(s) of ``f(p)`` as object of type
        :class:`vegas.PDFEV`, :class:`vegas.PDFEVArray`, or
        :class:`vegas.PDFEVDict`.
    """
    if kargs and 'uses_jac' in kargs:
        # uses_jac is not supported by PDFIntegrator; drop it quietly
        kargs = dict(kargs)
        del kargs['uses_jac']
    if kargs:
        self.set(kargs)
    if save is not None or saveall is not None:
        # temporarily wrap the current analyzer so each iteration is
        # pickled to disk; restored at the end of this call
        self.set(analyzer=PDFAnalyzer(self, analyzer=self.analyzer, save=save, saveall=saveall))
    if f is not None:
        # convert user function into a standard (batch) integrand
        f = self._make_std_integrand(f, self.param_sample)
    # bind everything _f_lbatch needs so the integrand is self-contained
    # (static method + partial => cheap to ship to worker processes)
    integrand = lbatchintegrand(functools.partial(
        PDFIntegrator._f_lbatch, f=f, param_pdf=self.param_pdf,
        pdf=self.pdf, scale=self.scale, adapt_to_pdf=self.adapt_to_pdf,
        ))
    results = super(PDFIntegrator, self).__call__(integrand)
    if results['pdf'] == 0:
        raise RuntimeError('Integral of PDF vanishes; increase neval?')
    if f is None:
        # only the PDF was integrated; return the raw results
        ans = results
        ans.pdfnorm = results['pdf']
    else:
        # wrap results in PDFEV / PDFEVArray / PDFEVDict as appropriate
        ans = PDFIntegrator._make_ans(results)
    if isinstance(self.analyzer, PDFAnalyzer):
        # undo the save/saveall analyzer wrapper installed above
        self.set(analyzer=self.analyzer.analyzer)
    return ans
|
|
718
|
+
|
|
719
|
+
@staticmethod
def _make_ans(results):
    """ Wrap raw integrator ``results`` in the matching ``PDFEV*`` container.

    Dictionary-valued integrands (no ``'f(p)*pdf'`` key) become a
    :class:`PDFEVDict`; otherwise a scalar ``'f(p)*pdf'`` entry becomes a
    :class:`PDFEV` and an array entry a :class:`PDFEVArray`.
    """
    if 'f(p)*pdf' not in results:
        return PDFEVDict(results)
    fpdf = results['f(p)*pdf']
    return PDFEV(results) if numpy.ndim(fpdf) == 0 else PDFEVArray(results)
|
|
728
|
+
|
|
729
|
+
def stats(self, f=None, moments=False, histograms=False, **kargs):
    r""" Statistical analysis of function ``f(p)``.

    Uses the :mod:`vegas` integrator to evaluate the expectation
    values and (co)variances of ``f(p)`` with
    respect to the probability density function associated
    with the :class:`PDFIntegrator`. Typical usage
    is illustrated by::

        >>> import gvar as gv
        >>> import vegas
        >>> g = gv.gvar(dict(a='1.0(5)', b='2(1)')) * gv.gvar('1.0(5)')
        >>> g_ev = vegas.PDFIntegrator(g)
        >>> g_ev(neval=10_000) # adapt the integrator to the PDF
        >>> @vegas.rbatchintegrand
        ... def f(p):
        ...     fp = dict(a=p['a'], b=p['b'])
        ...     fp['a**2 * b'] = p['a']**2 * p['b']
        ...     return fp
        >>> r = g_ev.stats(f)
        >>> print(r)
        {'a': 1.00(71), 'b': 2.0(1.4), 'a**2 * b': 4.0(6.1)}
        >>> print(r.vegas_mean['a**2 * b'])
        3.9972(30)
        >>> print(r.vegas_cov['a**2 * b', 'a**2 * b'] ** 0.5)
        6.073(13)

    ``g_ev.stats(f)`` returns a dictionary of |GVar|\s whose
    means and (co)variances are calculated from integrals of
    ``f(p) * pdf(p)`` and ``f(p)**2 * pdf(p)``, where ``pdf(p)``
    is the probability density function associated with ``g``.
    The means and standard deviations for each component of ``f(p)``
    are displayed by ``print(r)``. The values for the means
    and standard deviations have uncertainties coming from the
    integrations (|vegas| errors) but these are negligible compared
    to the standard deviations. (The last two
    print statements show the |vegas| results for the
    mean and standard deviation in ``r['a**2 * b']``: 3.9972(30)
    and 6.073(13), respectively.)

    The Gaussian approximation for the expectation value of
    ``f(p)`` is given by ::

        >>> print(f(g))
        {'a': 1.00(71), 'b': 2.0(1.4), 'a**2 * b': 2.0(3.7)}

    Results for ``a`` and ``b`` agree with the results from
    ``g_ev.stats(f)``, as expected since the distributions
    for these quantities are (obviously) Gaussian. Results
    for ``a**2 * b``, however, are quite different, indicating
    a distribution that is not Gaussian.

    Additional statistical data are collected by setting keywords
    ``moments=True`` and/or ``histograms=True``::

        >>> r = g_ev.stats(f, moments=True, histograms=True)
        >>> for k in r:
        ...     print(10 * '-', k)
        ...     print(r.stats[k])
        ---------- a
        mean = 0.99972(23) sdev = 0.70707(29) skew = -0.0036(20) ex_kurt = -0.0079(49)
        split-normal: 1.0013(14) +/- 0.70862(97)/0.71091(98)
        median: 0.99927(62) +/- 0.7077(10)/0.7063(10)
        ---------- b
        mean = 1.99954(47) sdev = 1.41424(72) skew = -0.0041(28) ex_kurt = -0.0074(65)
        split-normal: 2.0042(33) +/- 1.4162(23)/1.4224(24)
        median: 1.9977(11) +/- 1.4162(18)/1.4115(19)
        ---------- a**2 * b
        mean = 3.9957(29) sdev = 6.054(12) skew = 3.048(22) ex_kurt = 14.52(35)
        split-normal: -0.4891(25) +/- 6.9578(88)/0.519(10)
        median: 1.7447(24) +/- 6.284(12)/2.0693(26)

    where the uncertainties are all |vegas| errors. Here the
    integrator was used to calculate the first four moments
    of the distributions for each component of ``f(p)``, from
    which the mean, standard deviation, skewness, and excess
    kurtosis of those distributions are calculated. As expected
    the first two distributions here are clearly Gaussian,
    but the distribution for ``a**2 * b`` is not.

    The integrator also calculates histograms
    for each of the distributions and fits them to two
    different two-sided Gaussians: one is a continuous split-normal
    distribution, and the other is centered on the median of the
    distribution and is discontinuous there. (For more information
    see the documentation for :class:`gvar.PDFStatistics`.)
    Both models suggest large asymmetries in the distribution
    for ``a**2 * b``. The histogram for this distribution can
    be displayed using::

        >>> r.stats['a**2 * b'].plot_histogram(show=True)

    Note that |vegas| adaptation is turned off (``adapt=False``)
    by default in :meth:`PDFIntegrator.stats`. This setting
    can be overridden by setting the ``adapt`` parameter
    explicitly, but this is not recommended.

    Args:
        f (callable): Statistics are calculated for the
            components of the output from function ``f(p)``,
            where ``p`` is a point drawn from the distribution
            specified by the ``param`` or ``pdf`` associated with the
            :class:`PDFIntegrator`. Parameters ``p`` have
            the same structure as ``param`` (i.e., array or
            dictionary). If ``f=None``, it is replaced by
            ``f=lbatchintegrand(lambda p:p)``.

        moments (bool): If ``True``, moments are calculated so
            that the skewness and excess kurtosis can be determined.

        histograms (bool or dict): Setting ``histograms=True``
            causes histograms to be calculated for the
            distributions associated with each component of
            the output from ``f(p)``. Alternatively, ``histograms``
            can be set equal to a dictionary to specify
            the width ``binwidth`` of each histogram bin, the total
            number ``nbin`` of bins, and/or the location ``loc``
            of each histogram: for example, ::

                histograms=dict(
                    binwidth=0.5, nbin=12,
                    loc=gv.gvar({
                        'a': '1.0(5)', 'b': '2(1)',
                        'a**2 * b': '2.5(2.7)'
                        }),
                    )

            where ``loc`` specifies the location of the center of the histogram
            for each output quantity (e.g., ``loc['a'].mean``) and the width of
            the bins (e.g., ``binwidth * loc['a'].sdev``). If ``loc`` is not
            specified explicitly, it is determined from a simulation using
            values drawn from the Gaussian distribution for ``self.g``
            (or from the distribution described by ``self.pdf`` if it is specified).

        kargs (dict): Additional keywords passed on to the
            integrator.

    Returns:
        Expectation value(s) of ``f(p)`` as an object of type
        :class:`vegas.PDFEV`, :class:`vegas.PDFEVArray`,
        or :class:`vegas.PDFEVDict`.
    """
    # run with adaptation off unless the caller explicitly sets 'adapt';
    # the previous setting is restored at the end
    oldsettings = {}
    if 'adapt' not in kargs:
        oldsettings['adapt'] = self.adapt
        kargs['adapt'] = False

    if f is None:
        # default integrand is the identity f(p)=p; for dictionary-valued
        # parameters collect the keys available from param_sample that are
        # not stored directly, so they appear in the output too
        if self.param_sample.shape is None and not hasattr(self, 'extrakeys'):
            self.extrakeys = []
            for k in self.param_sample.all_keys():
                if k not in self.param_sample:
                    self.extrakeys.append(k)
        else:
            # NOTE(review): this branch also resets a previously computed
            # self.extrakeys to None when param_sample is a dictionary --
            # confirm that clobbering the cached value is intended
            self.extrakeys = None
        f = lbatchintegrand(functools.partial(
            PDFIntegrator.default_stats_f, jac=None, extrakeys=self.extrakeys
            ))
    # standardize f (user-supplied or default) to the internal batch form
    f = self._make_std_integrand(f, xsample=self.param_sample)
    # one sample evaluation fixes the output layout (dict / array / scalar)
    fpsample = f(self.param_sample)

    if histograms is not False:
        if histograms is True:
            histograms = {}
        # fill in histogram defaults: 12 bins, each 0.5 standard deviations wide
        nbin = histograms.get('nbin', 12)
        binwidth = histograms.get('binwidth', 0.5)
        histograms['nbin'] = nbin
        histograms['binwidth'] = binwidth
        loc = histograms.get('loc', None)
        # bins = histograms.get('bins', None)
        if loc is not None:
            # user-specified centers/widths for the histograms
            if hasattr(loc, 'keys'):
                loc = _gvar.asbufferdict(loc).flat[:]
            else:
                loc = numpy.asarray(loc).flat[:]
            mean = _gvar.mean(loc)
            sdev = _gvar.sdev(loc)
        else:
            # quick one-iteration integral of f and f**2 to estimate the
            # mean and width of each output, used to place the bins
            @lbatchintegrand
            def ff2(p):
                if hasattr(p, 'keys'):
                    p = p.lbatch_buf
                else:
                    p = p.reshape(p.shape[0], -1)
                fp = f.eval(p)
                return dict(f=fp, f2=fp ** 2)
            oldnitn = self.nitn
            r = self(ff2, nitn=1)
            self.set(nitn=oldnitn)
            mean = _gvar.mean(r['f'])
            sdev = numpy.fabs(_gvar.mean(r['f2']) - mean * mean) ** 0.5
        # bin edges: nbin+1 edges centered on mean[i], spanning
        # +- (nbin/2 * binwidth) standard deviations
        bins = []
        halfwidth = nbin / 2 * binwidth
        for i in range(mean.shape[0]):
            bins.append(
                mean[i] + numpy.linspace(-halfwidth * sdev[i], halfwidth * sdev[i], nbin+1)
                )
        histograms['bins'] = numpy.array(bins)
    # integrand returning fp, fp_i*fp_j products, and optional moment /
    # histogram data (see _stats_integrand)
    integrand = lbatchintegrand(functools.partial(
        PDFIntegrator._stats_integrand, f=f, moments=moments, histograms=histograms
        ))
    integrand = self._make_std_integrand(integrand, xsample=self.param_sample.flat[:])
    results = self(integrand, **kargs)
    # _stats_analyzer turns the raw integrals into final statistics lazily
    analyzer = functools.partial(
        PDFIntegrator._stats_analyzer,
        fpsample=fpsample, moments=moments, histograms=histograms
        )
    # container type follows the layout of f(p): dict / scalar / array
    if fpsample.shape is None:
        ans = PDFEVDict(results.results, analyzer)
    elif fpsample.shape == ():
        ans = PDFEV(results.results, analyzer)
    else:
        ans = PDFEVArray(results.results, analyzer)
    if oldsettings:
        self.set(**oldsettings)
    return ans
|
|
945
|
+
|
|
946
|
+
@staticmethod
def _stats_analyzer(results, fpsample, moments, histograms):
    r""" Create final stats results from Integrator results.

    Converts the raw integrals produced by ``_stats_integrand`` into
    the final expectation values, covariance data, and optional
    :class:`gvar.PDFStatistics` objects. ``fpsample`` supplies the
    layout (dict / scalar / array) of the original ``f(p)``.
    """
    # convert from Integrator to PDFIntegrator results:
    # drop the 'pdf' entry and strip the outer tag from each key
    # (keys here are tuples; k[1] is the original integrand key -- see
    # how PDFIntegrator packages integrands), then normalize by the
    # integral of the PDF
    tmp = _gvar.BufferDict()
    for k in results:
        if k == 'pdf':
            continue
        tmp[k[1]] = results[k]
    results = tmp / results['pdf']

    # 1) mean/cov
    fp = results['fp']
    meanfp = _gvar.mean(fp)
    covfpfp = numpy.zeros(2 * meanfp.shape, dtype=object)
    fp2 = numpy.array(len(meanfp) * [None])
    # results['fpfp'] holds the lower triangle of <fp_i fp_j>, flattened
    # in the same (i, j<=i) order used by _stats_integrand
    fpfp = iter(results['fpfp'])
    for i in range(meanfp.shape[0]):
        for j in range(i + 1):
            if i == j:
                fp2[i] = next(fpfp)
                covfpfp[i, i] = fp2[i] - fp[i] ** 2
            else:
                covfpfp[i, j] = covfpfp[j, i] = next(fpfp) - fp[i] * fp[j]
    # add vegas errors to cov and create final result
    covfpfp += _gvar.evalcov(fp)
    ans = _gvar.gvar(meanfp, _gvar.mean(covfpfp))
    if fpsample.shape is None:
        # dictionary-valued f(p): repackage results as BufferDicts keyed
        # like fpsample, with covariance blocks indexed by key pairs
        ans = _gvar.BufferDict(fpsample, buf=ans)
        mean = _gvar.BufferDict(fpsample, buf=fp)
        tcov = _gvar.evalcov(mean)
        cov = _gvar.BufferDict()
        sdev= _gvar.BufferDict()
        for k in mean:
            ksl = mean.slice(k)
            for l in mean:
                lsl = mean.slice(l)
                if tcov[k,l].shape == (1, 1) or tcov[k,l].shape == ():
                    cov[k, l] = covfpfp[ksl, lsl]
                else:
                    cov[k, l] = covfpfp[ksl,lsl].reshape(tcov[k,l].shape)
            sdev[k] = _gvar.fabs(cov[k, k]) ** 0.5
    elif fpsample.shape == ():
        # scalar f(p)
        ans = ans.flat[0]
        mean = fp.flat[0]
        cov = covfpfp
        sdev = _gvar.fabs(cov) ** 0.5
    else:
        # array-valued f(p): restore the original shape
        ans = ans.reshape(fpsample.shape)
        mean = fp.reshape(fpsample.shape)
        tcov = _gvar.evalcov(mean)
        cov = covfpfp.reshape(tcov.shape)
        sdev = _gvar.fabs(tcov.diagonal()).reshape(mean.shape) ** 0.5
    # 2) moments and histogram
    stats = numpy.array(fpsample.size * [None])
    for i in range(len(stats)):
        if moments:
            # first four moments => skewness and excess kurtosis available
            mom = [fp[i], fp2[i], results['fp**3'][i], results['fp**4'][i]]
        else:
            mom = [fp[i], fp2[i]]
        if histograms:
            hist = histograms['bins'][i], results['count'][i]
        else:
            hist = None
        stats[i] = _gvar.PDFStatistics(moments=mom, histogram=hist)
    # repackage stats to match the layout of f(p)
    if fpsample.shape is None:
        stats = _gvar.BufferDict(ans, buf=stats)
    elif fpsample.shape == ():
        stats = stats.flat[0]
    else:
        stats = stats.reshape(ans.shape)
    return ans, dict(stats=stats, vegas_mean=mean, vegas_cov=cov, vegas_sdev=sdev)
|
|
1018
|
+
|
|
1019
|
+
@staticmethod
def _stats_integrand(p, f, moments=False, histograms=False):
    """ Batch integrand whose PDF-weighted integrals give statistics of ``f(p)``.

    Returns a :class:`gvar.BufferDict` containing, per batch sample:
    ``fp`` (the values of ``f``), ``fpfp`` (the lower triangle of all
    products ``fp_i * fp_j``, flattened in (i, j<=i) order — the order
    ``_stats_analyzer`` unpacks), optionally third and fourth powers
    (for skewness/kurtosis), and optionally histogram bin indicators.
    """
    fp = f.eval(p)
    nfp = fp.shape[1]      # number of components of f(p)
    nbatch = fp.shape[0]   # number of samples in the batch
    fpfp = []
    for i in range(fp.shape[1]):
        for j in range(i + 1):
            fpfp.append(fp[:, i] * fp[:, j])
    fpfp = numpy.array(fpfp).T
    ans = _gvar.BufferDict(fp=fp, fpfp=fpfp)
    if moments:
        ans['fp**3'] = fp ** 3
        ans['fp**4'] = fp ** 4
    if histograms:
        # nbin + 2 slots: one per bin plus underflow/overflow on each side
        count = numpy.zeros((nbatch, nfp, histograms['nbin'] + 2), dtype=float)
        idx = numpy.arange(nbatch)
        for j in range(nfp):
            # searchsorted against the nbin+1 edges returns 0..nbin+1,
            # which indexes directly into the nbin+2 counters
            jdx = numpy.searchsorted(histograms['bins'][j], fp[:, j], side='right')
            count[idx, j, jdx] = 1
        ans['count'] = count
    return ans
|
|
1041
|
+
|
|
1042
|
+
@staticmethod
|
|
1043
|
+
def default_stats_f(p, jac=None, extrakeys=None):
|
|
1044
|
+
if extrakeys is not None and extrakeys:
|
|
1045
|
+
for k in extrakeys:
|
|
1046
|
+
p[k] = p[k]
|
|
1047
|
+
return p
|
|
1048
|
+
|
|
1049
|
+
def sample(self, nbatch, mode='rbatch'):
    r""" Generate random samples from the integrator's PDF.

    Typical usage is::

        import gvar as gv
        import numpy as np
        import vegas

        @vegas.rbatchintegrand
        def g_pdf(p):
            ans = 0
            h = 1.
            for p0 in [0.3, 0.6]:
                ans += h * np.exp(-np.sum((p-p0)**2, axis=0)/2/.01)
                h /= 2
            return ans

        g_param = gv.gvar([0.5, 0.5], [[.25, .2], [.2, .25]])
        g_ev = vegas.PDFIntegrator(param=g_param, pdf=g_pdf)

        # adapt integrator to g_pdf(p) and evaluate <p>
        g_ev(neval=4000, nitn=10)
        r = g_ev.stats()
        print('<p> =', r, '(vegas)')

        # sample g_pdf(p)
        wgts, p_samples = g_ev.sample(nbatch=40_000)
        # evaluate mean values <p> and <cos(p0)>
        p_avg = np.sum(wgts * p_samples, axis=1)
        cosp0_avg = np.sum(wgts * np.cos(p_samples[0]))
        print('<p> =', p_avg, '(sample)')
        print('<cos(p0)> =', cosp0_avg, '(sample)')

    Here ``p_samples[d, i]`` is a batch of about 40,000 random samples
    for parameter ``p[d]`` drawn from the (bimodal) distribution with
    PDF ``g_pdf(p)``. Index ``d=0,1`` labels directions in parameter
    space, while index ``i`` labels the sample. The samples
    are weighted by ``wgts[i]``; the sum of all weights equals one.
    The batch index in ``p_samples`` is the rightmost index because
    by default ``mode='rbatch'``. (Set ``mode='lbatch'`` to move the
    batch index to the leftmost position: ``p_samples[i, d]``.)
    The output from this script is::

        <p> = [0.40(17) 0.40(17)] (vegas)
        <p> = [0.40011804 0.39999454] (sample)
        <cos(p0)> = 0.9074221724843065 (sample)

    Samples are also useful for making histograms and contour
    plots of the probability density. For example, the following
    code uses the :mod:`corner` Python module to create histograms
    for each parameter, and a contour plot showing
    their joint distribution::

        import corner
        import matplotlib.pyplot as plt

        corner.corner(
            data=p_samples.T, weights=wgts, labels=['p[0]', 'p[1]'],
            range=[0.999, 0.999], show_titles=True, quantiles=[0.16, 0.5, 0.84],
            plot_datapoints=False, fill_contours=True, smooth=1,
            )
        plt.show()

    The output, showing the bimodal structure, is:

    .. image:: bimodal.png
        :width: 80%


    Args:
        nbatch (int): The integrator will return
            at least ``nbatch`` samples drawn from its PDF. The
            actual number of samples is the smallest multiple of
            ``self.last_neval`` that is equal to or larger than ``nbatch``.
            Results are packaged in arrays or dictionaries
            whose elements have an extra index labeling the different
            samples in the batch. The batch index is
            the rightmost index if ``mode='rbatch'``; it is
            the leftmost index if ``mode`` is ``'lbatch'``.
        mode (bool): Batch mode. Allowed
            modes are ``'rbatch'`` or ``'lbatch'``,
            corresponding to batch indices that are on the
            right or the left, respectively.
            Default is ``mode='rbatch'``.

    Returns:
        A tuple ``(wgts,samples)`` containing samples drawn from the integrator's
        PDF, together with their weights ``wgts``. The weighted sample points
        are distributed through parameter space with a density proportional to
        the PDF.

        In general, ``samples`` is either a dictionary or an array
        depending upon the format of |PDFIntegrator| parameter ``param``.
        For example, if ::

            param = gv.gvar(dict(s='1.5(1)', v=['3.2(8)', '1.1(4)']))

        then ``samples['s'][i]`` is a sample for parameter ``p['s']``
        where index ``i=0,1...nbatch(approx)`` labels the sample. The
        corresponding sample for ``p['v'][d]``, where ``d=0`` or ``1``,
        is ``samples['v'][d, i]`` provided ``mode='rbatch'``, which
        is the default. (Otherwise it is ``p['v'][i, d]``, for
        ``mode='lbatch'``.) The corresponding weight for this sample
        is ``wgts[i]``.

        When ``param`` is an array, ``samples`` is an array with the same
        shape plus an extra sample index which is either on the right
        (``mode='rbatch'``, default) or left (``mode='lbatch'``).
    """
    # samples per iteration: use the size of the last integration if known
    neval = self.last_neval if hasattr(self, 'last_neval') else self.neval
    # NOTE(review): the comparison below fails with TypeError when
    # nbatch is None (int < None) -- confirm nbatch=None is unsupported
    nit = 1 if nbatch is None else nbatch // neval
    if nit * neval < nbatch:
        nit += 1
    samples = []
    wgts = []
    for _ in range(nit):
        for theta, wgt in self.random_batch():
            # following code comes mostly from _f_lbatch
            tan_theta = numpy.tan(theta)
            chiv = self.scale * tan_theta
            # jac = dp_dtheta
            dp_dtheta = self.scale * numpy.prod((tan_theta ** 2 + 1.), axis=1) * self.param_pdf.dp_dchiv
            pflat = self.param_pdf.pflat(chiv, mode='lbatch')
            if self.pdf is None:
                # normalized in chiv space so don't want param_pdf.dpdchiv in jac
                pdf = numpy.prod(numpy.exp(-(chiv ** 2) / 2.) / numpy.sqrt(2 * numpy.pi), axis=1) / self.param_pdf.dp_dchiv
            else:
                pdf = numpy.prod(self.pdf.eval(pflat, jac=None), axis=1)
            # NOTE(review): p is computed but never used (pflat is what is
            # stored below) -- confirm _unflatten has no needed side effect
            p = self.param_pdf._unflatten(pflat, mode='lbatch')
            # sample weight = vegas weight * jacobian * PDF value
            wgts.append(wgt * dp_dtheta * pdf)
            samples.append(pflat)
    samples = numpy.concatenate(samples, axis=0)
    wgts = numpy.concatenate(wgts)
    # normalize so the weights sum to one
    wgts /= numpy.sum(wgts)
    # restore the parameter layout with the batch index on the
    # requested side
    if mode == 'rbatch':
        samples = self.param_pdf._unflatten(samples.T, mode='rbatch')
    else:
        samples = self.param_pdf._unflatten(samples, mode='lbatch')
    return wgts, samples
|
|
1189
|
+
|
|
1190
|
+
class PDFAnalyzer(object):
    r""" |vegas| analyzer for implementing ``save``, ``saveall`` keywords for :class:`PDFIntegrator` """

    def __init__(self, pdfinteg, analyzer, save=None, saveall=None):
        # integrator pickled alongside results when saveall is set
        self.pdfinteg = pdfinteg
        # wrapped analyzer whose begin/end hooks are forwarded (may be None)
        self.analyzer = analyzer
        # pickle destinations: filename, open file object, or None
        self.save = save
        self.saveall = saveall

    def begin(self, itn, integrator):
        """ Forward the start-of-iteration hook to the wrapped analyzer. """
        if self.analyzer is None:
            return
        self.analyzer.begin(itn, integrator)

    def end(self, itn_result, results):
        """ Forward the end-of-iteration hook, then pickle current results. """
        if self.analyzer is not None:
            self.analyzer.end(itn_result, results)
        if self.save is None and self.saveall is None:
            return
        ans = PDFIntegrator._make_ans(results)
        self._dump(self.save, ans)
        self._dump(self.saveall, (ans, self.pdfinteg))

    @staticmethod
    def _dump(dest, obj):
        """ Pickle ``obj`` to ``dest`` (filename, open file, or None => no-op). """
        if dest is None:
            return
        if isinstance(dest, str):
            with open(dest, 'wb') as ofile:
                pickle.dump(obj, ofile)
        else:
            pickle.dump(obj, dest)
|
|
1218
|
+
|
|
1219
|
+
|
|
1220
|
+
def ravg(reslist, weighted=None, rescale=None):
    r""" Create running average from list of :mod:`vegas` results.

    This function is used to change how the weighted average of
    |vegas| results is calculated. For example, the following code
    discards the first five results (where |vegas| is still adapting)
    and does an unweighted average of the last five::

        import vegas

        def fcn(p):
            return p[0] * p[1] * p[2] * p[3] * 16.

        itg = vegas.Integrator(4 * [[0,1]])
        r = itg(fcn)
        print(r.summary())
        ur = vegas.ravg(r.itn_results[5:], weighted=False)
        print(ur.summary())

    The unweighted average can be useful because it is unbiased.
    The output is::

        itn   integral        wgt average     chi2/dof        Q
        -------------------------------------------------------
          1   1.013(19)       1.013(19)           0.00     1.00
          2   0.997(14)       1.002(11)           0.45     0.50
          3   1.021(12)       1.0112(80)          0.91     0.40
          4   0.9785(97)      0.9980(62)          2.84     0.04
          5   1.0067(85)      1.0010(50)          2.30     0.06
          6   0.9996(75)      1.0006(42)          1.85     0.10
          7   1.0020(61)      1.0010(34)          1.54     0.16
          8   1.0051(52)      1.0022(29)          1.39     0.21
          9   1.0046(47)      1.0029(24)          1.23     0.27
         10   0.9976(47)      1.0018(22)          1.21     0.28

        itn   integral        average         chi2/dof        Q
        -------------------------------------------------------
          1   0.9996(75)      0.9996(75)          0.00     1.00
          2   1.0020(61)      1.0008(48)          0.06     0.81
          3   1.0051(52)      1.0022(37)          0.19     0.83
          4   1.0046(47)      1.0028(30)          0.18     0.91
          5   0.9976(47)      1.0018(26)          0.31     0.87

    Args:
        reslist (list): List whose elements are |GVar|\s, arrays of
            |GVar|\s, or dictionaries whose values are |GVar|\s or
            arrays of |GVar|\s. Alternatively ``reslist`` can be
            the object returned by a call to a
            :class:`vegas.Integrator` object (i.e, an instance of
            any of :class:`vegas.RAvg`, :class:`vegas.RAvgArray`,
            :class:`vegas.RAvgDict`, :class:`vegas.PDFEV`,
            :class:`vegas.PDFEVArray`, :class:`vegas.PDFEVDict`).
        weighted (bool): Running average is weighted (by the inverse
            covariance matrix) if ``True``. Otherwise the
            average is unweighted, which makes most sense if ``reslist``
            items were generated by :mod:`vegas` with little or no
            adaptation (e.g., with ``adapt=False``). If ``weighted``
            is not specified (or is ``None``), it is set equal to
            ``getattr(reslist, 'weighted', True)``.
        rescale: Integration results are divided by ``rescale``
            before taking the weighted average if
            ``weighted=True``; otherwise ``rescale`` is ignored.
            Setting ``rescale=True`` is equivalent to setting
            ``rescale=reslist[-1]``. If ``rescale`` is not
            specified (or is ``None``), it is set equal to
            ``getattr(reslist, 'rescale', True)``.

    Raises:
        ValueError: If ``reslist`` is empty, has an improper type, or
            its elements are not |GVar|\s, arrays, or dictionaries.
    """
    # PDFEV* containers: rebuild the running average from their itn_results
    for t in [PDFEV, PDFEVArray, PDFEVDict]:
        if isinstance(reslist, t):
            return t(ravg(reslist.itn_results, weighted=weighted, rescale=rescale))
    # RAvg* containers: unwrap the underlying list of per-iteration results
    for t in [RAvg, RAvgArray, RAvgDict]:
        if isinstance(reslist, t):
            reslist = reslist.itn_results
    # validate reslist. N.B. the former code wrapped the emptiness check in
    # a bare ``except:`` which swallowed its own ValueError('reslist empty')
    # and re-raised it as 'improper type for reslist'; the two failure modes
    # are now reported separately.
    try:
        nres = len(reslist)
    except TypeError:
        raise ValueError('improper type for reslist')
    if nres < 1:
        raise ValueError('reslist empty')
    if weighted is None:
        weighted = getattr(reslist, 'weighted', True)
    if rescale is None:
        rescale = getattr(reslist, 'rescale', reslist[-1])
    # dictionary-valued results
    if hasattr(reslist[0], 'keys'):
        return RAvgDict(itn_results=reslist, weighted=weighted, rescale=rescale)
    # scalar vs array results
    try:
        shape = numpy.shape(reslist[0])
    except Exception:
        raise ValueError('reslist[i] not GVar, array, or dictionary')
    if shape == ():
        return RAvg(itn_results=reslist, weighted=weighted)
    else:
        return RAvgArray(itn_results=reslist, weighted=weighted, rescale=rescale)
|
|
1312
|
+
|
|
1313
|
+
# restratification
|
|
1314
|
+
class _restratify:
|
|
1315
|
+
def __init__(self,
|
|
1316
|
+
f, nitn, ndy, below_avg_nstrat, verbose, gamma, **vargs
|
|
1317
|
+
):
|
|
1318
|
+
self.set(vargs)
|
|
1319
|
+
self.f = self._make_std_integrand(f)
|
|
1320
|
+
self.ndy = ndy
|
|
1321
|
+
self.yst = numpy.linspace(0, 1, self.ndy + 1)
|
|
1322
|
+
self.old_nstrat = numpy.array(self.nstrat)
|
|
1323
|
+
|
|
1324
|
+
# evaluate I, dI
|
|
1325
|
+
save = self.set(correlate_integrals=False, nitn=nitn, alpha=0, adapt=True)
|
|
1326
|
+
result = self(self._make_std_integrand(self._I_dI_integrand))
|
|
1327
|
+
self.set(save)
|
|
1328
|
+
self.I = result['I']
|
|
1329
|
+
self.dI = result['dI']
|
|
1330
|
+
self.Q = result.Q
|
|
1331
|
+
if verbose:
|
|
1332
|
+
print('\n==================== restratify')
|
|
1333
|
+
print('BEFORE:')
|
|
1334
|
+
print(result.summary()[:-1])
|
|
1335
|
+
print('nstrat =', numpy.array2string(self.old_nstrat, max_line_width=60, prefix=9 * ' '))
|
|
1336
|
+
print('nhcube =', numpy.prod(self.old_nstrat), '\n')
|
|
1337
|
+
|
|
1338
|
+
# calculate weight[mu]
|
|
1339
|
+
dim = len(self.old_nstrat)
|
|
1340
|
+
self.weight = numpy.zeros(dim, object)
|
|
1341
|
+
dIavg = result['I'] / self.ndy
|
|
1342
|
+
for mu in range(dim):
|
|
1343
|
+
self.weight[mu] = numpy.sum((result['dI'][mu] - dIavg)** 2) * self.ndy
|
|
1344
|
+
if verbose:
|
|
1345
|
+
ncol = 1 if self.dim < 15 else 3
|
|
1346
|
+
print('WEIGHTS:')
|
|
1347
|
+
print(_gvar.tabulate(dict(weight=self.weight), ncol=ncol))
|
|
1348
|
+
|
|
1349
|
+
# calculate new stratification, starting with smallest nstrat[mu]s
|
|
1350
|
+
w_avg = numpy.average(self.weight)
|
|
1351
|
+
w_gm = numpy.prod(self.weight) ** (1/dim)
|
|
1352
|
+
nstrat_gm = numpy.prod(self.old_nstrat) ** (1/dim)
|
|
1353
|
+
nstrat = self.old_nstrat * _gvar.mean((self.weight / w_gm) * nstrat_gm / self.old_nstrat) ** gamma
|
|
1354
|
+
nleft = dim
|
|
1355
|
+
musort = numpy.array(numpy.argsort(nstrat))
|
|
1356
|
+
new_nstrat = numpy.array(self.nstrat) # copies size and type from self.nstrat
|
|
1357
|
+
for mu in musort:
|
|
1358
|
+
new_nstrat[mu] = nstrat[mu] if nstrat[mu] > 1 else 1
|
|
1359
|
+
if below_avg_nstrat and self.weight[mu] < w_avg:
|
|
1360
|
+
new_nstrat[mu] = below_avg_nstrat
|
|
1361
|
+
nleft -= 1
|
|
1362
|
+
if nleft > 0:
|
|
1363
|
+
nstrat[musort[-nleft:]] *= (nstrat[mu] / new_nstrat[mu]) ** (1 / nleft)
|
|
1364
|
+
nstrat[mu] = new_nstrat[mu]
|
|
1365
|
+
self.set(nstrat=new_nstrat, neval=self.neval)
|
|
1366
|
+
# activate adaptive stratified sampling
|
|
1367
|
+
save = self.set(nitn=nitn, adapt=True)
|
|
1368
|
+
training = self(f)
|
|
1369
|
+
if isinstance(self, PDFIntegrator):
|
|
1370
|
+
# remove (redundant) f(p)*pdf from results
|
|
1371
|
+
tmp = RAvgDict(dict(pdf=training.itn_results[0]['pdf']))
|
|
1372
|
+
for r in training.itn_results:
|
|
1373
|
+
tmp.add(dict(pdf=r['pdf']))
|
|
1374
|
+
training = tmp
|
|
1375
|
+
self.set(save)
|
|
1376
|
+
if verbose:
|
|
1377
|
+
print('\nAFTER:')
|
|
1378
|
+
print(training.summary()[:-1])
|
|
1379
|
+
print('nstrat =', numpy.array2string(numpy.asarray(self.nstrat), max_line_width=60, prefix=9 * ' '))
|
|
1380
|
+
print('nhcube =', numpy.prod(self.nstrat))
|
|
1381
|
+
print(20 * '=')
|
|
1382
|
+
|
|
1383
|
+
# if show_plot == True:
|
|
1384
|
+
# x = (self.yst[:-1] + self.yst[1:]) / 2
|
|
1385
|
+
# for mu in range(dim):
|
|
1386
|
+
# plt.plot(x, _gvar.mean(result['dI'][mu]), label=f'mu={mu}')
|
|
1387
|
+
# plt.legend(bbox_to_anchor=(1.0, 1), loc="upper left")
|
|
1388
|
+
# plt.show()
|
|
1389
|
+
|
|
1390
|
+
    @lbatchintegrand
    def _I_dI_integrand(self, xx, jac=None):
        """ Batch integrand returning the total integral ``I`` and slab contributions ``dI``.

        For each batch point, evaluates the underlying integrand ``self.f``
        and accumulates its value into ``dI[mu, i]`` for every direction
        ``mu`` whose y-coordinate falls in slab ``i`` (the interval
        ``self.yst[i] <= y[mu] <= self.yst[i+1]``). The result is packed
        into a :class:`gvar.BufferDict` with entries ``'I'`` and ``'dI'``.

        Args:
            xx: Batch of sample points, either a BufferDict-like object
                (when ``self.xsample.shape is None``) or an lbatch array.
            jac: Ignored — it is unconditionally overwritten below by the
                Jacobian computed from ``self.map.invmap``; the parameter
                exists only so the integrator may pass it.

        Returns:
            lbatch result from ``BufferDict._r2lbatch`` containing
            ``'I'`` (shape ``(nbatch,)``) and ``'dI'``
            (shape ``(nbatch, dim, ndy)`` after the lbatch conversion).
        """
        # Normalize the input into a flat (nbatch, dim) float array.
        if self.xsample.shape is None:
            xx = xx.lbatch_buf
        else:
            xx = xx.reshape(xx.shape[0], -1)
        x = numpy.empty(xx.shape, float)
        x.flat[:] = xx.flat[:]      # copy (also coerces buffer contents to float)
        nbatch, dim = numpy.shape(x)

        # calculate y
        y = numpy.zeros(x.shape, float)
        # NOTE: this discards any jac argument passed in; invmap fills
        # y and jac in place from the x-space points.
        jac = numpy.zeros(nbatch, float)
        self.map.invmap(x, y, jac)
        y = y.T                     # y[mu] is now the batch of coordinates for direction mu

        # calculate I, dI
        I = self.f.eval(x, jac=jac if self.uses_jac else None)[:, 0]
        # dI -- rbatch (to facilitate the BufferDict below)
        dI = numpy.zeros((dim, self.ndy, nbatch), float)
        for mu in range(dim):
            for i in range(self.ndy):
                # Boolean mask selecting batch points whose y[mu] lies in slab i.
                # Both ends use <=, so a point exactly on an interior boundary
                # yst[i] contributes to both adjacent slabs.
                idx = (self.yst[i] <= y[mu]) & (y[mu] <= self.yst[i+1])
                dI[mu, i, idx] += I[idx]

        # create final dictionary
        ans = _gvar.BufferDict()
        ans['I'] = I
        ans['dI'] = dI
        return ans._r2lbatch()[0]
|
|
1420
|
+
|
|
1421
|
+
class restratifyIntegrator(Integrator, _restratify):
    """ :class:`Integrator` whose y-space stratification is re-optimized for ``f``.

    Combines a trained :class:`Integrator` with the :class:`_restratify`
    machinery, which reassigns strata between directions according to
    the measured stratification weights for integrand ``f``.
    """
    def __init__(self,
        integ, f, nitn=1, ndy=5, below_avg_nstrat=None, verbose=False,
        gamma=1., **vargs
        ):
        # Copy state from the (already adapted) integrator.
        super().__init__(integ)
        # Run the _restratify initializer (next class after Integrator
        # in the MRO) to compute and install the new stratification.
        super(Integrator, self).__init__(
            f=f, nitn=nitn, ndy=ndy, below_avg_nstrat=below_avg_nstrat,
            verbose=verbose, gamma=gamma,**vargs
            )

    @lbatchintegrand
    def _ones(self, x):
        """ Unit integrand: returns 1.0 for every point in the lbatch. """
        return numpy.ones(x[:, 0].size, float)
|
|
1435
|
+
|
|
1436
|
+
class restratifyPDFIntegrator(PDFIntegrator, _restratify):
    """ :class:`PDFIntegrator` whose y-space stratification is re-optimized.

    The weighting integrand is the PDF itself, so :meth:`_ones` (which
    returns 1 for every sample) is handed to the :class:`_restratify`
    machinery: integrating 1 against the PDF measures the PDF's own
    contribution per y-slab.
    """
    def __init__(self,
        integ, nitn=1, ndy=5, below_avg_nstrat=None, verbose=False,
        gamma=1., **vargs
        ):
        # Copy state from the (already adapted) PDF integrator.
        super().__init__(integ)
        # Run the _restratify initializer (next class after Integrator
        # in the MRO), using the trivial integrand self._ones.
        super(Integrator, self).__init__(
            self._ones, nitn=nitn, ndy=ndy, below_avg_nstrat=below_avg_nstrat,
            verbose=verbose, gamma=gamma, **vargs
            )

    @lbatchintegrand
    def _ones(self, xx):
        """ Unit integrand: returns 1.0 for every point in the lbatch.

        The input is first flattened to an (nbatch, dim) float array,
        mirroring the normalization done by ``_I_dI_integrand``.
        """
        if xx.shape is None:
            xx = xx.lbatch_buf
        else:
            xx = xx.reshape(xx.shape[0], -1)
        buf = numpy.empty(xx.shape, float)
        buf.flat[:] = xx.flat[:]    # coerces buffer contents to float
        return numpy.ones(buf[:, 0].size, float)
|
|
1456
|
+
|
|
1457
|
+
def restratify(
    integ, f=None, nitn=1, ndy=5, below_avg_nstrat=None, verbose=False,
    gamma=1., **vargs
    ):
    """ Return |vegas| integrator with y-space stratification optimized for integrand f.

    |vegas| uses a |vegas| map to transform the original integral of ``f(x)`` in
    x-space into an integral in y-space whose integrand has been flattened
    by the Jacobian of the transformation from x to y. |vegas| uses
    stratified Monte Carlo sampling to evaluate the integral in y-space.
    By default, |vegas| distributes strata evenly across all directions.
    ``restratify(integ, f)`` uses the integrator to calculate a weight,
    equal to the variance of the function ``dI/dy[d]``, for each
    direction ``d`` where ``I`` is the total integral of ``f(x)``. It
    then creates a new integrator whose strata are concentrated in
    directions where the weights are largest. This can reduce Monte Carlo
    uncertainties significantly, particularly for high dimension
    integrals.

    Typical usage is ::

        import vegas

        def f(x):
            # integrand
            ...

        integ = vegas.Integrator(20 * [[0,1]])
        integ(f, neval=400_000, nitn=10)                            #1 training
        integ = vegas.restratify(integ, f, nitn=3, verbose=True)    #2 new stratification
        result = integ(f, nitn=10)                                  #3 evaluate integral
        ...

    where ``f(x)`` is the integrand for a 20-dimensional integral. The first
    ``nitn=10`` iterations are discarded, as |vegas| adapts to the
    integrand (#1). The integrator is then replaced by a copy whose
    y-space strata have been moved between axes to optimize integration
    of ``f(x)`` (#2). The final integral is then calculated (#3).

    Here :func:`vegas.restratify` uses the original integrator ``integ``,
    with ``nitn=3`` iterations, to calculate the stratification weights
    for each direction. The number of strata used by the new integrator
    in each direction is (approximately) proportional to the weight for
    that direction. Setting ``verbose=True`` causes
    :func:`vegas.restratify` to print out a summary of its analysis,
    which in this case looks like ::

        ==================== restratify
        BEFORE:
        itn   integral        wgt average     chi2/dof        Q
        -------------------------------------------------------
          1   0.9973(49)      0.9973(49)          0.00     1.00
          2   1.0011(49)      0.9992(34)          0.86     0.84
          3   0.9963(49)      0.9983(28)          0.89     0.88
        nstrat = [2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1]
        nhcube = 32768

        WEIGHTS:
        key/index    value       key/index    value       key/index    value
        ---------------------------- ---------------------------- ----------------------------
        weight 0   0.0160 (12)       7   0.000139 (70)     14   8.7(5.5)e-05
               1   0.0145 (11)       8   7.9(5.2)e-05      15   8.4(5.5)e-05
               2   0.02342 (97)      9   5.1(4.2)e-05      16   0.000159 (75)
               3   2.4(2.9)e-05     10   6.2(4.6)e-05      17   4.2(3.9)e-05
               4   0.000144 (71)    11   4.5(3.9)e-05      18   5.8(4.6)e-05
               5   0.000102 (60)    12   9.5(5.7)e-05      19   6.3(4.7)e-05
               6   8.3(5.5)e-05     13   0.000145 (70)

        AFTER:
        itn   integral        wgt average     chi2/dof        Q
        -------------------------------------------------------
          1   0.99579(87)     0.99579(87)         0.00     1.00
          2   0.99494(36)     0.99507(33)         0.81     0.37
          3   0.99535(38)     0.99519(25)         0.56     0.57
        nstrat = [29 26 43  1  1  1  1  1  1  1  1  1  1  1  1  1
                   1  1  1  1]
        nhcube = 32422
        ====================

    This shows that strata are spread more or less evenly
    initially, with at most 2 in any given direction. The weights for
    the first three directions are much larger than for the remaining
    directions, and so all of the strata for the new integrator are
    concentrated in those directions. The total number of hypercubes created by
    the new stratification (32,422) is approximately the same
    as for the original stratification (32,768). The 'AFTER' integrals
    of ``f(x)``, with the new stratification, are a bit more than
    ten times as accurate as the 'BEFORE' integrals.

    Args:
        integ: |vegas| integrator of type :class:`vegas.Integrator`
            or :class:`vegas.PDFIntegrator` whose stratification
            is to be changed. The integrator should already be
            adapted to the integrand ``f(x)``.
        f (callable or None): The new stratification is designed to
            optimize integrals of integrand ``f(x)``. The integrand
            is the PDF itself when ``integ`` is a ``PDFIntegrator``;
            parameter ``f`` is ignored in this case.
        nitn (int): Number of |vegas| iterations used when calculating
            the weights that determine the new stratification. Default
            is ``nitn=1``, which is often sufficient.
        ndy (int): The weights equal the variance in the y-space
            function ``dI/dy[d]``, where ``I`` is the
            total integral of ``f(x)``. This function is approximated
            by a step function with ``ndy`` steps (between ``y[d]=0``
            and ``y[d]=1``). Default is ``ndy=5``.
        below_avg_nstrat (int): If not ``None``, ``below_avg_nstrat`` is
            the number of strata assigned to directions whose weights
            are below the average. Ignored if set to ``None`` (default).
        verbose (bool): Prints out a summary of the analysis if ``True``;
            ignored otherwise (default).
        gamma (float): Damping factor for restratification. Setting
            ``0 <= gamma < 1`` moderates the changes in the
            stratification. Default is ``gamma=1``.
        vargs (dict): Additional settings for the integrator.

    Integrators returned by :func:`vegas.restratify` have the following
    attributes, in addition to the standard integrator attributes.

    Attributes:

        integ.I: Integral of ``f(x)``.
        integ.dI: ``integ.dI[d][i]`` is the contribution to ``I`` coming from
            ``i/ndy <= y[d] <= (i+1)/ndy``.
        integ.weight: ``integ.weight[d]`` is the stratification weight for
            direction ``d``.
    """
    # Dispatch on integrator type: PDFIntegrators supply their own
    # integrand (the PDF), so f is not forwarded in that branch.
    if isinstance(integ, PDFIntegrator):
        return restratifyPDFIntegrator(
            integ, nitn=nitn, ndy=ndy, below_avg_nstrat=below_avg_nstrat, verbose=verbose,
            gamma=gamma, **vargs
            )
    else:
        return restratifyIntegrator(
            integ, f=f, nitn=nitn, ndy=ndy, below_avg_nstrat=below_avg_nstrat, verbose=verbose,
            gamma=gamma, **vargs)
|